input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>games/shelf/sawyer_shelf.py
'''
copied and modified from the MELD code
https://github.com/tonyzhaozh/meld
'''
from collections import OrderedDict
import numpy as np
import math
from gym.spaces import Dict, Box
from gym import utils
import os
from gym.envs.mujoco import mujoco_env
SCRIPT_DIR = os.path.dirname(__file__)
import IPython
e = IPython.embed
class SawyerPegShelfEnvMultitask(mujoco_env.MujocoEnv, utils.EzPickle):
'''
Place mug on a shelf
'''
def __init__(self, xml_path=None, box_site_name=None,
             action_mode='joint_position', task_mode='weight', sparse=False, stepMinusOne=False, *args, **kwargs):
    """Sawyer mug-on-shelf placement environment.

    Args:
        xml_path: MuJoCo model file; when None a default single-mug model is
            chosen based on action_mode.
        box_site_name: unused here (kept for interface compatibility).
        action_mode: one of "joint_position", "joint_delta_position", "torque".
        task_mode: only "weight" is supported ("position" is deprecated).
        sparse: use the truncated sparse reward instead of the dense one.
        stepMinusOne: subtract a small per-step penalty while sparse reward is 0.
    """
    assert action_mode in ["joint_position", "joint_delta_position", "torque"]
    assert task_mode in ["weight", "position"]
    assert task_mode == 'weight', "position mode deprecated, at least for now"
    self.task_mode = task_mode
    self.mug_weight = 5  # used to be None
    self.single_task_eval = True
    self.step_counter = 0
    # goal position (world frame); the x coordinate is what varies across tasks
    self.goal_x = 0
    self.goal_y = -0.1
    self.goal_z = 0.22
    self.goal_z_top = 0.32
    self.goal_pos = np.array([self.goal_x, self.goal_y, self.goal_z])
    self.init_mug_pose = np.array(
        [-0.125, 0.7, 0])  # different from goal_pos just because different reference frame 0 0.6 0.007
    self.auto_reset_task = False
    self.auto_reset_task_list = None
    if xml_path is None:
        print("creating a sample shelf env, for generating tasks")
        if action_mode == "torque":
            xml_path = os.path.join(SCRIPT_DIR, 'assets/sawyer_shelf_placing_torqueCtrl.xml')
        else:
            xml_path = os.path.join(SCRIPT_DIR, 'assets/sawyer_shelf_placing_posCtrl.xml')
    else:
        print("creating a multiple weight shelf env")
    print(f"\nControl mode: {action_mode}\n")
    print("xml path:", xml_path)
    self.xml_path= xml_path
    self.goal_site_name = 'goal'
    # vars
    self.action_mode = action_mode
    self.num_joint_dof = 9  # 7 + 2 gripper
    self.frame_skip = 100
    self.is_eval_env = False
    # Sparse reward setting
    self.sparseReward = sparse
    self.stepMinusOne = stepMinusOne
    self.truncation_dist = 0.3
    # if distance from goal larger than this,
    # get dist(self.truncation_dis) reward every time steps.
    # The dist is around 1 in starting pos
    # create the env; `startup` makes do_step feed a zero action while
    # MujocoEnv.__init__ probes the sim
    self.startup = True
    mujoco_env.MujocoEnv.__init__(self, xml_path,
                                  self.frame_skip)  # self.model.opt.timestep is 0.0025 (w/o frameskip)
    self.startup = False
    # initial position of joints (from the model keyframe)
    self.init_qpos = self.sim.model.key_qpos[0].copy()
    self.init_qvel = np.zeros(len(self.data.qvel))
    # joint limits read from the actuator control range
    self.limits_lows_joint_pos = self.model.actuator_ctrlrange.copy()[:, 0]
    self.limits_highs_joint_pos = self.model.actuator_ctrlrange.copy()[:, 1]
    # set the action space (always between -1 and 1 for this env)
    self.action_highs = np.ones((self.num_joint_dof,))
    self.action_lows = -1 * np.ones((self.num_joint_dof,))
    self.action_space = Box(low=self.action_lows, high=self.action_highs)
    # set the observation space (unbounded)
    obs_size = self.get_obs_dim()
    self.observation_space = Box(low=-np.ones(obs_size) * np.inf, high=np.ones(obs_size) * np.inf)
    # vel limits
    joint_vel_lim = 0.04  ##### 0.07 # magnitude of movement allowed within a dt [deg/dt]
    self.limits_lows_joint_vel = -np.array([joint_vel_lim] * self.num_joint_dof)
    self.limits_highs_joint_vel = np.array([joint_vel_lim] * self.num_joint_dof)
    # ranges used to rescale (-1, 1) actions
    self.action_range = self.action_highs - self.action_lows
    self.joint_pos_range = (self.limits_highs_joint_pos - self.limits_lows_joint_pos)
    self.joint_vel_range = (self.limits_highs_joint_vel - self.limits_lows_joint_vel)
    # ids
    self.site_id_goal = self.model.site_name2id(self.goal_site_name)
    self.site_id_goal_top = self.model.site_name2id(self.goal_site_name + '_top')
    self.site_id_mug = None
    self.site_id_mug_top = None
    # self.body_id_mug = self.model.body_name2id('mug')
    # weight<->index lookup tables; populated externally via assign_tasks()
    self.weight2idx = None  # need to call py_env.assign_tasks(train_tasks) externally to set
    self.idx2weight = None
    self.curr_weight = None
    self.curr_idx = None
    self.curr_weight = None  # NOTE(review): duplicate assignment, harmless
    self.top_reward = None
    # this is required to have variety in the position/weight of mug each time env is reset
    tasks = self.init_tasks(num_tasks=100, is_eval_env=False, change_weight=False, change_position=True)
    self.set_auto_reset_task(tasks)
    utils.EzPickle.__init__(self)
def set_auto_reset_task(self, task_list):
    """Enable per-episode task resampling from `task_list` on every reset."""
    self.auto_reset_task_list = task_list
    self.auto_reset_task = True
def override_action_mode(self, action_mode):
    """Switch the control mode after construction (no validation performed)."""
    self.action_mode = action_mode
def reset_model(self):
    """Restore keyframe pose/velocity, re-apply the mug weight, and
    optionally resample a random task for this episode."""
    self.set_state(self.init_qpos.copy(), self.init_qvel.copy())  # sets qpos/qvel + sim.forward
    self.set_mug_weight(self.mug_weight)
    if self.auto_reset_task:
        # RESET task every episode, randomly
        choice = np.random.randint(len(self.auto_reset_task_list))
        self.set_task_for_env(self.auto_reset_task_list[choice])
    return self.get_obs()
def reset(self):
    """Full reset: mujoco sim reset + model reset.

    The returned observation has three scalars appended:
    [step_counter, dummy sparse reward 0, dummy reward 0].
    """
    self.sim.reset()
    obs = self.reset_model()
    self.step_counter = 0
    extras = np.array([self.step_counter, 0, 0])
    return np.concatenate((obs, extras))
def get_obs_dim(self):
    """Observation size including the 3 scalars appended by reset()/step()
    (step counter, sparse reward, reward)."""
    base = len(self.get_obs())
    return base + 3
def _get_joint_velocities(self):
    """Return a copy of the full qvel vector (arm + gripper + objects)."""
    return self.data.qvel.copy()
def get_obs(self):
    """State observation: joint angles + joint velocities + goal position.

    Only the first 9 joints (7 arm + 2 gripper) are included; mug state is
    deliberately excluded from the observation.
    """
    num_joint = 9
    angles = self._get_joint_angles()[:num_joint]
    velocities = self._get_joint_velocities()[:num_joint]
    return np.concatenate([angles, velocities, self.goal_pos.copy()])
def step(self, action):
    """Advance one control step.

    Returns (obs, reward, done, info); obs has [step_counter, sparse_reward,
    reward] appended, info carries the raw negative-distance 'score'.
    """
    self.do_step(action)
    obs = self.get_obs()
    self.step_counter += 1
    reward, score, sparse_reward = self.compute_reward(get_score=True)
    done = self.step_counter > 40  # fixed-horizon episode
    if self.sparseReward:
        if sparse_reward == 0 and self.stepMinusOne and not done:
            # small per-step penalty while outside the truncation radius
            sparse_reward -= 0.025
        reward = sparse_reward
    info = {'score': score}
    tail = np.array([self.step_counter, sparse_reward, reward])
    obs = np.concatenate((obs, tail))
    return obs, reward, done, info
def do_step(self, action):
    """Apply `action` under the configured control mode and run the sim
    for `frame_skip` substeps."""
    if self.startup:
        # during MujocoEnv.__init__ we only need a zero action to probe the sim
        self.do_simulation(0 * action, self.frame_skip)
        return
    # clip to the (-1, 1) action limits in every mode
    action = np.clip(action, self.action_lows, self.action_highs)
    if self.action_mode == 'torque':
        torque_limit = 3
        self.do_simulation(action * torque_limit, self.frame_skip)
        return
    curr_position = self._get_joint_angles()[:self.num_joint_dof]
    if self.action_mode == 'joint_position':
        # rescale (-1, 1) into the actuator position limits
        scaled = ((action - self.action_lows) * self.joint_pos_range) / self.action_range
        desired_position = scaled + self.limits_lows_joint_pos
        target = self.make_feasible(curr_position, desired_position)
    elif self.action_mode == 'joint_delta_position':
        # rescale (-1, 1) into the per-step velocity limits and add to current pos
        scaled = ((action - self.action_lows) * self.joint_vel_range) / self.action_range
        target = curr_position + scaled + self.limits_lows_joint_vel
    else:
        raise NotImplementedError
    self.do_simulation(target, self.frame_skip)
def _get_joint_angles(self):
    """Return a copy of the full qpos vector (arm + gripper + objects)."""
    return self.data.qpos.copy()
def make_feasible(self, curr_position, desired_position):
    """Clamp a desired joint position so the implied per-step movement stays
    within the velocity limits.

    Args:
        curr_position: current joint positions (length num_joint_dof).
        desired_position: requested joint positions, same shape.

    Returns:
        The closest position to `desired_position` reachable within one step.
    """
    # np.clip replaces the original hand-rolled sign/min reconstruction of
    # the same saturation; limits_lows_joint_vel == -limits_highs_joint_vel.
    delta = np.clip(
        desired_position - curr_position,
        self.limits_lows_joint_vel,
        self.limits_highs_joint_vel,
    )
    return curr_position + delta
def compute_reward(self, get_score=False, goal_id_override=None):
    """GPS-style (quadratic + log) cost on mug-to-goal site distance.

    Returns (reward, score, sparse_reward) when get_score, else just reward.
    `score` is the negative raw mug-goal distance; `sparse_reward` is the
    dense reward truncated at `truncation_dist` and offset so it is exactly
    0 beyond that radius.
    """
    assert goal_id_override is None
    self.top_reward = True  # always on: average bottom and top site distances
    self.site_id_goal = self.model.site_name2id(self.goal_site_name)
    self.site_id_goal_top = self.model.site_name2id(self.goal_site_name + "_top")
    if self.startup or self.single_task_eval:
        self.site_id_mug = self.model.site_name2id('mugSite0')  # all env will have at least one mug
        self.site_id_mug_top = self.model.site_name2id('mugSite0_top')
    else:
        # multi-mug model: track the mug selected by the current task
        mug_site_name = f"mugSite{self.curr_idx}"
        self.site_id_mug = self.model.site_name2id(mug_site_name)
        self.site_id_mug_top = self.model.site_name2id(mug_site_name + "_top")
    # get coordinates of the sites in the world frame
    mug_xyz = self.data.site_xpos[self.site_id_mug].copy()
    goal_xyz = self.data.site_xpos[self.site_id_goal].copy()
    mug_xyz_top = self.data.site_xpos[self.site_id_mug_top].copy()
    goal_xyz_top = self.data.site_xpos[self.site_id_goal_top].copy()
    # score: raw (unscaled) negative distance, used for logging
    score = -np.linalg.norm(mug_xyz - goal_xyz)
    # distance, scaled by 5 to sharpen the cost landscape
    dist = 5 * np.linalg.norm(mug_xyz - goal_xyz)
    dist_top = 5 * np.linalg.norm(mug_xyz_top - goal_xyz_top)
    if self.top_reward:
        dist = (dist_top + dist) / 2
    sparse_dist = min(dist, self.truncation_dist)  # if dist too large: return the reward at truncate_dist
    # dense reward
    # use GPS cost function: log + quadratic encourages precision near insertion
    reward = -(dist ** 2 + math.log10(dist ** 2 + 1e-5))
    # sparse reward
    # offset the whole reward such that when dist>truncation_dist, the reward will be exactly 0
    sparse_reward = -(sparse_dist ** 2 + math.log10(sparse_dist ** 2 + 1e-5))
    sparse_reward = sparse_reward - (-(self.truncation_dist ** 2 + math.log10(self.truncation_dist ** 2 + 1e-5)))
    if get_score:
        return reward, score, sparse_reward
    else:
        return reward
def init_tasks(self, num_tasks, is_eval_env, change_weight, change_position):
    """Sample `num_tasks` [weight, x-position] task pairs.

    To be called externally to obtain samples from the task distribution.
    NOTE: reseeds numpy's *global* RNG — eval and train use distinct fixed
    seeds so the two task sets differ but stay reproducible.
    """
    np.random.seed(1 if is_eval_env else 2)
    if change_weight:
        weights = self.get_random_weights(num_tasks)
    else:
        weights = np.ones(num_tasks) * self.mug_weight
    if change_position:
        positions = self.get_random_target_pos(num_tasks)
    else:
        positions = np.ones(num_tasks) * self.goal_x
    return [[w, p] for w, p in zip(weights, positions)]
def get_random_weights(self, num_tasks):
    """Uniformly sample one mug weight per task from [5, 15)."""
    low, high = 5, 15
    return [np.random.uniform(low, high) for _ in range(num_tasks)]
def get_random_target_pos(self, num_tasks):
    """Uniformly sample one goal x-coordinate per task from [-0.2, 0.2)."""
    low, high = -0.2, 0.2
    return [np.random.uniform(low, high) for _ in range(num_tasks)]
def set_task_for_env(self, task):
    """Apply a (mug_weight, goal_x) task to this environment.

    To be called externally to set the task.
    """
    weight, goal_x = task
    self.set_goal_pos(goal_x)  # only the x coordinate of the goal varies
    self.mug_weight = weight
    self.set_mug_weight(weight)
def set_goal_pos(self, target_pos):
    """Move the goal and goal_top sites to a new x-coordinate (y/z fixed)."""
    self.goal_pos = np.array([target_pos, self.goal_y, self.goal_z])
    self.goal_pos_top = np.array([target_pos, self.goal_y, self.goal_z_top])
    for site_id, pos in ((self.site_id_goal, self.goal_pos),
                         (self.site_id_goal_top, self.goal_pos_top)):
        self.model.site_pos[site_id] = pos.copy()
def assign_tasks(self, tasks):
    """Build the weight<->index lookup tables from a task list.

    Each task is (weight, position); the mug index is the task's position in
    the list. Duplicate weights keep the last index, as before.
    """
    weights = [task[0] for task in tasks]
    self.weight2idx = {w: i for i, w in enumerate(weights)}
    self.idx2weight = {i: w for i, w in enumerate(weights)}
def set_mug_weight(self, weight):
    """Select the mug matching `weight` by teleporting it to the init pose.

    Instead of mutating the model's mass, the multi-mug model pre-bakes one
    mug per weight; this moves the matching mug into the working position.
    No-op in single-task-eval mode or when `weight` is None.
    """
    if self.single_task_eval or weight is None:
        return
    # idiom fix: `assert not x is None` -> `assert x is not None`
    assert self.weight2idx is not None
    self.curr_weight = weight
    print("Set weight", weight)
    self.curr_idx = self.weight2idx[self.curr_weight]
    # instead of setting the weight, move the one with corresponding weight to the init position
    angles = self.init_qpos.copy()
    velocities = self.init_qvel.copy()
    # each mug occupies 7 qpos entries (xyz + quaternion) after the arm joints
    idx_start = self.num_joint_dof + self.curr_idx * 7
    idx_end = idx_start + 3  # include only xyz
    angles[idx_start:idx_end] = self.init_mug_pose.copy()
    self.set_state(angles, velocities)  # this sets qpos and qvel + calls sim.forward
def set_single_task_eval(self, val):
| |
# coding=utf-8
#
# This file is part of Hypothesis (https://github.com/DRMacIver/hypothesis)
#
# Most of this work is copyright (C) 2013-2015 <NAME>
# (<EMAIL>), but it contains contributions by others. See
# https://github.com/DRMacIver/hypothesis/blob/master/CONTRIBUTING.rst for a
# full list of people who may hold copyright, and consult the git log if you
# need to determine who owns an individual contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import division, print_function, absolute_import
import math
import struct
from decimal import Decimal
from hypothesis.errors import InvalidArgument
from hypothesis.control import assume
from hypothesis.searchstrategy import SearchStrategy
from hypothesis.internal.compat import ArgSpec, text_type, getargspec, \
integer_types, float_to_decimal
from hypothesis.internal.reflection import proxies
from hypothesis.searchstrategy.reprwrapper import ReprWrapperStrategy
__all__ = [
'just', 'one_of',
'none',
'choices',
'booleans', 'integers', 'floats', 'complex_numbers', 'fractions',
'decimals',
'characters', 'text', 'binary',
'tuples', 'lists', 'sets', 'frozensets',
'dictionaries', 'fixed_dictionaries',
'sampled_from',
'builds',
'randoms', 'random_module',
'streaming', 'recursive', 'composite',
]
class FloatKey(object):
    """Hashable cache key for floats, compared by bit pattern.

    Using the raw IEEE-754 bits distinguishes 0.0 from -0.0 and makes NaN
    keys well-behaved, which plain float equality would not.
    """

    def __init__(self, f):
        self.value = float_to_int(f)

    def __eq__(self, other):
        return isinstance(other, FloatKey) and other.value == self.value

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.value)
def convert_value(v):
    """Normalize a value into a hashable cache key; floats go through
    FloatKey so that 0.0/-0.0/NaN are keyed by bit pattern."""
    return FloatKey(v) if isinstance(v, float) else (type(v), v)
def cacheable(fn):
    """Decorator memoizing strategy construction on (args, kwargs).

    The cache holds weak references, so strategies no longer referenced
    elsewhere can be garbage collected. Any unhashable argument silently
    bypasses the cache and calls `fn` directly.
    """
    import weakref
    cache = weakref.WeakValueDictionary()

    @proxies(fn)
    def cached_strategy(*args, **kwargs):
        kwargs_cache_key = set()
        try:
            for k, v in kwargs.items():
                kwargs_cache_key.add((k, convert_value(v)))
        except TypeError:
            # unhashable kwarg value: skip caching entirely
            return fn(*args, **kwargs)
        cache_key = (
            tuple(map(convert_value, args)), frozenset(kwargs_cache_key))
        try:
            return cache[cache_key]
        except TypeError:
            # unhashable positional arg: skip caching
            return fn(*args, **kwargs)
        except KeyError:
            result = fn(*args, **kwargs)
            cache[cache_key] = result
            return result
    return cached_strategy
def defines_strategy(strategy_definition):
    """Decorator wrapping a strategy function so that calling it builds a
    lazy DeferredStrategy instead of evaluating immediately."""
    from hypothesis.searchstrategy.deferred import DeferredStrategy

    @proxies(strategy_definition)
    def accept(*args, **kwargs):
        # defer evaluation until the strategy is actually used
        return DeferredStrategy(strategy_definition, args, kwargs)
    return accept
def just(value):
    """Return a strategy which only generates value.
    Note: value is not copied. Be wary of using mutable values.
    """
    from hypothesis.searchstrategy.misc import JustStrategy

    def calc_repr():
        # computed lazily so repr(value) is only paid for when displayed
        return 'just(%s)' % (repr(value),)
    return ReprWrapperStrategy(JustStrategy(value), calc_repr)
@defines_strategy
def none():
    """Return a strategy which only generates None."""
    return just(None)
def one_of(arg, *args):
    """Return a strategy which generates values from any of the argument
    strategies."""
    if not args:
        # single argument: validate it and pass it through unchanged
        check_strategy(arg)
        return arg
    from hypothesis.searchstrategy.strategies import OneOfStrategy
    strategies = (arg,) + args
    for strategy in strategies:
        check_strategy(strategy)
    return OneOfStrategy(strategies)
@cacheable
@defines_strategy
def integers(min_value=None, max_value=None):
    """Returns a strategy which generates integers (in Python 2 these may be
    ints or longs).

    If min_value is not None then all values will be >= min_value. If
    max_value is not None then all values will be <= max_value.
    """
    check_valid_integer(min_value)
    check_valid_integer(max_value)
    check_valid_interval(min_value, max_value, 'min_value', 'max_value')
    from hypothesis.searchstrategy.numbers import IntegersFromStrategy, \
        BoundedIntStrategy, RandomGeometricIntStrategy, WideRangeIntStrategy
    # flattened dispatch: each guard returns, replacing the nested if/else tree
    if min_value is None and max_value is None:
        return RandomGeometricIntStrategy() | WideRangeIntStrategy()
    if min_value is None:
        # only an upper bound: mirror an "integers from 0" strategy downwards
        return IntegersFromStrategy(0).map(lambda x: max_value - x)
    if max_value is None:
        return IntegersFromStrategy(min_value)
    if min_value == max_value:
        return just(min_value)
    return BoundedIntStrategy(min_value, max_value)
@cacheable
@defines_strategy
def booleans():
    """Returns a strategy which generates instances of bool."""
    from hypothesis.searchstrategy.misc import BoolStrategy
    return BoolStrategy()
def is_negative(x):
    """True when x carries a negative sign bit (including -0.0 and -nan)."""
    sign = math.copysign(1, x)
    return sign < 0
def count_between_floats(x, y):
    """Number of representable doubles in [x, y] (requires x <= y),
    counting both endpoints."""
    assert x <= y
    if not is_negative(x):
        # both non-negative: bit patterns grow monotonically with value
        assert not is_negative(y)
        return float_to_int(y) - float_to_int(x) + 1
    if is_negative(y):
        # both negative: bit patterns grow as the value decreases
        return float_to_int(x) - float_to_int(y) + 1
    # interval straddles zero: count each sign's half via -0.0 / 0.0
    return count_between_floats(x, -0.0) + count_between_floats(0.0, y)
def float_to_int(value):
    """Reinterpret the bits of an IEEE-754 double as an unsigned 64-bit int."""
    packed = struct.pack(b'!d', value)
    (bits,) = struct.unpack(b'!Q', packed)
    return bits
def int_to_float(value):
    """Reinterpret an unsigned 64-bit int as the IEEE-754 double it encodes."""
    packed = struct.pack(b'!Q', value)
    (f,) = struct.unpack(b'!d', packed)
    return f
@cacheable
@defines_strategy
def floats(
    min_value=None, max_value=None, allow_nan=None, allow_infinity=None
):
    """Returns a strategy which generates floats.
    - If min_value is not None, all values will be >= min_value.
    - If max_value is not None, all values will be <= max_value.
    - If min_value or max_value is not None, it is an error to enable
      allow_nan.
    - If both min_value and max_value are not None, it is an error to enable
      allow_infinity.
    Where not explicitly ruled out by the bounds, all of infinity, -infinity
    and NaN are possible values generated by this strategy.
    """
    # nan/infinity default to "allowed" only when the bounds permit them
    if allow_nan is None:
        allow_nan = bool(min_value is None and max_value is None)
    elif allow_nan:
        if min_value is not None or max_value is not None:
            raise InvalidArgument(
                'Cannot have allow_nan=%r, with min_value or max_value' % (
                    allow_nan
                ))
    check_valid_bound(min_value, 'min_value')
    check_valid_bound(max_value, 'max_value')
    check_valid_interval(min_value, max_value, 'min_value', 'max_value')
    if min_value is not None:
        min_value = float(min_value)
    if max_value is not None:
        max_value = float(max_value)
    # infinite bounds are equivalent to no bound at all
    if min_value == float(u'-inf'):
        min_value = None
    if max_value == float(u'inf'):
        max_value = None
    if allow_infinity is None:
        allow_infinity = bool(min_value is None or max_value is None)
    elif allow_infinity:
        if min_value is not None and max_value is not None:
            raise InvalidArgument(
                'Cannot have allow_infinity=%r, with both min_value and '
                'max_value' % (
                    allow_infinity
                ))
    from hypothesis.searchstrategy.numbers import WrapperFloatStrategy, \
        GaussianFloatStrategy, BoundedFloatStrategy, ExponentialFloatStrategy,\
        JustIntFloats, NastyFloats, FullRangeFloats, \
        FixedBoundedFloatStrategy
    if min_value is None and max_value is None:
        # fully unbounded: mixture of several distributions plus edge cases
        return WrapperFloatStrategy(
            GaussianFloatStrategy() |
            BoundedFloatStrategy() |
            ExponentialFloatStrategy() |
            JustIntFloats() |
            NastyFloats(allow_nan, allow_infinity) |
            FullRangeFloats(allow_nan, allow_infinity)
        )
    elif min_value is not None and max_value is not None:
        if min_value == max_value:
            return just(min_value)
        elif math.isinf(max_value - min_value):
            # range width overflows a double: split at zero and recurse
            assert min_value < 0 and max_value > 0
            return floats(min_value=0, max_value=max_value) | floats(
                min_value=min_value, max_value=0
            )
        elif count_between_floats(min_value, max_value) > 1000:
            # wide range: uniform-ish strategy plus hand-picked critical values
            critical_values = [
                min_value, max_value, min_value + (max_value - min_value) / 2]
            if min_value <= 0 <= max_value:
                if not is_negative(max_value):
                    critical_values.append(0.0)
                if is_negative(min_value):
                    critical_values.append(-0.0)
            return FixedBoundedFloatStrategy(
                lower_bound=min_value, upper_bound=max_value
            ) | sampled_from(critical_values)
        elif is_negative(max_value):
            # few values, all negative: enumerate via the bit representation
            # (bit order is reversed for negatives, hence ub_int <= lb_int)
            assert is_negative(min_value)
            ub_int = float_to_int(max_value)
            lb_int = float_to_int(min_value)
            assert ub_int <= lb_int
            return integers(min_value=ub_int, max_value=lb_int).map(
                int_to_float
            )
        elif is_negative(min_value):
            # few values straddling zero: handle each sign separately
            return floats(min_value=min_value, max_value=-0.0) | floats(
                min_value=0, max_value=max_value
            )
        else:
            # few values, all non-negative: enumerate via the bit representation
            ub_int = float_to_int(max_value)
            lb_int = float_to_int(min_value)
            assert lb_int <= ub_int
            return integers(min_value=lb_int, max_value=ub_int).map(
                int_to_float
            )
    elif min_value is not None:
        # lower bound only: shift |unbounded non-nan floats| up from min_value
        critical_values = [min_value]
        if allow_infinity:
            critical_values.append(float(u'inf'))
        if is_negative(min_value):
            critical_values.append(-0.0)
        if min_value <= 0:
            critical_values.append(0.0)
        return (
            floats(allow_infinity=allow_infinity, allow_nan=False).map(
                lambda x: assume(not math.isnan(x)) and min_value + abs(x)
            )
        ) | sampled_from(critical_values)
    else:
        # upper bound only: mirror of the lower-bound-only case
        assert max_value is not None
        critical_values = [max_value]
        if allow_infinity:
            critical_values.append(float(u'-inf'))
        if max_value >= 0:
            critical_values.append(-0.0)
        if not is_negative(max_value):
            critical_values.append(0.0)
        return (
            floats(allow_infinity=allow_infinity, allow_nan=False).map(
                lambda x: assume(not math.isnan(x)) and max_value - abs(x)
            )
        ) | sampled_from(critical_values)
@cacheable
@defines_strategy
def complex_numbers():
    """Returns a strategy that generates complex numbers."""
    from hypothesis.searchstrategy.numbers import ComplexStrategy
    # a complex is built from a (real, imaginary) pair of unbounded floats
    return ComplexStrategy(
        tuples(floats(), floats())
    )
@cacheable
@defines_strategy
def tuples(*args):
    """Return a strategy which generates a tuple of the same length as args by
    generating the value at index i from args[i].

    e.g. tuples(integers(), integers()) would generate a tuple of length
    two with both values an integer.
    """
    for strategy in args:
        check_strategy(strategy)
    from hypothesis.searchstrategy.collections import TupleStrategy
    return TupleStrategy(args, tuple)
@defines_strategy
def sampled_from(elements):
    """Returns a strategy which generates any value present in the iterable
    elements.

    Note that as with just, values will not be copied and thus you
    should be careful of using mutable data.
    """
    from hypothesis.searchstrategy.misc import SampledFromStrategy, \
        JustStrategy
    # materialize once so generators can be sampled repeatedly
    elements = tuple(iter(elements))
    if not elements:
        raise InvalidArgument(
            'sampled_from requires at least one value'
        )
    if len(elements) == 1:
        # single element degenerates to just()
        return JustStrategy(elements[0])
    return SampledFromStrategy(elements)
@cacheable
@defines_strategy
def lists(
elements=None, min_size=None, average_size=None, max_size=None,
unique_by=None, unique=False,
):
"""Returns a list containing values drawn from elements length in the
interval [min_size, max_size] (no bounds in that direction if these are
None). If max_size is 0 then elements may be None and only the empty list
will be drawn.
average_size may be used as a size hint to roughly control the size
of list but it may not be the actual average of sizes you get, due
to a variety of factors.
If unique is True (or something that evaluates to True), we compare direct
object equality, as if unique_by was `lambda x: x`. This comparison only
works for hashable types.
if unique_by is not None it must be a function returning a hashable type
when given a value drawn from elements. The resulting list will satisfy the
condition that for i != j, unique_by(result[i]) != unique_by(result[j]).
"""
if unique:
if unique_by is not None:
raise InvalidArgument((
'cannot specify both unique and unique_by (you probably only '
'want to set unique_by)'
))
else:
unique_by = lambda x: x
if unique_by is not None:
from hypothesis.searchstrategy.collections import UniqueListStrategy
if max_size == 0:
return builds(list)
check_strategy(elements)
if min_size is not None and elements.template_upper_bound < min_size:
raise InvalidArgument((
'Cannot generate unique lists of size %d from %r, which '
'contains no more than %d distinct values') | |
import argparse
import mock
import requests
from cloud_info_provider.providers import ooi as ooi_provider
from cloud_info_provider.providers import openstack as os_provider
from cloud_info_provider.tests import base, data
from cloud_info_provider.tests import utils as utils
from six.moves.urllib.parse import urljoin
FAKES = data.OS_FAKES
class OpenStackProviderOptionsTest(base.TestCase):
    """Tests for the OpenStack provider's argparse option wiring."""

    def test_populate_parser(self):
        """populate_parser registers all CLI flags and they parse back intact."""
        parser = argparse.ArgumentParser(conflict_handler="resolve")
        provider = os_provider.OpenStackProvider
        provider.populate_parser(parser)
        opts = parser.parse_args(
            [
                "--os-username",
                "foo",
                "--os-password",
                "<PASSWORD>",
                "--os-auth-url",
                "http://example.org:5000",
                "--insecure",
                "--all-images",
                "--select-flavors",
                "public",
                "--os-region",
                "North pole",
            ]
        )
        self.assertEqual(opts.os_username, "foo")
        self.assertEqual(opts.os_password, "<PASSWORD>")
        self.assertEqual(opts.os_auth_url, "http://example.org:5000")
        self.assertEqual(opts.os_region, "North pole")
        self.assertEqual(opts.insecure, True)
        self.assertEqual(opts.all_images, True)
        self.assertEqual(opts.select_flavors, "public")
class OpenStackProviderAuthTest(base.TestCase):
    """Tests for project rescoping in the OpenStack provider."""
    # Do not limit diff output on failures
    maxDiff = None

    def setUp(self):
        super(OpenStackProviderAuthTest, self).setUp()

        class FakeProvider(os_provider.OpenStackProvider):
            # bypass the real __init__ (it would contact keystone)
            def __init__(self, opts):
                self.project_id = None
                self.os_region = None
                self.opts = mock.Mock()
        self.provider = FakeProvider(None)

    def test_rescope_simple(self):
        """Without an auth refresher, rescoping just loads a new session."""
        self.provider.auth_refresher = None
        with utils.nested(
            mock.patch("keystoneauth1.loading." "load_auth_from_argparse_arguments"),
            mock.patch("keystoneauth1.loading." "load_session_from_argparse_arguments"),
        ) as (_, m_load_session):
            session = mock.Mock()
            session.get_project_id.return_value = "foo"
            m_load_session.return_value = session
            self.provider._rescope_project("foo", "bar")
            self.assertEqual("foo", self.provider.project_id)

    def test_rescope_refresh(self):
        """With an auth refresher set, rescoping must also refresh credentials."""
        m_refresh = mock.Mock()
        self.provider.auth_refresher = mock.Mock()
        self.provider.auth_refresher.refresh = m_refresh
        with utils.nested(
            mock.patch("keystoneauth1.loading." "load_auth_from_argparse_arguments"),
            mock.patch("keystoneauth1.loading." "load_session_from_argparse_arguments"),
        ) as (_, m_load_session):
            session = mock.Mock()
            session.get_project_id.return_value = "foo"
            m_load_session.return_value = session
            self.provider._rescope_project("foo", "bar")
            self.assertEqual("foo", self.provider.project_id)
            m_refresh.assert_called_with(self.provider, project_id="foo", vo="bar")
class OpenStackProviderTest(base.TestCase):
# Do not limit diff output on failures
maxDiff = None
def setUp(self):
    """Build a provider whose OpenStack clients are all replaced by mocks."""
    super(OpenStackProviderTest, self).setUp()

    class FakeProvider(os_provider.OpenStackProvider):
        # bypass the real __init__ to avoid contacting live services
        def __init__(self, opts):
            self.nova = mock.Mock()
            self.glance = mock.Mock()
            self.glance.http_client.get_endpoint.return_value = (
                "http://glance.example.org:9292/v2"
            )
            self.session = mock.Mock()
            self.session.get_project_id.return_value = "TEST_PROJECT_ID"
            self.auth_plugin = mock.MagicMock()
            self.auth_plugin.auth_url = "http://foo.example.org:5000/v2"
            self.static = mock.Mock()
            self._ca_info = {
                "http://foo.example.org:5000/v2": {
                    "issuer": "foo",
                    "trusted_cas": [],
                }
            }
            self.insecure = False
            self.os_project_id = None
            self.os_region = None
            self.select_flavors = "all"
            self._rescope_project = mock.Mock()
            self.all_images = False
    self.provider = FakeProvider(None)
def assert_resources(self, expected, observed, template=None, ignored_fields=None):
    """Assert each observed resource equals its expected dict and, when a
    template is given, that every template variable appears in it.

    Args:
        expected: dict mapping resource id -> expected field dict.
        observed: dict mapping resource id -> produced field dict.
        template: optional template name to extract required fields from.
        ignored_fields: template variables to skip (defaults to none).

    Fix: the original used a mutable default (`ignored_fields=[]`), which is
    shared across calls; a None sentinel avoids that pitfall.
    """
    if ignored_fields is None:
        ignored_fields = []
    if template:
        fields = utils.get_variables_from_template(template, ignored_fields)
    else:
        fields = []
    for k, v in observed.items():
        self.assertDictEqual(expected[k], v)
        for f in fields:
            self.assertIn(f, v)
def test_get_shares(self):
    """A share with a null project falls back to the static project_id."""
    with utils.nested(
        mock.patch.object(self.provider.static, "get_compute_shares"),
    ) as (m_get_compute_shares,):
        m_get_compute_shares.return_value = {
            "vo1": {"auth": {"project_id": "foobar"}}
        }
        shares = self.provider.get_compute_shares(**{"auth": {"project_id": None}})
        self.assertEqual("foobar", shares["vo1"]["project"])
        self.assertTrue(m_get_compute_shares.called)
def test_get_share(self):
    """A single share exposes the project name and domain fields."""
    self.auth_plugin = mock.Mock()
    share = self.provider.get_compute_share(**{"auth": {"project_id": None}})
    self.assertIn("project_name", share)
    self.assertIn("project_domain_name", share)
def test_get_templates_with_defaults(self):
    """Flavors map to template dicts using built-in defaults (amd64/private)."""
    expected_templates = {}
    url = "http://schemas.openstack.org/template/resource"
    for f in FAKES.flavors:
        expected_templates[f.id] = {
            "template_memory": f.ram,
            "template_cpu": f.vcpus,
            "template_id": "%s#%s" % (url, f.id),
            "template_native_id": "%s" % f.id,
            "template_platform": "amd64",
            "template_network": "private",
            "template_disk": f.disk,
            "template_ephemeral": f.ephemeral,
        }
    with utils.nested(
        mock.patch.object(self.provider.static, "get_template_defaults"),
        mock.patch.object(self.provider.nova.flavors, "list"),
    ) as (m_get_template_defaults, m_flavors_list):
        m_get_template_defaults.return_value = {}
        m_flavors_list.return_value = FAKES.flavors
        templates = self.provider.get_templates(**{"auth": {"project_id": None}})
        self.assertTrue(m_get_template_defaults.called)
        # fields below come from the template but are unrelated to flavors
        self.assert_resources(
            expected_templates,
            templates,
            template="compute.ldif",
            ignored_fields=[
                "compute_api_type",
                "compute_api_version",
                "compute_api_endpoint_technology",
                "compute_capabilities",
                "compute_endpoint_url",
                "compute_hypervisor",
                "compute_hypervisor_version",
                "compute_middleware",
                "compute_middleware_developer",
                "compute_middleware_version",
                "compute_production_level",
                "compute_api_authn_method",
                "compute_service_name",
                "compute_service_production_level",
                "compute_total_cores",
                "compute_total_ram",
                "image_id",
                "image_name",
                "image_os_family",
                "image_os_name",
                "image_os_version",
                "image_platform",
                "image_version",
                "image_description",
                "image_marketplace_id",
            ],
        )
def test_get_templates_with_defaults_from_static(self):
    """Static configuration overrides the built-in template defaults."""
    expected_templates = {}
    url = "http://schemas.openstack.org/template/resource"
    for f in FAKES.flavors:
        expected_templates[f.id] = {
            "template_memory": f.ram,
            "template_cpu": f.vcpus,
            "template_id": "%s#%s" % (url, f.id),
            "template_native_id": "%s" % f.id,
            "template_platform": "i686",
            "template_network": "private",
            "template_disk": f.disk,
            "template_ephemeral": f.ephemeral,
        }
    with utils.nested(
        mock.patch.object(self.provider.static, "get_template_defaults"),
        mock.patch.object(self.provider.nova.flavors, "list"),
    ) as (m_get_template_defaults, m_flavors_list):
        # static defaults supply the platform, overriding "amd64"
        m_get_template_defaults.return_value = {"template_platform": "i686"}
        m_flavors_list.return_value = FAKES.flavors
        templates = self.provider.get_templates(**{"auth": {"project_id": None}})
        self.assertTrue(m_get_template_defaults.called)
        self.assert_resources(
            expected_templates,
            templates,
            template="compute.ldif",
            ignored_fields=[
                "compute_api_type",
                "compute_api_version",
                "compute_api_endpoint_technology",
                "compute_capabilities",
                "compute_endpoint_url",
                "compute_hypervisor",
                "compute_hypervisor_version",
                "compute_middleware",
                "compute_middleware_developer",
                "compute_middleware_version",
                "compute_production_level",
                "compute_api_authn_method",
                "compute_service_name",
                "compute_service_production_level",
                "compute_total_cores",
                "compute_total_ram",
                "image_id",
                "image_name",
                "image_version",
                "image_description",
                "image_marketplace_id",
            ],
        )
def test_get_all_templates(self):
    """Tests that all templates/flavors are returned."""
    expected_templates = {}
    url = "http://schemas.openstack.org/template/resource"
    for f in FAKES.flavors:
        expected_templates[f.id] = {
            "template_memory": f.ram,
            "template_cpu": f.vcpus,
            "template_id": "%s#%s" % (url, f.id),
            "template_native_id": "%s" % f.id,
            "template_platform": "i686",
            "template_network": "private",
            "template_disk": f.disk,
            "template_ephemeral": f.ephemeral,
        }
    # "all" must include both public and non-public flavors
    self.provider.select_flavors = "all"
    with utils.nested(
        mock.patch.object(self.provider.static, "get_template_defaults"),
        mock.patch.object(self.provider.nova.flavors, "list"),
    ) as (m_get_template_defaults, m_flavors_list):
        m_get_template_defaults.return_value = {"template_platform": "i686"}
        m_flavors_list.return_value = FAKES.flavors
        templates = self.provider.get_templates(**{"auth": {"project_id": None}})
        self.assertTrue(m_get_template_defaults.called)
        # Extract required fields from compute.ldif template excluding fields
        # extracted that are not related to the flavors
        self.assert_resources(
            expected_templates,
            templates,
            template="compute.ldif",
            ignored_fields=[
                "compute_service_name",
                "compute_hypervisor",
                "compute_api_authn_method",
                "compute_total_ram",
                "image_marketplace_id",
                "compute_middleware_developer",
                "compute_production_level",
                "compute_production_level",
                "compute_api_type",
                "compute_api_endpoint_technology",
                "compute_api_version",
                "compute_endpoint_url",
                "compute_service_production_level",
                "compute_capabilities",
                "compute_total_cores",
                "compute_middleware",
                "compute_hypervisor_version",
                "compute_middleware_version",
                "image_description",
                "image_id",
                "image_name",
                "image_version",
            ],
        )
def test_get_public_templates(self):
    """Tests that only public templates/flavors are returned."""
    expected_templates = {}
    url = "http://schemas.openstack.org/template/resource"
    for f in FAKES.flavors:
        # Non-public flavors must not show up when selecting "public".
        if not f.is_public:
            continue
        expected_templates[f.id] = {
            "template_memory": f.ram,
            "template_cpu": f.vcpus,
            "template_id": "%s#%s" % (url, f.id),
            "template_native_id": "%s" % f.id,
            "template_platform": "i686",
            "template_network": "private",
            "template_disk": f.disk,
            "template_ephemeral": f.ephemeral,
        }
    self.provider.select_flavors = "public"
    with utils.nested(
        mock.patch.object(self.provider.static, "get_template_defaults"),
        mock.patch.object(self.provider.nova.flavors, "list"),
    ) as (m_get_template_defaults, m_flavors_list):
        m_get_template_defaults.return_value = {"template_platform": "i686"}
        m_flavors_list.return_value = FAKES.flavors
        templates = self.provider.get_templates(**{"auth": {"project_id": None}})
        self.assertTrue(m_get_template_defaults.called)
    # Extract required fields from compute.ldif template excluding fields
    # extracted that are not related to the flavors.
    # NOTE(review): removed a duplicated "compute_production_level" entry
    # present in the original list.
    self.assert_resources(
        expected_templates,
        templates,
        template="compute.ldif",
        ignored_fields=[
            "compute_service_name",
            "compute_hypervisor",
            "compute_api_authn_method",
            "compute_total_ram",
            "image_marketplace_id",
            "compute_middleware_developer",
            "compute_production_level",
            "compute_api_type",
            "compute_api_endpoint_technology",
            "compute_api_version",
            "compute_endpoint_url",
            "compute_service_production_level",
            "compute_capabilities",
            "compute_total_cores",
            "compute_middleware",
            "compute_hypervisor_version",
            "compute_middleware_version",
            "image_description",
            "image_id",
            "image_name",
            "image_version",
        ],
    )
def test_get_private_templates(self):
    """Tests that only private templates/flavors are returned."""
    expected_templates = {}
    url = "http://schemas.openstack.org/template/resource"
    for f in FAKES.flavors:
        # Public flavors must not show up when selecting "private".
        if f.is_public:
            continue
        expected_templates[f.id] = {
            "template_memory": f.ram,
            "template_cpu": f.vcpus,
            "template_id": "%s#%s" % (url, f.id),
            "template_native_id": "%s" % f.id,
            "template_platform": "i686",
            "template_network": "private",
            "template_disk": f.disk,
            "template_ephemeral": f.ephemeral,
        }
    self.provider.select_flavors = "private"
    with utils.nested(
        mock.patch.object(self.provider.static, "get_template_defaults"),
        mock.patch.object(self.provider.nova.flavors, "list"),
    ) as (m_get_template_defaults, m_flavors_list):
        m_get_template_defaults.return_value = {"template_platform": "i686"}
        m_flavors_list.return_value = FAKES.flavors
        templates = self.provider.get_templates(**{"auth": {"project_id": None}})
        self.assertTrue(m_get_template_defaults.called)
    # Extract required fields from compute.ldif template excluding fields
    # extracted that are not related to the flavors.
    # NOTE(review): removed a duplicated "compute_production_level" entry
    # present in the original list.
    self.assert_resources(
        expected_templates,
        templates,
        template="compute.ldif",
        ignored_fields=[
            "compute_service_name",
            "compute_hypervisor",
            "compute_api_authn_method",
            "compute_total_ram",
            "image_marketplace_id",
            "compute_middleware_developer",
            "compute_production_level",
            "compute_api_type",
            "compute_api_endpoint_technology",
            "compute_api_version",
            "compute_endpoint_url",
            "compute_service_production_level",
            "compute_capabilities",
            "compute_total_cores",
            "compute_middleware",
            "compute_hypervisor_version",
            "compute_middleware_version",
            "image_description",
            "image_id",
            "image_name",
            "image_os_family",
            "image_os_name",
            "image_os_version",
            "image_platform",
            "image_version",
        ],
    )
def test_get_images(self):
    """Tests that all images are returned with the expected fields."""
    # XXX move this to a custom class?
    # XXX add docker information
    expected_images = {
        "bar id": {
            "name": "barimage",
            "id": "bar id",
            "metadata": {},
            "file": "v2/bar id/file",
            "image_description": None,
            "image_name": "barimage",
            "image_os_family": None,
            "image_os_name": None,
            "image_os_version": None,
            "image_platform": "amd64",
            "image_version": None,
            # No marketplace URL in the fake image, so the glance file URL
            # is used instead (the original wrapped this in a no-op
            # '"%s" %' formatting).
            "image_marketplace_id": urljoin(
                self.provider.glance.http_client.get_endpoint(), "v2/bar id/file"
            ),
            "image_id": "http://schemas.openstack.org/template/os#bar_id",
            "image_native_id": "bar id",
            "image_accel_type": None,
            "image_access_info": "none",
            "image_minimal_cpu": None,
            "image_minimal_ram": None,
            "image_minimal_accel": None,
            "image_recommended_cpu": None,
            "image_recommended_ram": None,
            "image_recommended_accel": None,
            "image_size": None,
            "image_software": [],
            "image_traffic_in": [],
            "image_traffic_out": [],
            "image_context_format": None,
            "other_info": [],
        },
        "foo.id": {
            "name": "fooimage",
            "id": "foo.id",
            "metadata": {},
            "marketplace": "http://example.org/",
            "file": "v2/foo.id/file",
            "image_description": None,
            "image_name": "fooimage",
            "image_os_family": None,
            "image_os_name": None,
            "image_os_version": None,
            "image_platform": "amd64",
            "image_version": None,
            "image_marketplace_id": "http://example.org/",
            "image_id": "http://schemas.openstack.org/template/os#foo.id",
            "image_native_id": "foo.id",
            "image_accel_type": None,
            "image_access_info": "none",
            "image_minimal_cpu": None,
            "image_minimal_ram": None,
            "image_minimal_accel": None,
            "image_recommended_cpu": None,
            "image_recommended_ram": None,
            "image_recommended_accel": None,
            "image_size": None,
            "image_software": [],
            "image_traffic_in": [],
            "image_traffic_out": [],
            "image_context_format": None,
            "other_info": ["base_mpuri=foobar"],
            "APPLIANCE_ATTRIBUTES": '{"ad:base_mpuri": "foobar"}',
        },
        "baz id": {
            "name": "bazimage",
            "id": "baz id",
            "file": "v2/baz id/file",
            "image_description": None,
            "image_name": "bazimage",
            "image_os_family": None,
            "image_os_name": None,
            "image_os_version": None,
            "image_platform": "amd64",
            "image_version": None,
            "image_marketplace_id": urljoin(
                self.provider.glance.http_client.get_endpoint(), "v2/baz id/file"
            ),
            "image_id": "http://schemas.openstack.org/template/os#baz_id",
            "image_native_id": "baz id",
            "docker_id": "sha1:xxxxxxxxxxxxxxxxxxxxxxxxxx",
            "docker_tag": "latest",
            "docker_name": "test/image",
            "image_accel_type": None,
            "image_access_info": "none",
            "image_minimal_cpu": None,
            "image_minimal_ram": None,
            "image_minimal_accel": None,
            "image_recommended_cpu": None,
            "image_recommended_ram": None,
            "image_recommended_accel": None,
            "image_size": None,
            "image_software": [],
            "image_traffic_in": [],
            "image_traffic_out": [],
            "image_context_format": None,
            "other_info": [],
        },
    }
    with utils.nested(
        mock.patch.object(self.provider.static, "get_image_defaults"),
        mock.patch.object(self.provider.glance.images, "list"),
    ) as (m_get_image_defaults, m_images_list):
        m_get_image_defaults.return_value = {}
        m_images_list.return_value = FAKES.images
        images = self.provider.get_images(**{"auth": {"project_id": None}})
        self.assertTrue(m_get_image_defaults.called)
    # Filter fields from the template that are not related to images.
    # NOTE(review): removed a duplicated "compute_total_ram" entry and
    # spelled out the implicit string concatenations
    # ("compute_api_endpoint" "_technology") as plain literals.
    self.assert_resources(
        expected_images,
        images,
        template="compute.ldif",
        ignored_fields=[
            "compute_service_name",
            "compute_api_type",
            "compute_api_version",
            "compute_api_endpoint_technology",
            "compute_capabilities",
            "compute_api_authn_method",
            "compute_total_ram",
            "compute_middleware",
            "compute_middleware_developer",
            "compute_middleware_version",
            "compute_endpoint_url",
            "compute_hypervisor",
            "compute_hypervisor_version",
            "compute_production_level",
            "compute_service_production_level",
            "compute_total_cores",
            "template_platform",
            "template_cpu",
            "template_memory",
            "template_network",
            "template_disk",
            "template_ephemeral",
            "template_id",
        ],
    )
def test_get_markeplace_images(self):
    """With all_images off, only images carrying a marketplace entry remain."""
    expected_image_ids = ["foo.id"]
    self.provider.all_images = False
    patches = utils.nested(
        mock.patch.object(self.provider.static, "get_image_defaults"),
        mock.patch.object(self.provider.glance.images, "list"),
    )
    with patches as (m_defaults, m_list):
        m_defaults.return_value = {}
        m_list.return_value = FAKES.images
        images = self.provider.get_images(auth={"project_id": None})
        self.assertTrue(m_defaults.called)
    self.assertItemsEqual(images.keys(), expected_image_ids)
def test_get_endpoints_with_defaults_from_static(self):
    """Endpoint data is taken from the service catalog and GOCDB info;
    static defaults only fill in what the catalog does not provide."""
    keystone_url = "http://foo.example.org:5000/v2"
    expected_endpoints = {
        "endpoints": {
            keystone_url: {
                "compute_api_type": "OpenStack",
                # As version is extracted from the URL default is not used
                "compute_api_version": "v2",
                "compute_endpoint_id": "1b7f14c87d8c42ad962f4d3a5fd13a77",
                "compute_nova_endpoint_url": "https://cloud.example.org:8774/v1.1/ce2d",
                "compute_nova_api_version": "v1.1",
                "compute_endpoint_url": keystone_url,
            }
        },
        "compute_middleware_developer": "OpenStack",
        "compute_middleware": "OpenStack Nova",
        "compute_service_name": keystone_url,
    }
    with utils.nested(
        mock.patch.object(self.provider.static, "get_compute_endpoint_defaults"),
        mock.patch.object(self.provider, "get_goc_info"),
    ) as (m_defaults, m_goc_info):
        m_defaults.return_value = {
            "compute_occi_api_version": "11.11",
            "compute_compute_api_version": "99.99",
        }
        m_goc_info.return_value = {"gocfoo": "baz"}
        access = mock.Mock()
        access.service_catalog = FAKES.catalog
        self.provider.auth_plugin.get_access.return_value = access
        endpoints = self.provider.get_compute_endpoints(auth={"project_id": None})
        self.assertTrue(m_defaults.called)
        m_goc_info.assert_called_with(keystone_url, False)
    self.assertEqual("baz", endpoints.pop("gocfoo"))
    for url, expected in expected_endpoints["endpoints"].items():
        self.assertDictContainsSubset(expected, endpoints["endpoints"].get(url, {}))
def test_get_endpoints_with_defaults(self):
expected_endpoints = {
| |
= StringIO()
geometry.show_sorted(
sites_cart=xrs.sites_cart(),
site_labels=xrs.scatterers().extract_labels(),
f=init_out)
energy_original = geometry.energies_sites(sites_cart=xrs.sites_cart())
t0 = time()
#import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
pklstr = pickle.dumps(geometry)
t1 = time()
grm_from_file = pickle.loads(pklstr)
t2 = time()
# Fails here:
energy_from_pickle = grm_from_file.energies_sites(sites_cart=xrs.sites_cart())
assert approx_equal(energy_original.target, energy_from_pickle.target)
print("Time pickling/unpickling: %.4f, %.4f" % (t1-t0, t2-t1))
grm_from_file.show_sorted(
sites_cart=xrs.sites_cart(),
site_labels=xrs.scatterers().extract_labels(),
f=from_file_out)
# print "INITIAL"
init_v = init_out.getvalue()
# print init_v
# print "="*50
# print "From disc"
from_file_v = from_file_out.getvalue()
# print from_file_v
# STOP()
assert not show_diff(init_v, from_file_v)
return grm_from_file
def test_simple_protein(
        mon_lib_srv, ener_lib, prefix="tst_grm_pickling_simple_protein"):
    """Build a GRM for a small protein and check it survives pickling."""
    grm, xray_structure = make_initial_grm(mon_lib_srv, ener_lib, raw_records1)
    make_geo_pickle_unpickle(grm, xray_structure, prefix)
def test_nucleic_acid(mon_lib_srv, ener_lib, prefix="tst_grm_pickling_na"):
    """Round-trip pickle a GRM built from nucleic acid records.

    Writes raw_records2 to <prefix>.pdb, processes it with secondary
    structure restraints enabled, checks the expected proxy counts and
    then pickles/unpickles the geometry restraints manager.
    """
    # Use a context manager so the file is flushed and closed before
    # pdb_interpretation.run() reads it back from disk; the original
    # open(...).write(...) never explicitly closed the handle.
    with open("%s.pdb" % prefix, "w") as f:
        f.write(raw_records2)
    from mmtbx import monomer_library
    params = monomer_library.pdb_interpretation.master_params.extract()
    params.secondary_structure.enabled = True
    processed_pdb_file = monomer_library.pdb_interpretation.run(
        args=["%s.pdb" % prefix],
        params=params,
        strict_conflict_handling=False,
        log=sys.stdout)
    geo = processed_pdb_file.geometry_restraints_manager()
    assert geo.get_n_hbond_proxies() == 6
    assert geo.get_n_hangle_proxies() == 12
    assert geo.get_n_stacking_proxies() == 2
    assert geo.get_n_parallelity_bp_proxies() == 2
    make_geo_pickle_unpickle(geo, processed_pdb_file.xray_structure(), prefix)
def test_ramachandran(mon_lib_srv, ener_lib, prefix="tst_grm_pickling_rama"):
    """Round-trip pickle a GRM carrying Ramachandran restraints."""
    # Use a context manager so the file is flushed and closed before
    # pdb_interpretation.run() reads it back from disk; the original
    # open(...).write(...) never explicitly closed the handle.
    with open("%s.pdb" % prefix, "w") as f:
        f.write(raw_records3)
    from mmtbx import monomer_library
    params = monomer_library.pdb_interpretation.master_params.extract()
    params.peptide_link.ramachandran_restraints = True
    processed_pdb_file = monomer_library.pdb_interpretation.run(
        args=["%s.pdb" % prefix],
        params=params,
        strict_conflict_handling=False,
        log=sys.stdout)
    geo = processed_pdb_file.geometry_restraints_manager()
    assert geo.get_n_ramachandran_proxies() == 1
    make_geo_pickle_unpickle(geo, processed_pdb_file.xray_structure(), prefix)
def test_cbeta(mon_lib_srv, ener_lib, prefix="tst_grm_pickling_cbeta"):
    """Round-trip pickle a GRM carrying C-beta torsion restraints."""
    # Use a context manager so the file is flushed and closed before
    # pdb_interpretation.run() reads it back from disk; the original
    # open(...).write(...) never explicitly closed the handle.
    with open("%s.pdb" % prefix, "w") as f:
        f.write(raw_records3)
    from mmtbx import monomer_library
    params = monomer_library.pdb_interpretation.master_params.extract()
    params.c_beta_restraints = True
    processed_pdb_file = monomer_library.pdb_interpretation.run(
        args=["%s.pdb" % prefix],
        params=params,
        strict_conflict_handling=False,
        log=sys.stdout)
    geo = processed_pdb_file.geometry_restraints_manager()
    assert geo.get_n_c_beta_torsion_proxies() == 6
    make_geo_pickle_unpickle(geo, processed_pdb_file.xray_structure(), prefix)
def test_reference_coordinate(mon_lib_srv, ener_lib, prefix="tst_grm_pickling_ref_coor"):
    """Round-trip pickle a GRM carrying reference-coordinate restraints."""
    from mmtbx.geometry_restraints import reference
    # for some strange reason without importing this the code doesn't work...
    from cctbx import adp_restraints  # import dependency
    params = monomer_library.pdb_interpretation.master_params.extract()
    params.reference_coordinate_restraints.enabled = False
    model_input = iotbx.pdb.input(source_info=None, lines=raw_records3)
    processed = monomer_library.pdb_interpretation.process(
        mon_lib_srv=mon_lib_srv,
        ener_lib=ener_lib,
        params=params,
        strict_conflict_handling=False,
        pdb_inp=model_input,
        log=sys.stdout)
    geo = processed.geometry_restraints_manager()
    hierarchy = processed.all_chain_proxies.pdb_hierarchy
    coords = hierarchy.atoms().extract_xyz()
    # Attach reference-coordinate proxies directly, then verify the GRM
    # still pickles/unpickles cleanly with them in place.
    proxies = reference.add_coordinate_restraints(sites_cart=coords)
    geo.adopt_reference_coordinate_restraints_in_place(proxies)
    make_geo_pickle_unpickle(geo, processed.xray_structure(), prefix)
def test_secondary_structure(mon_lib_srv, ener_lib, prefix="tst_grm_pickling_ss"):
    """Round-trip pickle a GRM built with secondary-structure restraints.

    The inline PDB holds one annotated helix (HELIX record, ASP A 37 to
    GLY A 48); processing with secondary_structure.enabled produces the
    hydrogen-bond proxies checked below before the pickle round trip.
    """
    # Fixed-column PDB fragment; the literal must stay byte-identical
    # since pdb_interpretation parses it by column position.
    pdb_str = """
HELIX    2   2 ASP A   37  GLY A   48  1                                  12
CRYST1  113.068  113.068   53.292  90.00  90.00  90.00 I 41            8
ATOM    266  N   ASP A  37       6.265  61.752  14.145  1.00 35.17           N
ATOM    267  CA  ASP A  37       5.251  62.335  15.056  1.00 37.08           C
ATOM    268  C   ASP A  37       5.433  61.900  16.511  1.00 37.79           C
ATOM    269  O   ASP A  37       6.443  61.316  16.858  1.00 37.54           O
ATOM    270  CB  ASP A  37       3.827  62.120  14.521  1.00 37.53           C
ATOM    271  CG  ASP A  37       3.427  60.683  14.400  1.00 38.76           C
ATOM    272  OD1 ASP A  37       4.001  59.819  15.070  1.00 38.84           O
ATOM    273  OD2 ASP A  37       2.506  60.327  13.624  1.00 41.78           O
ATOM    274  N   ASP A  38       4.467  62.205  17.382  1.00 38.31           N
ATOM    275  CA  ASP A  38       4.609  61.829  18.786  1.00 38.69           C
ATOM    276  C   ASP A  38       4.781  60.335  18.955  1.00 37.78           C
ATOM    277  O   ASP A  38       5.598  59.886  19.760  1.00 38.31           O
ATOM    278  CB  ASP A  38       3.376  62.258  19.608  1.00 39.51           C
ATOM    279  CG  ASP A  38       3.378  63.724  19.972  1.00 42.76           C
ATOM    280  OD1 ASP A  38       4.462  64.343  20.161  1.00 48.07           O
ATOM    281  OD2 ASP A  38       2.295  64.337  20.144  1.00 47.65           O
ATOM    282  N   ALA A  39       4.003  59.561  18.209  1.00 36.68           N
ATOM    283  CA  ALA A  39       4.065  58.107  18.287  1.00 36.58           C
ATOM    284  C   ALA A  39       5.433  57.607  17.773  1.00 35.91           C
ATOM    285  O   ALA A  39       6.014  56.661  18.319  1.00 35.28           O
ATOM    286  CB  ALA A  39       2.947  57.491  17.483  1.00 36.33           C
ATOM    287  N   GLY A  40       5.948  58.257  16.745  1.00 35.33           N
ATOM    288  CA  GLY A  40       7.296  57.938  16.267  1.00 35.20           C
ATOM    289  C   GLY A  40       8.386  58.218  17.295  1.00 34.81           C
ATOM    290  O   GLY A  40       9.320  57.432  17.456  1.00 34.92           O
ATOM    291  N   ARG A  41       8.297  59.351  17.981  1.00 35.65           N
ATOM    292  CA  ARG A  41       9.300  59.698  18.970  1.00 35.75           C
ATOM    293  C   ARG A  41       9.257  58.681  20.093  1.00 37.10           C
ATOM    294  O   ARG A  41      10.295  58.291  20.642  1.00 37.65           O
ATOM    295  CB  ARG A  41       9.090  61.118  19.494  1.00 36.15           C
ATOM    296  CG  ARG A  41       9.575  62.196  18.563  1.00 35.51           C
ATOM    297  CD  ARG A  41       9.383  63.592  19.134  1.00 38.98           C
ATOM    298  NE  ARG A  41       7.999  64.012  18.913  1.00 40.46           N
ATOM    299  CZ  ARG A  41       7.537  64.446  17.753  1.00 41.44           C
ATOM    300  NH1 ARG A  41       8.326  64.534  16.682  1.00 42.62           N
ATOM    301  NH2 ARG A  41       6.261  64.776  17.649  1.00 43.05           N
ATOM    302  N   ALA A  42       8.053  58.238  20.441  1.00 38.18           N
ATOM    303  CA  ALA A  42       7.878  57.270  21.524  1.00 38.42           C
ATOM    304  C   ALA A  42       8.398  55.909  21.116  1.00 38.32           C
ATOM    305  O   ALA A  42       8.952  55.181  21.927  1.00 37.15           O
ATOM    306  CB  ALA A  42       6.387  57.158  21.948  1.00 38.91           C
ATOM    307  N   THR A  43       8.209  55.567  19.842  1.00 37.57           N
ATOM    308  CA  THR A  43       8.756  54.324  19.328  1.00 37.21           C
ATOM    309  C   THR A  43      10.284  54.321  19.472  1.00 36.33           C
ATOM    310  O   THR A  43      10.842  53.315  19.824  1.00 36.44           O
ATOM    311  CB  THR A  43       8.316  54.130  17.873  1.00 37.54           C
ATOM    312  OG1 THR A  43       6.890  53.948  17.829  1.00 38.41           O
ATOM    313  CG2 THR A  43       8.897  52.837  17.280  1.00 36.05           C
ATOM    314  N   LEU A  44      10.948  55.436  19.192  1.00 36.74           N
ATOM    315  CA  LEU A  44      12.410  55.504  19.283  1.00 36.66           C
ATOM    316  C   LEU A  44      12.877  55.316  20.729  1.00 37.24           C
ATOM    317  O   LEU A  44      13.840  54.613  20.978  1.00 36.26           O
ATOM    318  CB  LEU A  44      12.957  56.819  18.725  1.00 36.22           C
ATOM    319  CG  LEU A  44      12.786  57.061  17.209  1.00 34.97           C
ATOM    320  CD1 LEU A  44      13.386  58.400  16.795  1.00 33.89           C
ATOM    321  CD2 LEU A  44      13.399  55.928  16.404  1.00 33.15           C
ATOM    322  N   ARG A  45      12.147  55.914  21.675  1.00 38.31           N
ATOM    323  CA  ARG A  45      12.485  55.801  23.095  1.00 39.49           C
ATOM    324  C   ARG A  45      12.296  54.381  23.589  1.00 39.97           C
ATOM    325  O   ARG A  45      13.113  53.864  24.338  1.00 40.63           O
ATOM    326  CB  ARG A  45      11.614  56.757  23.935  1.00 39.94           C
ATOM    327  N   ARG A  46      11.186  53.775  23.179  1.00 41.00           N
ATOM    328  CA  ARG A  46      10.849  52.397  23.503  1.00 41.33           C
ATOM    329  C   ARG A  46      11.912  51.412  23.025  1.00 40.34           C
ATOM    330  O   ARG A  46      12.278  50.485  23.731  1.00 39.81           O
ATOM    331  CB  ARG A  46       9.524  52.063  22.835  1.00 41.72           C
ATOM    332  CG  ARG A  46       8.773  50.911  23.395  1.00 46.36           C
ATOM    333  CD  ARG A  46       7.352  50.836  22.851  1.00 51.59           C
ATOM    334  NE  ARG A  46       7.345  50.162  21.548  1.00 57.79           N
ATOM    335  CZ  ARG A  46       6.851  50.659  20.399  1.00 61.01           C
ATOM    336  NH1 ARG A  46       6.282  51.872  20.344  1.00 62.67           N
ATOM    337  NH2 ARG A  46       6.918  49.916  19.290  1.00 61.73           N
ATOM    338  N   LEU A  47      12.402  51.620  21.809  1.00 39.47           N
ATOM    339  CA  LEU A  47      13.439  50.765  21.223  1.00 38.25           C
ATOM    340  C   LEU A  47      14.826  51.006  21.800  1.00 37.39           C
ATOM    341  O   LEU A  47      15.742  50.247  21.530  1.00 38.19           O
ATOM    342  CB  LEU A  47      13.502  51.010  19.712  1.00 38.57           C
ATOM    343  CG  LEU A  47      12.264  50.556  18.951  1.00 38.58           C
ATOM    344  CD1 LEU A  47      12.346  51.046  17.517  1.00 38.92           C
ATOM    345  CD2 LEU A  47      12.101  49.038  19.050  1.00 38.51           C
ATOM    346  N   GLY A  48      14.997  52.083  22.557  1.00 36.96           N
ATOM    347  CA  GLY A  48      16.262  52.383  23.191  1.00 35.72           C
ATOM    348  C   GLY A  48      17.323  52.969  22.286  1.00 34.43           C
ATOM    349  O   GLY A  48      18.512  52.912  22.607  1.00 34.93           O
"""
    pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)
    params = monomer_library.pdb_interpretation.master_params.extract()
    # Enable SS restraint generation so H-bond proxies are created from
    # the HELIX annotation.
    params.secondary_structure.enabled=True
    processed_pdb_file = monomer_library.pdb_interpretation.process(
        mon_lib_srv=mon_lib_srv,
        ener_lib=ener_lib,
        params=params,
        strict_conflict_handling=False,
        pdb_inp=pdb_inp,
        log=sys.stdout)
    geo = processed_pdb_file.geometry_restraints_manager()
    # Sanity-check the SS machinery fired before exercising pickling.
    assert geo.get_n_hbond_proxies() == 8
    make_geo_pickle_unpickle(geo, processed_pdb_file.xray_structure(), prefix)
def test_secondary_structure_2(mon_lib_srv, ener_lib, prefix="tst_grm_pickling_ss2"):
from iotbx.pdb.tst_secondary_structure import pdb_1ywf_sample_strings
pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_1ywf_sample_strings)
params = monomer_library.pdb_interpretation.master_params.extract()
params.secondary_structure.enabled=True
processed_pdb_file = monomer_library.pdb_interpretation.process(
mon_lib_srv=mon_lib_srv,
ener_lib=ener_lib,
params=params,
strict_conflict_handling=False,
pdb_inp=pdb_inp,
log=sys.stdout)
geo | |
packet_chunk['HASVALID']:
string = ""
if (configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH']) == 0:
string += " ((rx_grant_enc_data == {}'d{}) &&".format(1, packet_chunk['LAST_PKT_ENC'])
string += " ({} [{}] == 1'b1))".format("rx_packet_data", packet_chunk['PUSHBIT_LOC'] )
elif delay_value == -1: # -1 means live value
string += " ((rx_grant_enc_data == {}'d{}) &&".format(configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH'], packet_chunk['LAST_PKT_ENC'])
string += " ({} [{}] == 1'b1))".format("rx_packet_data", packet_chunk['PUSHBIT_LOC'] )
else:
string += " ((rx_grant_enc_data == {}'d{}) &&".format(configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH'], packet_chunk['LAST_PKT_ENC'])
string += " (rx_grant_enc_dly{}_reg == {}'d{}) &&".format(delay_value, configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH'], packet_chunk['ENC'])
string += " ({} [{}] == 1'b1))".format("rx_buffer_dly{}_reg".format(delay_value), packet_chunk['PUSHBIT_LOC'])
rx_pushbit_dict [packet_chunk['NAME']] = string
if global_struct.g_PACKET_DEBUG:
print ("before // This is RX Push Bit")
pprint.pprint (rx_pushbit_dict)
file_name.write(" // This is RX Push Bit\n")
for rx_pushbit_key in sorted (rx_pushbit_dict.keys()) :
file_name.write(" assign {:20} ={};\n".format(gen_llink_concat_pushbit (rx_pushbit_key,"output"), rx_pushbit_dict[rx_pushbit_key]))
file_name.write("\n")
### for llink in configuration['LL_LIST']:
### if llink['DIR'] != localdir:
### num_whole_assignment = 0
### for enc_index,entire_packet in enumerate(sorted (loc_packet_info, key=itemgetter('SIZE','PKT_NAME'), reverse=False)):
### for packet_chunk in entire_packet['LIST']:
### if llink['NAME'] == packet_chunk['NAME']:
### if packet_chunk['HASVALID'] == True:
### num_whole_assignment += 1
###
### if global_struct.g_PACKET_DEBUG:
### print("RX pushbit llink {} num_whole_assignment = {}\n".format(llink['NAME'], num_whole_assignment))
###
### if num_whole_assignment > 1 :
### file_name.write(" assign {:20} = ".format(gen_llink_concat_pushbit(llink['NAME'],'output')))
### for enc_index,entire_packet in enumerate(sorted (loc_packet_info, key=itemgetter('SIZE','PKT_NAME'), reverse=False)):
### for packet_chunk in entire_packet['LIST']:
### if llink['NAME'] == packet_chunk['NAME']:
### if packet_chunk['HASVALID'] == True:
### if num_whole_assignment > 1:
### file_name.write("(rx_grant_enc_data == {}'d{}) ? ".format(configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH'], packet_chunk['ENC']))
### file_name.write("rx_packet_data[{}] ".format(packet_chunk['PUSHBIT_LOC']))
### num_whole_assignment -= 1
### if (num_whole_assignment != 0):
### file_name.write(": ")
### else:
### file_name.write(" assign {:20} = ".format(gen_llink_concat_pushbit(llink['NAME'],'output')))
### for enc_index,entire_packet in enumerate(sorted (loc_packet_info, key=itemgetter('SIZE','PKT_NAME'), reverse=False)):
### for packet_chunk in entire_packet['LIST']:
### if llink['NAME'] == packet_chunk['NAME']:
### if packet_chunk['LAST_PKT'] == True :
### max_pkt_index = packet_chunk['PKT_INDEX']
###
### for packet_chunk in entire_packet['LIST']:
### if llink['NAME'] == packet_chunk['NAME']:
### if packet_chunk['FIRST_PKT'] == True and packet_chunk['LAST_PKT'] == True and packet_chunk['HASVALID'] == True:
### file_name.write("rx_packet_data[{}] ".format(packet_chunk['PUSHBIT_LOC']))
### elif packet_chunk['FIRST_PKT'] == True and packet_chunk['HASVALID'] == True:
### file_name.write("(rx_buffer_dly{}_reg[{}] & (rx_grant_enc_dly{}_reg == {}'d{})) & ".format(packet_chunk['LAST_PKT_INDEX'] - packet_chunk['PKT_INDEX'] -1, packet_chunk['PUSHBIT_LOC'], packet_chunk['LAST_PKT_INDEX'] - packet_chunk['PKT_INDEX'] -1, configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH'], packet_chunk['ENC']))
### elif packet_chunk['LAST_PKT'] == True :
### file_name.write("(rx_grant_enc_data == {}'d{}) ".format(configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH'], packet_chunk['ENC']))
### file_name.write(";\n")
### file_name.write("\n")
## This section builds a dict of LL Data, with possibly multiple sources.
## It is used for and RX Data later on.
for entire_packet in sorted (loc_packet_info, key=itemgetter('SIZE','PKT_NAME'), reverse=False):
for packet_chunk in entire_packet['LIST']:
rx_data_key = "{}".format(packet_chunk['CHUNK_NAME'])
if rx_data_key in rx_data_dict:
rx_data_dict[rx_data_key] ['ENC'] = rx_data_dict[rx_data_key] ['ENC']+"_"+str(entire_packet['ENC'])
else: ## New entry
rx_element_dict = dict()
rx_element_dict ['ENC'] = str(entire_packet['ENC'])
rx_element_dict ['NAME'] = packet_chunk['NAME']
rx_element_dict ['WIDTH'] = packet_chunk['WIDTH']
rx_element_dict ['LLINK_LSB'] = packet_chunk['LLINK_LSB']
rx_element_dict ['DELAY'] = packet_chunk['LAST_PKT_INDEX'] - packet_chunk['PKT_INDEX']-1 # -1 means live
rx_element_dict ['FIFODATA_LOC'] = packet_chunk['FIFODATA_LOC']
rx_data_dict[rx_data_key] = rx_element_dict
if global_struct.g_PACKET_DEBUG:
print ("before // This is RX Data")
pprint.pprint (rx_data_dict)
file_name.write(" // This is RX Data\n")
for rx_data_key in sorted (rx_data_dict.keys()) :
enc_list = rx_data_dict[rx_data_key]['ENC'].split("_")
enc_index = len(enc_list)
total_encoding = len(enc_list)-1
if rx_data_dict[rx_data_key]['WIDTH'] > 0:
file_name.write(" assign {:20} {:13} =".format(gen_llink_concat_fifoname (rx_data_dict[rx_data_key]['NAME'],"output") , gen_index_msb (rx_data_dict[rx_data_key]['WIDTH'], rx_data_dict[rx_data_key]['LLINK_LSB']) ))
for encoding_index, encoding in enumerate(enc_list):
if total_encoding > 0:
if encoding_index != total_encoding:
if (rx_data_dict[rx_data_key]['DELAY'] == -1):
file_name.write(" (rx_grant_enc_data == {}'d{}) ?".format(configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH'], encoding))
else:
file_name.write(" (rx_grant_enc_dly{}_reg == {}'d{}) ?".format(rx_data_dict[rx_data_key]['DELAY'], configuration['TX_PACKET_ID_WIDTH'] if direction == 'slave' else configuration['RX_PACKET_ID_WIDTH'], encoding))
else:
file_name.write(" ")
if rx_data_dict[rx_data_key]['DELAY'] == -1: # -1 means live value
file_name.write(" {:20} ".format("rx_packet_data" ))
else:
file_name.write(" {:20} ".format("rx_buffer_dly{}_reg".format(rx_data_dict[rx_data_key]['DELAY']) ))
file_name.write("{:13}".format(gen_index_msb(rx_data_dict[rx_data_key]['WIDTH'], rx_data_dict[rx_data_key]['FIFODATA_LOC']) ))
if total_encoding > 0:
if encoding_index != total_encoding:
file_name.write(" :\n ")
else:
file_name.write(" ;\n")
else:
file_name.write(";\n")
if rx_buffer_size != 0:
file_name.write("\n")
file_name.write(" // This is Buffer and Encoding Delay\n")
file_name.write(" always_ff @(posedge clk_wr or negedge rst_wr_n)\n")
file_name.write(" if (~rst_wr_n)\n")
file_name.write(" begin\n")
for buff in range(rx_buffer_size):
print_verilog_regnb (file_name , "rx_grant_enc_dly{}_reg".format(buff) , "'0")
for buff in range(rx_buffer_size):
print_verilog_regnb (file_name , "rx_buffer_dly{}_reg".format(buff) , "'0")
file_name.write(" end\n")
file_name.write(" else\n")
file_name.write(" begin\n")
for buff in range(rx_buffer_size):
if buff == 0:
print_verilog_regnb (file_name , "rx_grant_enc_dly{}_reg".format(buff) , "rx_grant_enc_data")
else:
print_verilog_regnb (file_name , "rx_grant_enc_dly{}_reg".format(buff) , "rx_grant_enc_dly{}_reg".format(buff-1))
for buff in range(rx_buffer_size):
if buff == 0:
print_verilog_regnb (file_name , "rx_buffer_dly{}_reg".format(buff) , "rx_packet_data")
else:
print_verilog_regnb (file_name , "rx_buffer_dly{}_reg".format(buff) , "rx_buffer_dly{}_reg".format(buff-1))
file_name.write(" end\n")
file_name.write("\n")
file_name.write("// RX Packet Section\n")
file_name.write("//////////////////////////////////////////////////////////////////\n")
file_name.write("\n")
else:
if direction == 'master':
localdir = 'input';
else:
localdir = 'output';
file_name.write("// No RX Packetization, so tie off packetization signals\n")
for llink in configuration['LL_LIST']:
if llink['DIR'] == localdir:
print_verilog_assign(file_name, gen_llink_concat_ovrd (llink['NAME'],"output"), "1'b0")
file_name.write("\n")
file_name.write("//////////////////////////////////////////////////////////////////\n")
file_name.write("// TX Section\n")
file_name.write("\n")
file_name.write("// TX_CH_WIDTH = {}; // {} running at {} Rate\n".format(configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'], configuration['CHAN_TYPE'], configuration['TX_RATE'] if direction == 'master' else configuration['RX_RATE']))
file_name.write("// TX_DATA_WIDTH = {}; // Usable Data per Channel\n".format(configuration['CHAN_TX_USEABLE1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_USEABLE1PHY_DATA_MAIN'] ))
file_name.write("// TX_PERSISTENT_STROBE = 1'b{};\n".format(int(configuration['TX_PERSISTENT_STROBE'])))
file_name.write("// TX_PERSISTENT_MARKER = 1'b{};\n".format(int(configuration['TX_PERSISTENT_MARKER'])))
file_name.write("// TX_STROBE_GEN2_LOC = 'd{};\n".format(int(configuration['TX_STROBE_GEN2_LOC'])))
file_name.write("// TX_MARKER_GEN2_LOC = 'd{};\n".format(int(configuration['TX_MARKER_GEN2_LOC'])))
file_name.write("// TX_STROBE_GEN1_LOC = 'd{};\n".format(int(configuration['TX_STROBE_GEN1_LOC'])))
file_name.write("// TX_MARKER_GEN1_LOC = 'd{};\n".format(int(configuration['TX_MARKER_GEN1_LOC'])))
file_name.write("// TX_ENABLE_STROBE = 1'b{};\n".format(int(configuration['TX_ENABLE_STROBE'])))
file_name.write("// TX_ENABLE_MARKER = 1'b{};\n".format(int(configuration['TX_ENABLE_MARKER'])))
file_name.write("// TX_DBI_PRESENT = 1'b{};\n".format(int(configuration['TX_DBI_PRESENT'])))
file_name.write("// TX_REG_PHY = 1'b{};\n".format(int(configuration['TX_REG_PHY'])))
file_name.write("\n")
file_name.write(" localparam TX_REG_PHY = 1'b{}; // If set, this enables boundary FF for timing reasons\n".format(int(configuration['TX_REG_PHY'])))
file_name.write("\n")
for phy in range(configuration['NUM_CHAN']):
print_verilog_logic_line (file_name , "tx_phy_preflop_{}".format(phy) , index = gen_index_msb ( configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'] , sysv=False) )
use_recov_strobe = False
if ((configuration['TX_ENABLE_STROBE'] if direction == 'master' else configuration['RX_ENABLE_STROBE']) == True and
(configuration['TX_PERSISTENT_STROBE'] if direction == 'master' else configuration['RX_PERSISTENT_STROBE']) == False ) :
use_recov_strobe = True
use_recov_marker = False
if ((configuration['TX_ENABLE_MARKER'] if direction == 'master' else configuration['RX_ENABLE_MARKER']) == True and
(configuration['TX_PERSISTENT_MARKER'] if direction == 'master' else configuration['RX_PERSISTENT_MARKER']) == False ) :
use_recov_marker = True
if use_recov_strobe :
for phy in range(configuration['NUM_CHAN']):
print_verilog_logic_line (file_name , "tx_phy_preflop_recov_strobe_{}".format(phy) , index = gen_index_msb ( configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'] , sysv=False) )
if configuration ['GEN2_AS_GEN1_EN']:
for phy in range(configuration['NUM_CHAN']):
print_verilog_logic_line (file_name , "tx_phy_galt_preflop_recov_strobe_{}".format(phy) , index = gen_index_msb ( configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'] , sysv=False) )
print_verilog_logic_line (file_name , "tx_phy_final_preflop_recov_strobe_{}".format(phy) , index = gen_index_msb ( configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'] , sysv=False) )
if use_recov_marker:
for phy in range(configuration['NUM_CHAN']):
print_verilog_logic_line (file_name , "tx_phy_preflop_recov_marker_{}".format(phy) , index = gen_index_msb ( configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'] , sysv=False) )
if configuration ['GEN2_AS_GEN1_EN']:
for phy in range(configuration['NUM_CHAN']):
print_verilog_logic_line (file_name , "tx_phy_galt_preflop_recov_marker_{}".format(phy) , index = gen_index_msb ( configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'] , sysv=False) )
print_verilog_logic_line (file_name , "tx_phy_final_preflop_recov_marker_{}".format(phy) , index = gen_index_msb ( configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'] , sysv=False) )
for phy in range(configuration['NUM_CHAN']):
print_verilog_logic_line (file_name , "tx_phy_flop_{}_reg".format(phy) , index = gen_index_msb ( configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN'] , sysv=False) )
if configuration['TX_SPARE_WIDTH'] if direction == 'master' else configuration['RX_SPARE_WIDTH'] > 0:
print_verilog_logic_line (file_name , "tx_spare_data", index = gen_index_msb (configuration['TX_SPARE_WIDTH'] if direction == 'master' else configuration['RX_SPARE_WIDTH'], sysv=False) )
file_name.write("\n")
file_name.write(" always_ff @(posedge clk_wr or negedge rst_wr_n)\n")
file_name.write(" if (~rst_wr_n)\n")
file_name.write(" begin\n")
for phy in range(configuration['NUM_CHAN']):
print_verilog_regnb (file_name , "tx_phy_flop_{}_reg".format(phy) , "{}'b0".format(configuration['CHAN_TX_RAW1PHY_DATA_MAIN'] if direction == 'master' else configuration['CHAN_RX_RAW1PHY_DATA_MAIN']))
file_name.write(" end\n")
file_name.write(" else\n")
file_name.write(" begin\n")
for phy in range(configuration['NUM_CHAN']):
if configuration ['GEN2_AS_GEN1_EN']:
if use_recov_marker:
print_verilog_regnb (file_name , "tx_phy_flop_{}_reg".format(phy) , "tx_phy_final_preflop_recov_marker_{}".format(phy))
elif use_recov_strobe and not use_recov_marker:
print_verilog_regnb (file_name , "tx_phy_flop_{}_reg".format(phy) , "tx_phy_final_preflop_recov_strobe_{}".format(phy))
else:
print_verilog_regnb (file_name , "tx_phy_flop_{}_reg".format(phy) , "tx_phy_preflop_{}".format(phy))
else:
if use_recov_marker:
print_verilog_regnb (file_name , "tx_phy_flop_{}_reg".format(phy) , "tx_phy_preflop_recov_marker_{}".format(phy))
elif use_recov_strobe and not use_recov_marker:
print_verilog_regnb (file_name , "tx_phy_flop_{}_reg".format(phy) , "tx_phy_preflop_recov_strobe_{}".format(phy))
else:
print_verilog_regnb (file_name , "tx_phy_flop_{}_reg".format(phy) , "tx_phy_preflop_{}".format(phy))
file_name.write(" end\n")
file_name.write("\n")
for phy in range(configuration['NUM_CHAN']):
if configuration ['GEN2_AS_GEN1_EN']:
if use_recov_marker:
print_verilog_assign(file_name, "tx_phy{}".format(phy), "TX_REG_PHY ? tx_phy_flop_{}_reg : tx_phy_final_preflop_recov_marker_{}".format(phy,phy))
elif use_recov_strobe and not use_recov_marker:
print_verilog_assign(file_name, "tx_phy{}".format(phy), "TX_REG_PHY ? tx_phy_flop_{}_reg : tx_phy_final_preflop_recov_strobe_{}".format(phy,phy))
else:
print_verilog_assign(file_name, "tx_phy{}".format(phy), "TX_REG_PHY ? tx_phy_flop_{}_reg : tx_phy_preflop_{}".format(phy,phy))
else:
if use_recov_marker:
print_verilog_assign(file_name, "tx_phy{}".format(phy), "TX_REG_PHY ? tx_phy_flop_{}_reg : tx_phy_preflop_recov_marker_{}".format(phy,phy))
elif use_recov_strobe and not use_recov_marker:
print_verilog_assign(file_name, "tx_phy{}".format(phy), "TX_REG_PHY ? tx_phy_flop_{}_reg : tx_phy_preflop_recov_strobe_{}".format(phy,phy))
else:
print_verilog_assign(file_name, "tx_phy{}".format(phy), "TX_REG_PHY ? tx_phy_flop_{}_reg : tx_phy_preflop_{}".format(phy,phy))
file_name.write("\n")
##################### Dynamic Gen2/Gen1 section
if configuration ['GEN2_AS_GEN1_EN']:
for phy in range(configuration['NUM_CHAN']):
if use_recov_marker:
print_verilog_assign(file_name, "tx_phy_final_preflop_recov_marker_{}".format(phy), "m_gen2_mode ? tx_phy_preflop_recov_marker_{} : tx_phy_galt_preflop_recov_marker_{}".format(phy,phy))
elif use_recov_strobe and not use_recov_marker:
print_verilog_assign(file_name, "tx_phy_final_preflop_recov_strobe_{}".format(phy), "m_gen2_mode ? tx_phy_preflop_recov_strobe_{} : tx_phy_galt_preflop_recov_strobe_{}".format(phy,phy))
file_name.write("\n")
if use_recov_strobe:
loc_strobe_loc = configuration['TX_STROBE_GEN1_LOC'] if direction == 'master' else configuration['RX_STROBE_GEN1_LOC']
for phy in range(configuration['NUM_CHAN']):
if loc_strobe_loc != 0:
print_verilog_assign(file_name, "tx_phy_galt_preflop_recov_strobe_{0}".format(phy), " tx_phy_preflop_{0}".format(phy),
| |
<gh_stars>0
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# File name : controller.py
# Description : Controller PC
# Website : www.adeept.com
# E-mail : <EMAIL>
# Author : William
# Date : 2018/08/22
# Modified : seb3n
# Mod date : 2021/08/15
from socket import *
import time
import threading as thread
import tkinter as tk
stat=0 #A status value,ensure the mainloop() runs only once
tcpClicSock='' #A global variable,for future socket connection
BUFSIZ=1024 #Set a buffer size
ip_stu=1 #Shows connection status
#Global variables of input status
BtnIP=''
ipaddr=''
ipcon=0
send_pwm_conf = 1
def replace_num(initial, new_num):
    """Replace the value stored after *initial* in 'ip.txt'.

    Rewrites the file, substituting the line that starts with *initial*
    (e.g. 'IP:') with ``initial + str(new_num)``.  All other lines are
    kept unchanged.

    Bug fix: the original dropped the line terminator of the replaced
    line, so the following line was fused onto it on the next rewrite.
    """
    new_content = ""
    str_num = str(new_num)
    with open("ip.txt", "r") as f:
        for line in f.readlines():
            if line.find(initial) == 0:
                # Keep exactly one newline so the file stays line-oriented.
                line = initial + str_num.rstrip("\n") + "\n"
            new_content += line
    with open("ip.txt", "w") as f:
        f.writelines(new_content)
def num_import(initial):
    """Return the value stored after *initial* in 'ip.txt', or None.

    Bug fixes vs. the original:
    - the returned value kept the trailing newline from readlines(),
      which is unusable as a hostname for socket.connect();
    - when no line matched, the function crashed with UnboundLocalError
      instead of returning None.
    """
    with open("ip.txt") as f:
        for line in f.readlines():
            if line.find(initial) == 0:
                # Strip surrounding whitespace/newline kept by readlines().
                return line[len(initial):].strip()
    return None
def call_forward(event):
    """Key binding handler: command the car to move forward."""
    tcpClicSock.send(b'add')


def call_back(event):
    """Key binding handler: command the car to move backward."""
    tcpClicSock.send(b'sub')
def normal_state():
    """Restore the default colours on all sixteen LED-select buttons."""
    led_buttons = (
        Btn_L0, Btn_L1, Btn_L2, Btn_L3, Btn_L4, Btn_L5, Btn_L6, Btn_L7,
        Btn_L8, Btn_L9, Btn_L10, Btn_L11, Btn_L12, Btn_L13, Btn_L14, Btn_L15,
    )
    for button in led_buttons:
        button.config(fg=color_text, bg=color_btn)
def _select_led(code, button, echo=False):
    """Send LED-select *code* to the car and highlight *button*.

    Shared implementation for set_L0 .. set_L15, removing 16x duplicated
    bodies.  *echo* preserves the original behaviour of printing the code
    for L0 and L1 only.
    """
    if echo:
        print(code)
    tcpClicSock.send(code.encode())
    normal_state()  # reset all buttons, then highlight the chosen one
    button.config(fg='#0277BD', bg='#BBDEFB')


def set_L0():
    _select_led('L0', Btn_L0, echo=True)


def set_L1():
    _select_led('L1', Btn_L1, echo=True)


def set_L2():
    _select_led('L2', Btn_L2)


def set_L3():
    _select_led('L3', Btn_L3)


def set_L4():
    _select_led('L4', Btn_L4)


def set_L5():
    _select_led('L5', Btn_L5)


def set_L6():
    _select_led('L6', Btn_L6)


def set_L7():
    _select_led('L7', Btn_L7)


def set_L8():
    _select_led('L8', Btn_L8)


def set_L9():
    _select_led('L9', Btn_L9)


def set_L10():
    _select_led('L10', Btn_L10)


def set_L11():
    _select_led('L11', Btn_L11)


def set_L12():
    _select_led('L12', Btn_L12)


def set_L13():
    _select_led('L13', Btn_L13)


def set_L14():
    _select_led('L14', Btn_L14)


def set_L15():
    _select_led('L15', Btn_L15)
def normal_st():
    """Restore the default colours on the position-set and MIN/MAX buttons."""
    pos_buttons = (
        Btn_ST1, Btn_ST2, Btn_ST3, Btn_ST4, Btn_ST5, Btn_ST6, Btn_ST7,
        Btn_ST8, Btn_ST9, Btn_ST10, Btn_ST11, Btn_ST12, Btn_ST13, Btn_ST14,
        Btn_MAX, Btn_MIN,
    )
    for button in pos_buttons:
        button.config(fg=color_text, bg=color_btn)
def _select_pos(code, button):
    """Send position-set *code* to the car and highlight *button*.

    Shared implementation for set_ST1 .. set_ST14, set_MIN and set_MAX.
    Bug fix: in the original, set_ST11..set_ST14 highlighted Btn_ST10 and
    set_MIN/set_MAX highlighted Btn_ST2 (copy-paste errors); each button
    now highlights itself.
    """
    tcpClicSock.send(code.encode())
    normal_st()  # reset all buttons, then highlight the chosen one
    button.config(fg='#0277BD', bg='#BBDEFB')


def set_ST1():
    _select_pos('ST1', Btn_ST1)


def set_ST2():
    _select_pos('ST2', Btn_ST2)


def set_ST3():
    _select_pos('ST3', Btn_ST3)


def set_ST4():
    _select_pos('ST4', Btn_ST4)


def set_ST5():
    _select_pos('ST5', Btn_ST5)


def set_ST6():
    _select_pos('ST6', Btn_ST6)


def set_ST7():
    _select_pos('ST7', Btn_ST7)


def set_ST8():
    _select_pos('ST8', Btn_ST8)


def set_ST9():
    _select_pos('ST9', Btn_ST9)


def set_ST10():
    _select_pos('ST10', Btn_ST10)


def set_ST11():
    _select_pos('ST11', Btn_ST11)


def set_ST12():
    _select_pos('ST12', Btn_ST12)


def set_ST13():
    _select_pos('ST13', Btn_ST13)


def set_ST14():
    _select_pos('ST14', Btn_ST14)


def set_MIN():
    _select_pos('MIN', Btn_MIN)


def set_MAX():
    _select_pos('MAX', Btn_MAX)
def set_config():
    """Ask the car to enter servo-configuration mode."""
    tcpClicSock.send(b'config')


def set_reset():
    """Ask the car to reset the servo configuration."""
    tcpClicSock.send(b'reset')


def set_save():
    """Ask the car to persist the current servo configuration."""
    tcpClicSock.send(b'save')


def set_run():
    """Run the selected motion with the current step count and step time."""
    command = 'run {} {}'.format(var_setps.get(), var_time.get())
    tcpClicSock.send(command.encode())


def set_stop():
    """Ask the car to stop the current motion."""
    tcpClicSock.send(b'stop')


def set_all():
    """Run all motions with the current step count and step time."""
    command = 'all {} {}'.format(var_setps.get(), var_time.get())
    tcpClicSock.send(command.encode())


def set_stepall():
    """Ask the car to advance one frame of the motion sequence."""
    tcpClicSock.send(b'frame')
def set_pwm_thread():
    """Background worker: debounce PWM slider moves, then send the value.

    When set_pwm() clears send_pwm_conf, wait briefly for the slider to
    settle, transmit the current PWM value, and re-arm the flag.
    """
    global send_pwm_conf
    while True:
        if send_pwm_conf == 0:
            time.sleep(0.3)  # let the slider settle before sending
            tcpClicSock.send(var_pwm.get().encode())
            send_pwm_conf = 1
        time.sleep(0.2)
def set_pwm(event):
    """Scale callback: flag that a new PWM value is pending.

    The actual send is debounced and performed by set_pwm_thread().
    """
    global send_pwm_conf
    if send_pwm_conf == 1:
        send_pwm_conf = 0  # picked up by set_pwm_thread
def loop(): #GUI
global tcpClicSock,BtnIP,led_status,color_text,color_btn,Btn_L0,Btn_L1,Btn_L2,Btn_L3,Btn_L4,Btn_L5,Btn_L6,Btn_L7,Btn_L8,Btn_L9,Btn_L10,Btn_L11,Btn_L12,Btn_L13,Btn_L14,Btn_L15,Btn_ST1, Btn_ST2,Btn_ST3,Btn_ST4,Btn_ST5,Btn_ST6,Btn_ST7,Btn_ST8,Btn_ST9,Btn_ST10,Btn_ST11,Btn_ST12,Btn_ST13,Btn_ST14,Btn_MIN,Btn_MAX,var_setps,var_time,var_pwm #The value of tcpClicSock changes in the function loop(),would also changes in global so the other functions could use it.
while True:
color_bg='#000000' #Set background color
color_text='#E1F5FE' #Set text color
color_btn='#0277BD' #Set button color
color_line='#01579B' #Set line color
color_can='#212121' #Set canvas color
color_oval='#2196F3' #Set oval color
target_color='#FF6D00'
root = tk.Tk() #Define a window named root
root.title('Adeept') #Main window title
root.geometry('800x630') #Main window size, middle of the English letter x.
root.config(bg=color_bg) #Set the background color of root window
var_pwm = tk.StringVar() #Speed value saved in a StringVar
var_pwm.set(425) #Set a default speed,but change it would not change the default speed value in the car,you need to click button'Set' to send the value to the car
var_setps = tk.StringVar()
var_setps.set(5)
var_time = tk.StringVar()
var_time.set(0.2)
logo =tk.PhotoImage(file = 'logo.png') #Define the picture of logo,but only supports '.png' and '.gif'
l_logo=tk.Label(root,image = logo,bg=color_bg) #Set a label to show the logo picture
l_logo.place(x=30,y=13) #Place the Label in a right position
def connect(event):  # bound to a key event
    """Start a connection attempt in a background daemon thread."""
    if ip_stu != 1:  # already connected / connecting
        return
    worker = thread.Thread(target=socket_connect)
    worker.setDaemon(True)  # dies together with the main window
    worker.start()
def connect_2():  # bound to the Connect button
    """Start a connection attempt in a background daemon thread."""
    if ip_stu != 1:  # already connected / connecting
        return
    worker = thread.Thread(target=socket_connect)
    worker.setDaemon(True)  # dies together with the main window
    worker.start()
def socket_connect():
    """Connect to the car's TCP server and start the receiver thread.

    Reads the target IP from the E1 entry (falling back to the value
    saved in ip.txt), retries the connection up to 5 times, updates the
    status labels as it goes, and on success disables the IP entry,
    persists the IP and spawns code_receive() as a daemon thread.
    Mutates module-level connection state (ADDR, tcpClicSock, BUFSIZ,
    ip_stu).
    """
    global ADDR,tcpClicSock,BUFSIZ,ip_stu,ipaddr
    ip_adr=E1.get()  #Get the IP address from Entry
    if ip_adr == '':  #If no input IP address in Entry,import a default IP
        ip_adr=num_import('IP:')
        l_ip_4.config(text='Connecting')
        l_ip_4.config(bg='#FF8F00')  # amber = attempt in progress
        l_ip_5.config(text='Default:%s'%ip_adr)
        pass
    SERVER_IP = ip_adr
    SERVER_PORT = 10223  #Define port serial
    BUFSIZ = 1024  #Define buffer size
    ADDR = (SERVER_IP, SERVER_PORT)
    tcpClicSock = socket(AF_INET, SOCK_STREAM)  #Set connection value for socket
    for i in range (1,6):  #Try 5 times if disconnected
        try:
            if ip_stu == 1:  # 1 = not yet connected
                print("Connecting to server @ %s:%d..." %(SERVER_IP, SERVER_PORT))
                print("Connecting")
                tcpClicSock.connect(ADDR)  #Connection with the server
                print("Connected")
                l_ip_5.config(text='IP:%s'%ip_adr)
                l_ip_4.config(text='Connected')
                l_ip_4.config(bg='#558B2F')  # green = connected
                replace_num('IP:',ip_adr)  # remember this IP for next launch
                E1.config(state='disabled')  #Disable the Entry
                Btn14.config(state='disabled',bg='#212121')  #Disable the Entry
                ip_stu=0  #'0' means connected
                at=thread.Thread(target=code_receive)  #Define a thread for data receiving
                at.setDaemon(True)  #'True' means it is a front thread,it would close when the mainloop() closes
                at.start()  #Thread starts
                break
            else:
                break
        except Exception:
            print("Cannot connecting to server,try it latter!")
            l_ip_4.config(text='Try %d/5 time(s)'%i)
            l_ip_4.config(bg='#EF6C00')  # orange = retrying
            print('Try %d/5 time(s)'%i)
            ip_stu=1
            time.sleep(1)  # back off before the next attempt
            continue
    if ip_stu == 1:  # all attempts failed
        l_ip_4.config(text='Disconnected')
        l_ip_4.config(bg='#F44336')  # red = disconnected
def code_receive():
    """Receiver thread: mirror the car's feedback onto the status labels.

    Feedback codes 'L0'..'L15' update the LED label, 'ST1'..'ST14' plus
    'MAX'/'MIN' update the position label; anything else is interpreted
    as a PWM value for the slider variable.

    Bug fix: an empty recv() result means the peer closed the connection;
    the original 'continue' busy-spun forever on a dead socket — we now
    exit the thread instead.  The 36-branch elif chain is replaced with
    set-membership tests, and the bare except is narrowed to ValueError.
    """
    led_codes = {'L%d' % i for i in range(16)}
    pos_codes = {'ST%d' % i for i in range(1, 15)} | {'MAX', 'MIN'}
    while True:
        code_car = tcpClicSock.recv(BUFSIZ)
        if not code_car:
            break  # connection closed by the server
        l_ip.config(text=code_car)  # raw feedback on the status label
        data = code_car.decode()
        if data in led_codes:
            l_ip_L.config(text=data)
        elif data in pos_codes:
            l_ip_SET.config(text=data)
        else:
            # Anything else should be a numeric PWM value; ignore noise.
            try:
                var_pwm.set(int(data))
            except ValueError:
                pass
s1 = tk.Scale(root,label="PWM",
from_=100,to=700,orient=tk.HORIZONTAL,length=740,
showvalue=1,tickinterval=100,resolution=1,variable=var_pwm,troughcolor='#42A5F5',fg=color_text,command=set_pwm,bg=color_bg,highlightthickness=0)
s1.place(x=30,y=270) #Define a Scale and put it in position
s2 = tk.Scale(root,label="SETPS",
from_=1,to=14,orient=tk.HORIZONTAL,length=440,
showvalue=1,tickinterval=1,resolution=1,variable=var_setps,troughcolor='#42A5F5',fg=color_text,bg=color_bg,highlightthickness=0)
s2.place(x=330,y=350) #Define a Scale and put it in position
s3 = tk.Scale(root,label="TIME",
from_=0.1,to=1,orient=tk.HORIZONTAL,length=440,
showvalue=0.1,tickinterval=0.2,resolution=0.1,variable=var_time,troughcolor='#42A5F5',fg=color_text,bg=color_bg,highlightthickness=0)
s3.place(x=330,y=450) #Define a Scale and put it in position
Btn_RUN = tk.Button(root, width=18, text='RUN',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_run)
Btn_RUN.place(x=187,y=388)
Btn_ALL = tk.Button(root, width=18, text='RUN ALL',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_all)
Btn_ALL.place(x=187,y=428)
Btn_STOP = tk.Button(root, width=18, text='STOP',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_stop)
Btn_STOP.place(x=187,y=488)
Btn_STEPALL = tk.Button(root, width=18, text='STEP ALL',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_stepall)
Btn_STEPALL.place(x=30,y=488)
l_ip=tk.Label(root,width=18,text='Status',fg=color_text,bg=color_btn)
l_ip.place(x=30,y=110) #Define a Label and put it in position
l_ip_2=tk.Label(root,width=18,text='pwm:%s'%(var_pwm.get()),fg=color_text,bg=color_btn)
l_ip_2.place(x=30,y=145) #Define a Label and put it in position
l_ip_L=tk.Label(root,width=18,text='choose L',fg=color_text,bg=color_btn)
l_ip_L.place(x=230,y=145)
l_ip_SET=tk.Label(root,width=18,text='pos set',fg=color_text,bg=color_btn)
l_ip_SET.place(x=430,y=145)
l_ip_4=tk.Label(root,width=18,text='Disconnected',fg=color_text,bg='#F44336')
l_ip_4.place(x=637,y=110) #Define a Label and put it in position
l_ip_5=tk.Label(root,width=18,text='Use default IP',fg=color_text,bg=color_btn)
l_ip_5.place(x=637,y=145) #Define a Label and put it in position
E1 = tk.Entry(root,show=None,width=16,bg="#37474F",fg='#eceff1')
E1.place(x=170,y=40) #Define a Entry and put it in position
l_ip_3=tk.Label(root,width=10,text='IP Address:',fg=color_text,bg='#000000')
l_ip_3.place(x=165,y=15) #Define a Label and put it in position
Btn14= tk.Button(root, width=8, text='Connect',fg=color_text,bg=color_btn,command=connect_2,relief='ridge')
Btn14.place(x=300,y=35) #Define a Button and put it in position
#Define buttons and put these in position
Btn0 = tk.Button(root, width=3, text='+',bd=0,fg=color_text,bg=color_btn,relief='ridge')
Btn1 = tk.Button(root, width=3, text='-',bd=0,fg=color_text,bg=color_btn,relief='ridge')
Btn0.place(x=30,y=195)
Btn1.place(x=30,y=230)
Btn_L0 = tk.Button(root, width=3, text='L0',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L0)
Btn_L0.place(x=65,y=195)
Btn_L1 = tk.Button(root, width=3, text='L1',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L1)
Btn_L1.place(x=100,y=195)
Btn_L2 = tk.Button(root, width=3, text='L2',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L2)
Btn_L2.place(x=135,y=195)
Btn_L3 = tk.Button(root, width=3, text='L3',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L3)
Btn_L3.place(x=170,y=195)
Btn_L4 = tk.Button(root, width=3, text='L4',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L4)
Btn_L4.place(x=205,y=195)
Btn_L5 = tk.Button(root, width=3, text='L5',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L5)
Btn_L5.place(x=240,y=195)
Btn_L6 = tk.Button(root, width=3, text='L6',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L6)
Btn_L6.place(x=275,y=195)
Btn_L7 = tk.Button(root, width=3, text='L7',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L7)
Btn_L7.place(x=310,y=195)
Btn_L8 = tk.Button(root, width=3, text='L8',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L8)
Btn_L8.place(x=345,y=195)
Btn_L9 = tk.Button(root, width=3, text='L9',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L9)
Btn_L9.place(x=380,y=195)
Btn_L10 = tk.Button(root, width=3, text='L10',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L10)
Btn_L10.place(x=415,y=195)
Btn_L11 = tk.Button(root, width=3, text='L11',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L11)
Btn_L11.place(x=450,y=195)
Btn_L12 = tk.Button(root, width=3, text='L12',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L12)
Btn_L12.place(x=485,y=195)
Btn_L13 = tk.Button(root, width=3, text='L13',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L13)
Btn_L13.place(x=520,y=195)
Btn_L14 = tk.Button(root, width=3, text='L14',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L14)
Btn_L14.place(x=555,y=195)
Btn_L15 = tk.Button(root, width=3, text='L15',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_L15)
Btn_L15.place(x=590,y=195)
Btn_CONFIG = tk.Button(root, width=18, text='CONFIG',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_config)
Btn_CONFIG.place(x=637,y=195)
Btn_SAVE = tk.Button(root, width=8, text='SAVE',bd=0,fg=color_text,bg=color_btn,relief='ridge',command=set_save)
Btn_SAVE.place(x=637,y=230)
Btn_RESET | |
duplicate geometries.
Args:
input_path (PathLike): the input file
output_path (PathLike): the file to write the result to
input_layer (str, optional): input layer name. Optional if the input
file only contains one layer.
        output_layer (str, optional): output layer name. Optional if the output
            file only contains one layer.
columns (List[str], optional): If not None, only output the columns
specified. Defaults to None.
explodecollections (bool, optional): True to output only simple geometries.
Defaults to False.
force (bool, optional): overwrite existing output file(s).
Defaults to False.
"""
logger.info(f"Start delete_duplicate_geometries on {input_path}")
return _geoops_sql.delete_duplicate_geometries(
input_path=Path(input_path),
output_path=Path(output_path),
input_layer=input_layer,
output_layer=output_layer,
columns=columns,
explodecollections=explodecollections,
force=force,
)
def dissolve(
    input_path: Union[str, "os.PathLike[Any]"],
    output_path: Union[str, "os.PathLike[Any]"],
    explodecollections: bool,
    groupby_columns: Optional[List[str]] = None,
    agg_columns: Optional[dict] = None,
    tiles_path: Union[str, "os.PathLike[Any]", None] = None,
    nb_squarish_tiles: int = 1,
    input_layer: Optional[str] = None,
    output_layer: Optional[str] = None,
    nb_parallel: int = -1,
    batchsize: int = -1,
    force: bool = False,
):
    """
    Applies a dissolve operation on the input file.

    If columns are specified with ``groupby_columns``, the data is first grouped
    on those columns before the geometries are merged.

    Data in other columns can be retained in the output by specifying the
    ``agg_columns`` parameter.

    This is an example of how data in the columns that isn't grouped on can be
    aggregated to be added to the output file:

    ::

        import geofileops as gfo

        gfo.dissolve(
            input_path=...,
            output_path=...,
            groupby_columns=["cropgroup"],
            agg_columns={
                "columns": [
                    {"column": "crop", "agg": "max", "as": "crop_max"},
                    {"column": "crop", "agg": "count", "as": "crop_count"},
                    {
                        "column": "crop",
                        "agg": "concat",
                        "distinct": True,
                        "sep": ";",
                        "as": "crop_concat",
                    },
                    {"column": "area", "agg": "mean", "as": "area_mean"},
                ]
            },
            explodecollections=False,
        )

    The following example will save all detailed data for the columns
    "crop_label" and "area" in the output file. The detailed data is encoded
    per group/row in a "json" text field. Shapefiles only support up to 254
    characters in a text field, so this format won't be very suited as output
    format for this option.

    ::

        import geofileops as gfo

        gfo.dissolve(
            input_path=...,
            output_path=...,
            groupby_columns=["cropgroup"],
            agg_columns={"json": ["crop", "area"]},
            explodecollections=False,
        )

    This results in this type of output:

    ::

        cropgroup   json
        Grasses     ["{"crop":"Meadow","area":1290,"fid_orig":5}","{"crop":"Pasture",...
        Maize       ["{"crop":"Silo","area":3889.29,"fid_orig":2}","{"crop":"Fodder",...

    If the output is tiled (by specifying ``tiles_path`` or ``nb_squarish_tiles`` > 1),
    the result will be clipped on the output tiles and the tile borders are
    never crossed.

    Args:
        input_path (PathLike): the input file
        output_path (PathLike): the file to write the result to
        explodecollections (bool): True to output only simple geometries. If
            False, this can result in huge geometries for large files,
            especially if no groupby_columns are specified.
        groupby_columns (List[str], optional): columns to group on while
            aggregating. Defaults to None, resulting in a spatial union of all
            geometries that touch.
        agg_columns (dict, optional): columns to aggregate based on
            the groupings by groupby columns. Depending on the top-level key
            value of the dict, the output for the aggregation is different:

                - "json": dump all data per group to one "json" column. The
                  value can be None (= all columns) or a list of columns to include.
                - "columns": aggregate to separate columns. The value should
                  be a list of dicts with the following keys:

                    - "column": column name in the input file.
                    - "agg": aggregation to use:

                        - count: the number of items
                        - sum:
                        - mean
                        - min
                        - max
                        - median
                        - concat

                    - "as": column name in the output file.
                    - "distinct" (optional): True to distinct the values before
                      aggregation.

        tiles_path (PathLike, optional): a path to a geofile containing tiles.
            If specified, the output will be dissolved/unioned only within the
            tiles provided.
            Can be used to evade huge geometries being created if the input
            geometries are very interconnected.
            Defaults to None (= the output is not tiled).
        nb_squarish_tiles (int, optional): the approximate number of tiles the
            output should be dissolved/unioned to. If > 1, a tiling grid is
            automatically created based on the total bounds of the input file.
            The input geometries will be dissolved/unioned only within the
            tiles generated.
            Can be used to evade huge geometries being created if the input
            geometries are very interconnected.
            Defaults to 1 (= the output is not tiled).
        input_layer (str, optional): input layer name. Optional if the
            file only contains one layer.
        output_layer (str, optional): output layer name. Optional if the
            file only contains one layer.
        nb_parallel (int, optional): the number of parallel processes to use.
            Defaults to -1: use all available processors.
        batchsize (int, optional): indicative number of rows to process per
            batch. A smaller batch size, possibly in combination with a
            smaller nb_parallel, will reduce the memory usage.
            Defaults to -1: (try to) determine optimal size automatically.
        force (bool, optional): overwrite existing output file(s).
            Defaults to False.
    """
    # Init
    if tiles_path is not None:
        tiles_path = Path(tiles_path)

    # If an empty list of geometry columns is passed, convert it to None to
    # simplify the rest of the code
    if groupby_columns is not None and len(groupby_columns) == 0:
        groupby_columns = None

    logger.info(f"Start dissolve on {input_path} to {output_path}")
    return _geoops_gpd.dissolve(
        input_path=Path(input_path),
        output_path=Path(output_path),
        explodecollections=explodecollections,
        groupby_columns=groupby_columns,
        agg_columns=agg_columns,
        tiles_path=tiles_path,
        nb_squarish_tiles=nb_squarish_tiles,
        input_layer=input_layer,
        output_layer=output_layer,
        nb_parallel=nb_parallel,
        batchsize=batchsize,
        force=force,
    )
def isvalid(
    input_path: Union[str, "os.PathLike[Any]"],
    output_path: Union[str, "os.PathLike[Any]", None] = None,
    only_invalid: bool = False,
    input_layer: Optional[str] = None,
    output_layer: Optional[str] = None,
    nb_parallel: int = -1,
    batchsize: int = -1,
    force: bool = False,
) -> bool:
    """
    Checks for all geometries in the geofile if they are valid, and writes the
    results to the output file.

    Args:
        input_path (PathLike): The input file.
        output_path (PathLike, optional): The output file path. If not
            specified the result will be written in a new file alongside the
            input file. Defaults to None.
        only_invalid (bool, optional): if True, only put invalid results in the
            output file. Deprecated: always treated as True.
        input_layer (str, optional): input layer name. Optional if the
            file only contains one layer.
        output_layer (str, optional): output layer name. Optional if the
            file only contains one layer.
        nb_parallel (int, optional): the number of parallel processes to use.
            Defaults to -1: use all available processors.
        batchsize (int, optional): indicative number of rows to process per
            batch. A smaller batch size, possibly in combination with a
            smaller nb_parallel, will reduce the memory usage.
            Defaults to -1: (try to) determine optimal size automatically.
        force (bool, optional): overwrite existing output file(s).
            Defaults to False.

    Returns:
        bool: True if all geometries were valid.
    """
    # Normalise paths up-front so both branches work on Path objects.
    input_path = Path(input_path)
    if output_path is not None:
        output_path = Path(output_path)
    else:
        # Default: write alongside the input file with an "_isvalid" suffix.
        output_path = (
            input_path.parent / f"{input_path.stem}_isvalid{input_path.suffix}"
        )

    # Go!
    logger.info(f"Start isvalid on {input_path}")
    return _geoops_sql.isvalid(
        input_path=input_path,
        output_path=output_path,
        input_layer=input_layer,
        output_layer=output_layer,
        nb_parallel=nb_parallel,
        batchsize=batchsize,
        force=force,
    )
def makevalid(
input_path: Union[str, "os.PathLike[Any]"],
output_path: Union[str, "os.PathLike[Any]"],
input_layer: Optional[str] = None,
output_layer: Optional[str] = None,
columns: Optional[List[str]] = None,
explodecollections: bool = False,
force_output_geometrytype: Optional[GeometryType] = None,
precision: Optional[float] = None,
nb_parallel: int = -1,
batchsize: int = -1,
force: bool = False,
):
"""
Makes all geometries in the input file valid and writes the result to the
output path.
Alternative names:
- QGIS: fix geometries
- shapely: make_valid
Args:
input_path (PathLike): The input file.
output_path (PathLike): The file to write the result to.
input_layer (str, optional): input layer name. Optional if the
file only contains one layer.
        output_layer (str, optional): output layer name. Optional if the
            file only contains one layer.
columns (List[str], optional): If not None, only output the columns
specified. Defaults to None.
explodecollections (bool, optional): True to output only simple geometries.
Defaults to False.
force_output_geometrytype (GeometryType, optional): The output geometry type to
force. Defaults to None, and then the geometry type of the input is used
        precision (float, optional): the precision to keep in the coordinates.
            E.g. 0.001 to keep 3 decimals. None doesn't change the precision.
            Defaults to None.
nb_parallel (int, optional): the number of parallel processes to use.
Defaults to -1: use all available processors.
batchsize (int, optional): indicative number of rows to process per
batch. | |
chassis_id_mac_address = 4
Chassis ID is a MAC address
.. data:: chassis_id_network_address = 5
Chassis ID is a network address
.. data:: chassis_id_interface_name = 6
Chassis ID is an interface name
.. data:: chassis_id_local = 7
Chassis ID is a local name
.. data:: chassis_id_unknown_type = 8
Unknown Chassis ID type
"""
chassis_id_chassis_component = Enum.YLeaf(1, "chassis-id-chassis-component")
chassis_id_interface_alias = Enum.YLeaf(2, "chassis-id-interface-alias")
chassis_id_port_component = Enum.YLeaf(3, "chassis-id-port-component")
chassis_id_mac_address = Enum.YLeaf(4, "chassis-id-mac-address")
chassis_id_network_address = Enum.YLeaf(5, "chassis-id-network-address")
chassis_id_interface_name = Enum.YLeaf(6, "chassis-id-interface-name")
chassis_id_local = Enum.YLeaf(7, "chassis-id-local")
chassis_id_unknown_type = Enum.YLeaf(8, "chassis-id-unknown-type")
class CfmPmEgressAction(Enum):
    """
    CfmPmEgressAction (Enum Class)

    Egress action

    .. data:: egress_ok = 1

        OK

    .. data:: egress_down = 2

        Down

    .. data:: egress_blocked = 3

        STP Blocked

    .. data:: egress_vid = 4

        VID Blocked

    """

    # Members map (numeric value, YANG identity name).
    egress_ok = Enum.YLeaf(1, "egress-ok")
    egress_down = Enum.YLeaf(2, "egress-down")
    egress_blocked = Enum.YLeaf(3, "egress-blocked")
    egress_vid = Enum.YLeaf(4, "egress-vid")
class CfmPmElmReplyFilter(Enum):
    """
    CfmPmElmReplyFilter (Enum Class)

    Reply filter used for Exploratory Linktrace operations

    .. data:: reply_filter_not_present = 0

        Reply Filter not present

    .. data:: reply_filter_default = 1

        Reply from ports which are not MAC-pruned, VID-pruned, or
        STP-blocked

    .. data:: reply_filter_vlan_topology = 2

        Reply from ports which are not VID-pruned or STP-blocked

    .. data:: reply_filter_spanning_tree = 3

        Reply from ports which are not STP-blocked

    .. data:: reply_filter_all_ports = 4

        Reply from all ports

    """

    # Members map (numeric value, YANG identity name).
    reply_filter_not_present = Enum.YLeaf(0, "reply-filter-not-present")
    reply_filter_default = Enum.YLeaf(1, "reply-filter-default")
    reply_filter_vlan_topology = Enum.YLeaf(2, "reply-filter-vlan-topology")
    reply_filter_spanning_tree = Enum.YLeaf(3, "reply-filter-spanning-tree")
    reply_filter_all_ports = Enum.YLeaf(4, "reply-filter-all-ports")
class CfmPmElrEgressAction(Enum):
    """
    CfmPmElrEgressAction (Enum Class)

    ELR Egress action

    .. data:: elr_egress_ok = 1

        OK

    .. data:: elr_egress_down = 2

        Down

    .. data:: elr_egress_blocked = 3

        STP Blocked

    .. data:: elr_egress_vid = 4

        VID Blocked

    .. data:: elr_egress_mac = 255

        MAC Pruned

    """

    # Members map (numeric value, YANG identity name).
    elr_egress_ok = Enum.YLeaf(1, "elr-egress-ok")
    elr_egress_down = Enum.YLeaf(2, "elr-egress-down")
    elr_egress_blocked = Enum.YLeaf(3, "elr-egress-blocked")
    elr_egress_vid = Enum.YLeaf(4, "elr-egress-vid")
    elr_egress_mac = Enum.YLeaf(255, "elr-egress-mac")
class CfmPmElrIngressAction(Enum):
    """
    CfmPmElrIngressAction (Enum Class)

    ELR Ingress action

    .. data:: elr_ingress_ok = 1

        OK

    .. data:: elr_ingress_down = 2

        Down

    .. data:: elr_ingress_blocked = 3

        STP Blocked

    .. data:: elr_ingress_vid = 4

        VID Blocked

    """

    # Members map (numeric value, YANG identity name).
    elr_ingress_ok = Enum.YLeaf(1, "elr-ingress-ok")
    elr_ingress_down = Enum.YLeaf(2, "elr-ingress-down")
    elr_ingress_blocked = Enum.YLeaf(3, "elr-ingress-blocked")
    elr_ingress_vid = Enum.YLeaf(4, "elr-ingress-vid")
class CfmPmElrRelayAction(Enum):
    """
    CfmPmElrRelayAction (Enum Class)

    ELR relay action

    .. data:: elr_relay_hit = 1

        Target Hit

    .. data:: elr_relay_fdb = 2

        Filtering database

    .. data:: elr_relay_flood = 3

        Flood forwarded

    .. data:: elr_relay_drop = 4

        Dropped

    """

    # Members map (numeric value, YANG identity name).
    elr_relay_hit = Enum.YLeaf(1, "elr-relay-hit")
    elr_relay_fdb = Enum.YLeaf(2, "elr-relay-fdb")
    elr_relay_flood = Enum.YLeaf(3, "elr-relay-flood")
    elr_relay_drop = Enum.YLeaf(4, "elr-relay-drop")
class CfmPmEltDelayModel(Enum):
    """
    CfmPmEltDelayModel (Enum Class)

    Delay model used for Exploratory Linktrace operations

    .. data:: delay_model_invalid = 0

        Not a valid delay model

    .. data:: delay_model_logarithmic = 1

        Reply using logarithmic delay model

    .. data:: delay_model_constant = 2

        Reply using constant delay model

    """

    # Members map (numeric value, YANG identity name).
    delay_model_invalid = Enum.YLeaf(0, "delay-model-invalid")
    delay_model_logarithmic = Enum.YLeaf(1, "delay-model-logarithmic")
    delay_model_constant = Enum.YLeaf(2, "delay-model-constant")
class CfmPmIdFmt(Enum):
    """
    CfmPmIdFmt (Enum Class)

    ID format

    .. data:: id_format_is_string = 0

        ID format is a string

    .. data:: id_format_is_mac_address = 1

        ID format is a MAC address

    .. data:: id_format_is_raw_hex = 2

        ID format is raw hex

    """

    # Members map (numeric value, YANG identity name).
    id_format_is_string = Enum.YLeaf(0, "id-format-is-string")
    id_format_is_mac_address = Enum.YLeaf(1, "id-format-is-mac-address")
    id_format_is_raw_hex = Enum.YLeaf(2, "id-format-is-raw-hex")
class CfmPmIngressAction(Enum):
    """
    CfmPmIngressAction (Enum Class)

    Ingress action

    .. data:: ingress_ok = 1

        OK

    .. data:: ingress_down = 2

        Down

    .. data:: ingress_blocked = 3

        STP Blocked

    .. data:: ingress_vid = 4

        VID Blocked

    """

    # Members map (numeric value, YANG identity name).
    ingress_ok = Enum.YLeaf(1, "ingress-ok")
    ingress_down = Enum.YLeaf(2, "ingress-down")
    ingress_blocked = Enum.YLeaf(3, "ingress-blocked")
    ingress_vid = Enum.YLeaf(4, "ingress-vid")
class CfmPmIntfStatus(Enum):
    """
    CfmPmIntfStatus (Enum Class)

    Interface status

    .. data:: interface_status_up = 1

        Interface is up

    .. data:: interface_status_down = 2

        Interface is down

    .. data:: interface_status_testing = 3

        Interface is in testing mode

    .. data:: interface_status_unknown = 4

        Unknown interface status

    .. data:: interface_status_dormant = 5

        Interface is dormant

    .. data:: interface_status_not_present = 6

        Interface status not found

    .. data:: interface_status_lower_layer_down = 7

        Lower layer is down

    """

    # Members map (numeric value, YANG identity name).
    interface_status_up = Enum.YLeaf(1, "interface-status-up")
    interface_status_down = Enum.YLeaf(2, "interface-status-down")
    interface_status_testing = Enum.YLeaf(3, "interface-status-testing")
    interface_status_unknown = Enum.YLeaf(4, "interface-status-unknown")
    interface_status_dormant = Enum.YLeaf(5, "interface-status-dormant")
    interface_status_not_present = Enum.YLeaf(6, "interface-status-not-present")
    interface_status_lower_layer_down = Enum.YLeaf(7, "interface-status-lower-layer-down")
class CfmPmLastHopFmt(Enum):
    """
    CfmPmLastHopFmt (Enum Class)

    Last hop identifier format

    .. data:: last_hop_none = 0

        No last hop identifier

    .. data:: last_hop_host_name = 1

        Last hop identifier is a hostname

    .. data:: last_hop_egress_id = 2

        Last hop identifier is an egress ID

    """

    # Members map (numeric value, YANG identity name).
    last_hop_none = Enum.YLeaf(0, "last-hop-none")
    last_hop_host_name = Enum.YLeaf(1, "last-hop-host-name")
    last_hop_egress_id = Enum.YLeaf(2, "last-hop-egress-id")
class CfmPmLtMode(Enum):
    """
    CfmPmLtMode (Enum Class)

    Type of Linktrace operation

    .. data:: cfm_pm_lt_mode_basic = 1

        Basic IEEE 802.1ag Linktrace

    .. data:: cfm_pm_lt_mode_exploratory = 2

        Cisco Exploratory Linktrace

    """

    # Members map (numeric value, YANG identity name).
    cfm_pm_lt_mode_basic = Enum.YLeaf(1, "cfm-pm-lt-mode-basic")
    cfm_pm_lt_mode_exploratory = Enum.YLeaf(2, "cfm-pm-lt-mode-exploratory")
class CfmPmMepDefect(Enum):
    """
    CfmPmMepDefect (Enum Class)

    Defects that can be reported by a MEP

    .. data:: defect_none = 0

        No defect reported

    .. data:: defect_rdi_ccm = 1

        Some Peer MEP's CCM has the RDI bit set

    .. data:: defect_ma_cstatus = 2

        A Peer MEP port or interface status error has been reported

    .. data:: defect_remote_ccm = 3

        Not receiving valid CCMs from at least one Peer MEP

    .. data:: defect_error_ccm = 4

        Currently receiving invalid CCMs from at least one Peer MEP

    .. data:: defect_cross_connect_ccm = 5

        Currently receiving CCMs from an incorrect service (MA)

    """

    # Members map (numeric value, YANG identity name).
    defect_none = Enum.YLeaf(0, "defect-none")
    defect_rdi_ccm = Enum.YLeaf(1, "defect-rdi-ccm")
    defect_ma_cstatus = Enum.YLeaf(2, "defect-ma-cstatus")
    defect_remote_ccm = Enum.YLeaf(3, "defect-remote-ccm")
    defect_error_ccm = Enum.YLeaf(4, "defect-error-ccm")
    defect_cross_connect_ccm = Enum.YLeaf(5, "defect-cross-connect-ccm")
class CfmPmMepFngState(Enum):
    """
    CfmPmMepFngState (Enum Class)

    Fault Notification Generation state machine states

    .. data:: fng_reset = 1

        FNG in reset state

    .. data:: fng_defect = 2

        FNG has detected but not yet reported a defect

    .. data:: fng_report_defect = 3

        FNG is in the process of reporting a defect

    .. data:: fng_defect_reported = 4

        FNG has reported a defect

    .. data:: fng_defect_clearing = 5

        No defect present, but the reset timer has not yet expired

    """

    # Members map (numeric value, YANG identity name).
    fng_reset = Enum.YLeaf(1, "fng-reset")
    fng_defect = Enum.YLeaf(2, "fng-defect")
    fng_report_defect = Enum.YLeaf(3, "fng-report-defect")
    fng_defect_reported = Enum.YLeaf(4, "fng-defect-reported")
    fng_defect_clearing = Enum.YLeaf(5, "fng-defect-clearing")
class CfmPmPktAction(Enum):
"""
CfmPmPktAction (Enum Class)
Action taken for received packet
.. data:: packet_processed = 0
Packet processed successfully
.. data:: packet_forwarded = 1
Packet forwarded
.. data:: unknown_opcode = 2
Packet dropped at a MEP due to unknown opcode
.. data:: filter_level = 3
Packet dropped due to level/opcode filtering at
a MEP
.. data:: filter_blocked = 4
Packet dropped because interface is STP blocked
.. data:: filter_local_mac = 5
Packet dropped due to local destination MAC
.. data:: malformed_ccm_size = 6
CCM too short or too long
.. data:: malformed_ccm_mep_id = 7
Invalid MEP-ID
.. data:: malformed_too_short = 8
Packet too short
.. data:: malformed_destination_mac_unicast = 9
Destination MAC address does not match
interface
.. data:: malformed_destination_mac_multicast = 10
Invalid multicast destination MAC address
.. data:: malformed_tlv_offset = 11
TLV offset too short or beyond the end of the
packet
.. data:: malformed_lbm_source_mac = 12
Invalid source MAC address for LBM
.. data:: malformed_ltr_relay_action = 13
Unknown LTR relay action
.. data:: malformed_ltr_reply_tlv = 14
LTR has neither reply-ingress or reply-egress
.. data:: malformed_lt_origin = 15
Invalid Linktrace Message origin MAC address
.. data:: malformed_ltm_target = 16
Invalid LTM target MAC address
.. data:: malformed_source_mac = 17
Invalid source MAC address
.. data:: malformed_header_too_short = 18
Packet too short for CFM header
.. data:: malformed_tlv_header_overrun = 19
TLV header extends beyond the end of the packet
.. data:: malformed_tlv_overrun = 20
TLV extends beyond the end of the packet
.. data:: malformed_duplicate_sender_id = 21
Multiple Sender-ID TLVs found
.. data:: malformed_duplicate_port_status = 22
Multiple Port-status TLVs found
.. data:: malformed_duplicate_interface_status = 23
Multiple Interface-state TLVs found
.. data:: malformed_wrong_tlv = 24
Invalid TLV for this type of packet found
.. data:: malformed_duplicate_data = 25
Multiple Data TLVs found
.. data:: malformed_duplicate_ltr_egress_id = 26
Multiple LTR-Egress-ID TLVs found
.. data:: malformed_duplicate_reply_ingress = 27
Multiple Reply-ingress TLVs found
.. data:: malformed_duplicate_reply_egress = 28
Multiple Reply-egress TLVs found
.. data:: malformed_duplicate_ltm_egress_id = 29
Multiple LTM-Egress-ID TLVs found
.. data:: malformed_sender_id_size = 30
Sender-ID TLV is too | |
[target]
return df.drop(columns=target), df[target].values.ravel()
def format_cell(bg, t='black'):
    """Build an inline CSS declaration for a table cell.

    bg is the background color, t the text color (defaults to black).
    """
    return 'background-color: {};color: {};'.format(bg, t)
def bg(style, subset=None, rev=True, axis=0):
    """Apply a background gradient to a pandas Styler, per column by default.

    When subset is None all columns of the underlying frame are shaded.
    rev=True uses the reversed module-level colormap `_cmap`.
    """
    target = style.data.columns if subset is None else subset
    chosen_cmap = _cmap.reversed() if rev else _cmap
    return style.background_gradient(cmap=chosen_cmap, subset=target, axis=axis)
def highlight_val(df, m: dict):
    """Replace values in df with CSS styles from m (value -> (bg, text) colors)."""
    css_by_value = {value: format_cell(bg=colors[0], t=colors[1])
                    for value, colors in m.items()}
    return df.replace(css_by_value)
def get_style(df):
    """Return a Styler for df with floats rendered to three decimal places."""
    styler = df.style
    return styler.format('{:.3f}')
def show_scores(df, rev=False, lower_better=False):
    """Display df with gradient highlights; test/train score columns are
    shaded so the best score stands out (direction controlled by lower_better)."""
    score_cols = [col for col in df.columns
                  if any(token in col for token in ('test', 'train'))
                  and 'std' not in col]
    styled = get_style(df) \
        .pipe(bg, rev=rev) \
        .pipe(bg, subset=score_cols, rev=not lower_better)
    display(styled)
def append_fit_score(df, scores, name):
    """No-op placeholder: returns *df* unchanged; *scores* and *name* are ignored."""
    return df
def append_mean_std_score(df=None, scores=None, name=None, show=False, scoring: dict = None):
    """Append a row of mean and std values for all scoring metrics to *df*.

    Parameters
    ----------
    df : pd.DataFrame, optional
        Accumulator frame; a fresh one is created when None.
    scores : dict
        Cross-validation output mapping metric name -> per-fold values.
    name : str | Pipeline
        Row label; for a pipeline, the model step name (steps[1][0]) is used.
    show : bool, optional
        Display the styled frame after appending. Defaults to False.
    scoring : dict, optional
        Scorer mapping used to flip the sign of negated ("neg_*") scorers.

    Returns
    -------
    pd.DataFrame
    """
    if df is None:
        df = pd.DataFrame()

    # Accept a fitted Pipeline and use its model step name as the row label.
    # Duck-typed on `.steps` so sklearn need not be imported here.
    if hasattr(name, 'steps'):
        name = name.steps[1][0]

    exclude = ['fit_time', 'score_time']

    def name_cols(cols, type_):
        return {col: f'{type_}{col}' for col in cols}

    score_cols = [col for col in scores.keys() if col not in exclude]
    mean_cols = name_cols(score_cols, '')
    std_cols = name_cols(score_cols, 'std_')

    # pd.Series.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent for stacking the mean and std Series.
    means = pd.DataFrame(scores).mean().rename(mean_cols)
    stds = pd.DataFrame(scores) \
        .drop(columns=exclude) \
        .std() \
        .rename(std_cols)
    df_scores = pd.concat([means, stds])

    df.loc[name, df_scores.index] = df_scores

    # flip sign of mean cols for scorers where lower is better, eg MASE
    if scoring:
        for scorer_name, scorer in scoring.items():
            if hasattr(scorer, '_sign') or 'neg' in str(scorer):
                scorer_cols = [c for c in df.columns if 'std' not in c
                               and scorer_name in c.split('_')]
                df.loc[name, scorer_cols] = df.loc[name, scorer_cols] * -1

    if show:
        display(get_style(df))

    return df
def df_transformed(data, ct):
    """Wrap ColumnTransformer output in a DataFrame with readable column names.

    Parameters
    ----------
    data : np.ndarray
        Transformed data from the ColumnTransformer.
    ct : ColumnTransformer

    Returns
    -------
    pd.DataFrame
    """
    # get_ct_feature_names yields (transformer, feature) tuples; keep the feature part
    names = [feature for _, feature in get_ct_feature_names(ct=ct)]
    return pd.DataFrame(data, columns=names)
def df_coef(ct, model, num_features=20, best=True, round_coef=3, feat_imp=False) -> pd.DataFrame:
    """Rank features by model coefficient (or feature importance).

    Parameters
    ----------
    ct : ColumnTransformer
    model : any
        Fitted sklearn model exposing `.coef_` (or `.feature_importances_`).
    num_features : int
        Number of top features to keep; -1 returns all.
    best : bool
        True for highest coefficients first, False for lowest.
    round_coef : int
        Decimal places for the coef column.
    feat_imp : bool, default False
        Read `.feature_importances_` instead of `.coef_`.

    Returns
    -------
    pd.DataFrame
        Columns: transformer, feature, coef.
    """
    values = model.feature_importances_ if feat_imp else model.coef_[0]
    # transformer_feature holds (transformer, feature_name) tuples
    frame = pd.DataFrame(dict(
        transformer_feature=get_ct_feature_names(ct),
        coef=values))
    expanded = pd.DataFrame(frame['transformer_feature'].to_list())
    frame = pd.concat([frame, expanded], axis=1) \
        .rename(columns={0: 'transformer', 1: 'feature'}) \
        .drop(columns=['transformer_feature'])
    frame = frame.sort_values('coef', ascending=not best)[:num_features]
    frame = frame.assign(coef=lambda x: x.coef.round(round_coef))
    return frame[['transformer', 'feature', 'coef']]
def get_feature_out(estimator, feature_in):
    """Map input feature names through one fitted transformer step."""
    if hasattr(estimator, 'get_feature_names'):
        if isinstance(estimator, _VectorizerMixin):
            # vectorizers derive names from their vocabulary, not the input
            return estimator.get_feature_names()
        return estimator.get_feature_names(feature_in)
    if isinstance(estimator, SelectorMixin):
        # feature selectors keep only the supported subset
        return np.array(feature_in)[estimator.get_support()]
    return feature_in
def get_ct_feature_names(ct):
    """Return (transformer_name, feature_name) tuples for a fitted ColumnTransformer.

    Adapted from https://stackoverflow.com/a/57534118/6278428.
    Handles estimators and pipelines nested in the transformer; does not
    support remainder='passthrough' with unnamed input columns.
    """
    output_features = []

    def tagged(name, feats):
        # keep the transformer name with each feature for later filtering
        return [(name, feature_name) for feature_name in feats]

    for name, estimator, features in ct.transformers_:
        if estimator == 'drop':
            continue
        if name == 'remainder':
            if estimator == 'passthrough':
                output_features.extend(
                    tagged(name, ct._feature_names_in[features]))
            continue
        if isinstance(estimator, Pipeline):
            current_features = features
            for step in estimator:
                current_features = get_feature_out(step, current_features)
            output_features.extend(tagged(name, current_features))
        else:
            output_features.extend(
                tagged(name, get_feature_out(estimator, features)))
    return output_features
def pretty_dict(m: dict, html=False, prnt=True) -> str:
    """Render a dict as "key: value" lines.

    Parameters
    ----------
    m : dict
    html : bool
        Use '<br>' as the line separator instead of a newline.
    prnt : bool
        Print the result instead of returning it.

    Returns
    -------
    str | None
        The formatted text when prnt is False, otherwise None (printed).
    """
    newline_char = '<br>' if html else '\n'
    s = json.dumps(m, indent=4)
    # strip json punctuation so only keys/values remain
    remove = '}{\'"[]'
    for char in remove:
        s = s.replace(char, '')
    s = s.replace(', ', newline_char).replace(',\n', newline_char)
    # drop a single leading newline, then trailing whitespace + newline
    s = re.sub(r'^[\n]', '', s)
    s = re.sub(r'\s*[\n]$', '', s)
    if prnt:
        print(s)
    else:
        return s
def df_dict(m: dict, colname=None, prnt=True):
    """Build (and optionally display) a one-column DataFrame from a dict.

    Parameters
    ----------
    m : dict
        Dictionary to display (keys become the index).
    colname : str, optional
        Column label; defaults to 'col1'.
    prnt : bool, optional
        Display instead of returning.
    """
    frame = pd.DataFrame.from_dict(m, orient='index', columns=[colname or 'col1'])
    if prnt:
        display(frame)
    else:
        return frame
def inverse(m: dict) -> dict:
    """Swap keys and values of a dict (later duplicates win)."""
    return dict((value, key) for key, value in m.items())
def set_self(m, prnt=False, exclude=()):
    """Assign a function's locals dict *m* onto its 'self' entry as attributes.

    Intended to be called as set_self(vars()) inside __init__; keys listed in
    *exclude* (plus '__class__' and 'self') are never copied.
    """
    if not isinstance(exclude, tuple):
        exclude = (exclude, )
    skip = exclude + ('__class__', 'self')  # never copy the class or self
    obj = m.get('self', None)  # self must always be present in the vars dict
    if obj is None:
        return
    for key, value in m.items():
        if prnt:
            print(f'\n\t{key}: {value}')
        if key not in skip:
            setattr(obj, key, value)
def remove_bad_chars(w: str):
    """Strip characters unsafe for file paths (" : < > | . \ / * ?) from *w*."""
    pattern = '[":<>|.\\\/\*\?]'
    return re.sub(pattern, '', str(w))
def to_snake(s: str):
    """Convert a messy camel-case string to lower snake case.

    Strips path-unsafe characters and brackets, maps newlines/dashes to
    underscores and '%' to 'pct', then splits on capital-letter boundaries.
    """
    cleaned = remove_bad_chars(s).strip()        # get rid of /<() etc
    cleaned = re.sub(r'[\]\[()]', '', cleaned)   # remove brackets/parens
    cleaned = re.sub(r'[\n-]', '_', cleaned)     # newline/dash -> underscore
    cleaned = re.sub(r'[%]', 'pct', cleaned)
    # insert '_' before capitals that start a new word (handles acronyms)
    expr = r'(?<!^)((?<![A-Z])|(?<=[A-Z])(?=[A-Z][a-z]))(?=[A-Z])'
    snake = re.sub(expr, '_', cleaned).lower()
    return snake.replace(' ', '_').replace('__', '_')
def lower_cols(df):
    """Snake-case the columns of a DataFrame, or the items of a list of names."""
    if isinstance(df, pd.DataFrame):
        mapping = {col: to_snake(col) for col in df.columns}
        return df.rename(columns=mapping)
    # plain iterable of names: return the converted list
    return [to_snake(col) for col in df]
def parse_datecols(df, format=None):
    """Coerce every column whose name contains 'date' or 'time' to datetime.

    Unparseable values become NaT; the frame is modified and returned.
    """
    datecols = [col for col in df.columns
                if any(token in col.lower() for token in ('date', 'time'))]
    df[datecols] = df[datecols].apply(
        pd.to_datetime, errors='coerce', format=format)
    return df
def mpl_dict(params):
    """Translate underscore keys to dotted matplotlib rcParams keys."""
    return dict((key.replace('_', '.'), value) for key, value in params.items())
def all_except(df, exclude: list):
    """Return df columns not matched by any entry in *exclude*.

    Note: matching uses `col in item`, so an exclude entry matches any
    column whose name is a substring of it.

    Parameters
    ----------
    df : pd.DataFrame
    exclude : list | iterable
        Column names (or strings containing them) to exclude.

    Returns
    -------
    list
    """
    kept = []
    for col in df.columns:
        if not any(col in item for item in exclude):
            kept.append(col)
    return kept
# scoring
def smape(y_true, y_pred, h=1, **kw):
    """Symmetric mean absolute percentage error, scaled by the horizon.

    Parameters
    ----------
    y_true : array-like
        Ground truth values
    y_pred : array-like
        Predicted values
    h : int, optional
        The forecast horizon, by default 1

    Returns
    -------
    float :
        The sMAPE of `y_pred` against `y_true`
    """
    numer = 2.0 * np.abs(y_true - y_pred)
    denom = (np.abs(y_true) + np.abs(y_pred)) * h
    return np.mean(numer / denom)
def mase(y_true, y_pred, h=1, **kw):
    """Mean absolute scaled error against the naive (shift-by-h) forecast.

    Parameters
    ----------
    y_true : array-like
        Ground truth values
    y_pred : array-like
        Predicted values
    h : int, optional
        The forecast horizon, by default 1

    Returns
    -------
    float :
        The MASE for `y_true` and `y_pred`
    """
    # average one-step change of the truth = scale of the naive forecast error
    naive_scale = np.abs(np.diff(y_true)).sum() / (y_pred.shape[0] - 1)
    return np.abs(y_true - y_pred).mean() / (naive_scale * h)
# assert mase(np.array([1,2,3,4,5,6]),np.array([2,3,4,5,6,7])) == 1.0, "MASE bust"
def avg_mase_smape(y_true, y_pred, h=1, **kw):
    """Average of MASE and SMAPE for time series predictions.

    Parameters
    ----------
    y_true : array-like
        Ground truth values
    y_pred : array-like
        Predicted values
    h : int, optional
        The forecast horizon, by default 1

    Returns
    -------
    float :
        (SMAPE + MASE) / 2 for `y_true` and `y_pred`
    """
    smape_val = smape(y_true, y_pred, h=h)
    mase_val = mase(y_true, y_pred, h=h)
    return (smape_val + mase_val) / 2
def reverse_pct(df, start_num, pct_col, num_col):
"""Convert % change back to number"""
nums_out = []
for i in range(df.shape[0]):
start_num = df[pct_col].iloc[i] * start_num + start_num
nums_out.append(start_num)
return | |
import random
import pandas as pd
import pytest
from evalml.preprocessing.data_splitters import BalancedClassificationSampler
@pytest.mark.parametrize("ratio,samples,percentage,seed",
                         [(1, 1, 0.2, 1),
                          (3.3, 101, 0.5, 100)])
def test_balanced_classification_init(ratio, samples, percentage, seed):
    """Constructor should store every hyperparameter verbatim."""
    sampler = BalancedClassificationSampler(
        balanced_ratio=ratio, min_samples=samples,
        min_percentage=percentage, random_seed=seed)
    for attr, expected in [('balanced_ratio', ratio),
                           ('min_samples', samples),
                           ('min_percentage', percentage),
                           ('random_seed', seed)]:
        assert getattr(sampler, attr) == expected
def test_balanced_classification_errors():
    """Out-of-range hyperparameters must raise ValueError with a clear message."""
    bad_inputs = [
        ({'balanced_ratio': -1}, "balanced_ratio must be"),
        ({'min_samples': 0}, "min_sample must be"),
        ({'min_percentage': 0}, "min_percentage must be"),
        ({'min_percentage': 0.6}, "min_percentage must be"),
        ({'min_percentage': -1.3}, "min_percentage must be"),
    ]
    for kwargs, message in bad_inputs:
        with pytest.raises(ValueError, match=message):
            BalancedClassificationSampler(**kwargs)
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_balanced_simple(num_classes):
    """Perfectly balanced data must come back untouched."""
    X = pd.DataFrame({"a": list(range(1000))})
    y = pd.Series([i % num_classes for i in range(1000)])
    sampler = BalancedClassificationSampler()
    kept = sampler.fit_resample(X, y)
    pd.testing.assert_frame_equal(X, X.loc[kept])
    pd.testing.assert_series_equal(y, y.loc[kept])
def test_classification_severely_imbalanced_binary_simple():
    """Severe binary imbalance (only 5 rows of class 0) disables resampling."""
    X = pd.DataFrame({"a": list(range(1000))})
    y = pd.Series([1 if i % 200 != 0 else 0 for i in range(1000)])  # 5 rows of class 0
    sampler = BalancedClassificationSampler()
    kept = sampler.fit_resample(X, y)
    pd.testing.assert_frame_equal(X, X.loc[kept])
    pd.testing.assert_series_equal(y, y.loc[kept])
def test_classification_severely_imbalanced_multiclass_simple():
    """Severe multiclass imbalance (9 rows each of classes 1 and 2) is left alone."""
    X = pd.DataFrame({"a": list(range(1000))})
    y = pd.Series([0 if i % 55 != 0 else (1 + i % 2) for i in range(1000)])
    sampler = BalancedClassificationSampler()
    kept = sampler.fit_resample(X, y)
    pd.testing.assert_frame_equal(X, X.loc[kept])
    pd.testing.assert_series_equal(y, y.loc[kept])
@pytest.mark.parametrize("balanced_ratio", [1, 2, 3, 4, 5, 10])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_balanced_ratio(num_classes, balanced_ratio):
    """Majority class is downsampled to balanced_ratio times the minority size."""
    X = pd.DataFrame({"a": list(range(1000))})
    if num_classes == 2:
        y = pd.Series([0] * 750 + [1] * 250)
    else:
        y = pd.Series([0] * 600 + [1] * 200 + [2] * 200)
    sampler = BalancedClassificationSampler(balanced_ratio=balanced_ratio)
    kept = sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if balanced_ratio >= 3:
        # already within the requested ratio: nothing removed
        pd.testing.assert_frame_equal(X, sampled_X)
        pd.testing.assert_series_equal(y, sampled_y)
    else:
        minority = {2: 250, 3: 200}[num_classes]
        expected_majority = balanced_ratio * minority
        assert len(sampled_X) == expected_majority + minority * (num_classes - 1)
        assert len(sampled_y) == len(sampled_X)
        assert sampled_y.value_counts().values[0] == expected_majority
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_min_samples(num_classes, min_samples):
    """min_samples puts a floor under the downsampled majority class size."""
    X = pd.DataFrame({"a": list(range(1000))})
    if num_classes == 2:
        y = pd.Series([0] * 900 + [1] * 100)
    else:
        y = pd.Series([0] * 799 + [1] * 101 + [2] * 100)
    sampler = BalancedClassificationSampler(
        balanced_ratio=1, min_samples=min_samples)
    kept = sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if min_samples <= 100:
        # 1:1 balance is achievable without hitting the floor
        assert len(sampled_X) == {2: 200, 3: 300}[num_classes]
        assert sampled_y.value_counts().values[0] == 100
    else:
        # floor kicks in: majority kept at min_samples, minority class(es) whole
        minority_total = 100 if num_classes == 2 else 201
        assert len(sampled_X) == min_samples + minority_total
        assert sampled_y.value_counts().values[0] == min_samples
@pytest.mark.parametrize("min_percentage", [0.01, 0.05, 0.2, 0.3])
@pytest.mark.parametrize("num_classes", [2, 3])
def test_classification_imbalanced_min_percentage(num_classes, min_percentage):
    """min_percentage decides when imbalance is 'severe' and resampling is skipped."""
    X = pd.DataFrame({"a": list(range(1000))})
    if num_classes == 2:
        y = pd.Series([0] * 950 + [1] * 50)
    else:
        y = pd.Series([0] * 820 + [1] * 90 + [2] * 90)
    sampler = BalancedClassificationSampler(
        balanced_ratio=1, min_percentage=min_percentage)
    kept = sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if min_percentage <= 0.05:
        # not severe: balance 1:1 subject to the default min_samples of 100
        assert len(sampled_X) == {2: 150, 3: 280}[num_classes]
        assert sampled_y.value_counts().values[0] == 100
    else:
        # severe imbalance: leave the data untouched
        pd.testing.assert_frame_equal(sampled_X, X)
@pytest.mark.parametrize("min_percentage", [0.01, 0.05, 0.2, 0.3])
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
def test_classification_imbalanced_severe_imbalance_binary(min_samples, min_percentage):
    """Both thresholds must trip for binary imbalance to count as severe."""
    X = pd.DataFrame({"a": list(range(1000))})
    y = pd.Series([0] * 850 + [1] * 150)  # minority class is 15% of the data
    sampler = BalancedClassificationSampler(
        balanced_ratio=2, min_samples=min_samples, min_percentage=min_percentage)
    kept = sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if min_samples >= 200 and min_percentage >= 0.2:
        # severe imbalance: untouched
        pd.testing.assert_frame_equal(sampled_X, X)
    else:
        # not severe: balance 2:1, respecting the min_samples floor
        majority = max(min_samples, 2 * 150)
        assert len(sampled_X) == 150 + majority
        assert sampled_y.value_counts().values[0] == majority
@pytest.mark.parametrize("balanced_ratio", [1, 2, 3, 4.5, 5, 6, 10])
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
def test_classification_imbalanced_normal_imbalance_binary(min_samples, balanced_ratio):
    """With a 15% minority the data is never 'severe'; ratio and floor interact."""
    X = pd.DataFrame({"a": list(range(1000))})
    y = pd.Series([0] * 850 + [1] * 150)
    sampler = BalancedClassificationSampler(
        balanced_ratio=balanced_ratio, min_samples=min_samples)
    kept = sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if balanced_ratio >= 6:
        # already balanced at this ratio: untouched
        pd.testing.assert_frame_equal(sampled_X, X)
    else:
        majority = max(min_samples, int(balanced_ratio * 150))
        assert len(sampled_X) == 150 + majority
        assert sampled_y.value_counts().values[0] == majority
@pytest.mark.parametrize("data_type", ['n', 's'])
@pytest.mark.parametrize("min_percentage", [0.01, 0.05, 0.2, 0.3])
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
def test_classification_imbalanced_severe_imbalance_multiclass(data_type, min_samples, min_percentage):
    """Severe-imbalance detection works for numeric and string labels alike."""
    X = pd.DataFrame({"a": list(range(1000))})
    if data_type == 'n':
        y = pd.Series([0] * 800 + [1] * 100 + [2] * 100)  # minorities are 10% each
    else:
        y = pd.Series(["class_1"] * 800 + ["class_2"] * 100 + ["class_3"] * 100)
    sampler = BalancedClassificationSampler(
        balanced_ratio=2, min_samples=min_samples, min_percentage=min_percentage)
    kept = sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if min_samples >= 200 and min_percentage >= 0.2:
        # severe imbalance: untouched
        pd.testing.assert_frame_equal(sampled_X, X)
    else:
        majority = max(min_samples, 2 * 100)
        assert len(sampled_X) == 200 + majority
        assert sampled_y.value_counts().values[0] == majority
@pytest.mark.parametrize("data_type", ['n', 's'])
@pytest.mark.parametrize("balanced_ratio", [1, 2, 3, 4.5, 5, 6, 10])
@pytest.mark.parametrize("min_samples", [10, 50, 100, 200, 500])
def test_classification_imbalanced_normal_imbalance_multiclass(data_type, min_samples, balanced_ratio):
    """Ratio/floor interaction for a 10% multiclass minority, any label dtype."""
    X = pd.DataFrame({"a": list(range(1000))})
    if data_type == 'n':
        y = pd.Series([0] * 800 + [1] * 100 + [2] * 100)  # minorities are 10% each
    else:
        y = pd.Series(["class_1"] * 800 + ["class_2"] * 100 + ["class_3"] * 100)
    sampler = BalancedClassificationSampler(
        balanced_ratio=balanced_ratio, min_samples=min_samples)
    kept = sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if balanced_ratio > 6:  # note: strictly greater here, unlike the binary test
        pd.testing.assert_frame_equal(sampled_X, X)
    else:
        majority = max(min_samples, int(balanced_ratio * 100))
        assert len(sampled_X) == 200 + majority
        assert sampled_y.value_counts().values[0] == majority
@pytest.mark.parametrize("balanced_ratio", [1, 2, 3, 4.5, 5, 6, 10])
@pytest.mark.parametrize("random_seed", [0, 1, 2, 300])
def test_classification_imbalanced_random_seed(random_seed, balanced_ratio):
    """Two samplers built with the same seed must make identical choices."""
    X = pd.DataFrame({"a": list(range(1000))})
    y = pd.Series([0] * 800 + [1] * 200)
    first = BalancedClassificationSampler(
        balanced_ratio=balanced_ratio, random_seed=random_seed)
    second = BalancedClassificationSampler(
        balanced_ratio=balanced_ratio, random_seed=random_seed)
    kept1 = first.fit_resample(X, y)
    X1, y1 = X.loc[kept1], y.loc[kept1]
    kept2 = second.fit_resample(X, y)
    X2, y2 = X.loc[kept2], y.loc[kept2]
    if balanced_ratio >= 4.5:
        # 4:1 data is already balanced at these ratios
        pd.testing.assert_frame_equal(X1, X)
    else:
        assert len(X2) == 200 + int(balanced_ratio * 200)
        assert y2.value_counts().values[0] == int(balanced_ratio * 200)
        pd.testing.assert_frame_equal(X1, X2)
        pd.testing.assert_series_equal(y1, y2)
@pytest.mark.parametrize("index", [[f'hello_{i}' for i in range(1000)],
                                   # BUG FIX: random.shuffle() shuffles in place and
                                   # returns None, so the original parametrize silently
                                   # passed index=None (default RangeIndex) here.
                                   # random.sample(seq, len(seq)) returns a shuffled copy.
                                   random.sample([i + 0.5 for i in range(1000)], 1000),
                                   pd.MultiIndex.from_arrays([
                                       [f"index_{i}" for i in range(1000)],
                                       [i for i in range(1000)]
                                   ])])
def test_classification_imbalanced_custom_indices(index):
    """Resampling must preserve and subset custom indices
    (string, shuffled float, and MultiIndex)."""
    X = pd.DataFrame({"a": [i for i in range(1000)]}, index=index)
    y = pd.Series([0] * 900 + [1] * 100, index=index)
    bcs = BalancedClassificationSampler()
    indices = bcs.fit_resample(X, y)
    X2 = X.loc[indices]
    y2 = y.loc[indices]
    assert len(X2) == 500
    assert all(y2.value_counts(0).values == [400, 100])
    # the sampled rows keep their original (custom) index labels, aligned
    assert all(y2.index.values == X2.index.values)
    assert len(set(y2.index.values).intersection(set(y.index.values))) == len(y2)
@pytest.mark.parametrize("size", [100, 200, 500])
def test_classification_imbalanced_small_dataset(size):
    """Small datasets interact with the min_samples floor (default 100, then 40)."""
    X = pd.DataFrame({"a": list(range(size))})
    y = pd.Series([0] * int(0.8 * size) + [1] * int(0.2 * size))

    default_sampler = BalancedClassificationSampler(balanced_ratio=1)
    kept = default_sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if size == 100:
        pd.testing.assert_frame_equal(sampled_X, X)
    else:
        assert len(sampled_X) == 0.2 * size + 100

    low_floor_sampler = BalancedClassificationSampler(balanced_ratio=1, min_samples=40)
    kept = low_floor_sampler.fit_resample(X, y)
    sampled_X, sampled_y = X.loc[kept], y.loc[kept]
    if size == 500:
        # minority is 100, so the majority is balanced down to 100 as well
        assert len(sampled_X) == 200
        assert sampled_y.value_counts(normalize=True).values[0] == 0.5
    else:
        assert len(sampled_X) == 0.2 * size + 40
        assert sampled_y.value_counts().values[0] == 40
def test_classification_imbalanced_multiple_multiclass():
X = pd.DataFrame({"a": [i for i in range(10000)]})
y = pd.Series([0] * 4900 + [1] * 4900 + [2] * 200) # minority class is 2% of data
bcs = BalancedClassificationSampler(min_samples=201)
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 = y.loc[indices]
# severe imbalanace case, don't resample
pd.testing.assert_frame_equal(X, X2)
pd.testing.assert_series_equal(y, y2)
bcs = BalancedClassificationSampler()
indices = bcs.fit_resample(X, y)
X2 = X.loc[indices]
y2 | |
np.linspace(0.0, duration, num=N)
node_map = {}
for func in self.instance_constraint_function_atoms:
time_value = func.args[0]
time_index = np.argmin(np.abs(time_vector - time_value))
free_index = determine_free_index(time_index,
func.__class__(self.time_symbol))
node_map[func] = free_index
self.instance_constraints_free_index_map = node_map
    def _instance_constraints_func(self):
        """Returns a function that evaluates the instance constraints given
        the free optimization variables."""
        # Symbolic placeholder for the flat free-optimization vector; lets
        # lambdify index into it with the precomputed positions.
        free = sm.DeferredVector('FREE')
        # Replace each instance constraint function, e.g. x(0.0), with its
        # entry in the free vector (positions were stored on
        # self.instance_constraints_free_index_map).
        def_map = {k: free[v] for k, v in
                   self.instance_constraints_free_index_map.items()}
        subbed_constraints = [con.subs(def_map) for con in
                              self.instance_constraints]
        # Known parameters become trailing arguments of the lambdified
        # function so they can be bound once, below.
        f = sm.lambdify(([free] + list(self.known_parameter_map.keys())),
                        subbed_constraints, modules=[{'ImmutableMatrix':
                                                      np.array}, "numpy"])
        # The returned closure only needs the free vector; known parameter
        # values are baked in at call time from the instance's map.
        return lambda free: f(free, *self.known_parameter_map.values())
    def _instance_constraints_jacobian_indices(self):
        """Returns the row and column indices of the non-zero values in the
        Jacobian of the constraints."""
        idx_map = self.instance_constraints_free_index_map
        # Instance constraint rows are appended after the equation-of-motion
        # defect constraints, which occupy n * (N - 1) rows.
        num_eom_constraints = self.num_states * (self.num_collocation_nodes - 1)
        rows = []
        cols = []
        for i, con in enumerate(self.instance_constraints):
            # NOTE(review): atoms() returns a set; the column order here
            # depends on set iteration order matching the order used when the
            # Jacobian values are evaluated elsewhere -- confirm consistency.
            funcs = con.atoms(sm.Function)
            indices = [idx_map[f] for f in funcs]
            # Every partial of constraint i lives on the same Jacobian row.
            row_idxs = num_eom_constraints + i * np.ones(len(indices),
                                                         dtype=int)
            rows += list(row_idxs)
            cols += indices
        return np.array(rows), np.array(cols)
def _instance_constraints_jacobian_values_func(self):
"""Retruns the non-zero values of the constraint Jacobian associated
with the instance constraints."""
free = sm.DeferredVector('FREE')
def_map = {k: free[v] for k, v in
self.instance_constraints_free_index_map.items()}
funcs = []
num_vals_per_func = []
for con in self.instance_constraints:
partials = list(con.atoms(sm.Function))
num_vals_per_func.append(len(partials))
jac = sm.Matrix([con]).jacobian(partials)
jac = jac.subs(def_map)
funcs.append(sm.lambdify(([free] +
list(self.known_parameter_map.keys())),
jac, modules=[{'ImmutableMatrix':
np.array}, "numpy"]))
l = np.sum(num_vals_per_func)
def wrapped(free):
arr = np.zeros(l)
j = 0
for i, (f, num) in enumerate(zip(funcs, num_vals_per_func)):
arr[j:j + num] = f(free, *self.known_parameter_map.values())
j += num
return arr
return wrapped
    def _gen_multi_arg_con_func(self):
        """Instantiates a function that evaluates the constraints given all
        of the arguments of the functions, i.e. not just the free
        optimization variables.

        Instantiates
        ------------
        _multi_arg_con_func : function
            A function which returns the numerical values of the constraints
            at collocation nodes 2,...,N.

        Notes
        -----
        args:

        - all current states (x1i, ..., xni)
        - all previous states (x1p, ... xnp)
        - all current specifieds (s1i, ..., smi)
        - parameters (c1, ..., cb)
        - time interval (h)

        args: (x1i, ..., xni, x1p, ... xnp, s1i, ..., smi, c1, ..., cb, h)

        n: num states, m: num specified, b: num parameters

        The function should evaluate and return an array::

            [con_1_2, ..., con_1_N, con_2_2, ...,
             con_2_N, ..., con_n_2, ..., con_n_N]

        for n states and N-1 constraints at the time points.

        """
        xi_syms = self.current_discrete_state_symbols
        xp_syms = self.previous_discrete_state_symbols
        xn_syms = self.next_discrete_state_symbols
        si_syms = self.current_discrete_specified_symbols
        sn_syms = self.next_discrete_specified_symbols
        h_sym = self.time_interval_symbol
        constant_syms = self.known_parameters + self.unknown_parameters
        # The slice bounds below select which collocation nodes act as the
        # "current" node and which as the "adjacent" node of each defect
        # constraint: backward Euler pairs node i with node i-1 (i = 1..N-1),
        # midpoint pairs node i with node i+1 (i = 0..N-2).
        if self.integration_method == 'backward euler':
            args = [x for x in xi_syms] + [x for x in xp_syms]
            args += [s for s in si_syms] + list(constant_syms) + [h_sym]
            current_start = 1
            current_stop = None
            adjacent_start = None
            adjacent_stop = -1
        elif self.integration_method == 'midpoint':
            args = [x for x in xi_syms] + [x for x in xn_syms]
            args += [s for s in si_syms] + [s for s in sn_syms]
            args += list(constant_syms) + [h_sym]
            current_start = None
            current_stop = -1
            adjacent_start = 1
            adjacent_stop = None
        # Compile the discrete equations of motion into a fast vectorized
        # evaluator; constants and h are broadcast, not per-node arrays.
        f = ufuncify_matrix(args, self.discrete_eom,
                            const=constant_syms + (h_sym,),
                            tmp_dir=self.tmp_dir, parallel=self.parallel)
        def constraints(state_values, specified_values, constant_values,
                        interval_value):
            """Returns a vector of constraint values given all of the
            unknowns in the equations of motion over the 2, ..., N time
            steps.

            Parameters
            ----------
            states : ndarray, shape(n, N)
                The array of n states through N time steps.
            specified_values : ndarray, shape(m, N) or shape(N,)
                The array of m specifieds through N time steps.
            constant_values : ndarray, shape(b,)
                The array of b parameters.
            interval_value : float
                The value of the discretization time interval.

            Returns
            -------
            constraints : ndarray, shape(N-1,)
                The array of constraints from t = 2, ..., N.
                [con_1_2, ..., con_1_N, con_2_2, ...,
                 con_2_N, ..., con_n_2, ..., con_n_N]
            """
            if state_values.shape[0] < 2:
                raise ValueError('There should always be at least two states.')
            assert state_values.shape == (self.num_states,
                                          self.num_collocation_nodes)
            x_current = state_values[:, current_start:current_stop]  # n x N - 1
            x_adjacent = state_values[:, adjacent_start:adjacent_stop]  # n x N - 1
            # 2n x N - 1
            args = [x for x in x_current] + [x for x in x_adjacent]
            # 2n + m x N - 1
            # Specifieds may arrive as a 2D (m, N) array, a 1D (N,) array for
            # a single input, or be empty when the system has no inputs.
            if len(specified_values.shape) == 2:
                assert specified_values.shape == \
                    (self.num_input_trajectories,
                     self.num_collocation_nodes)
                si = specified_values[:, current_start:current_stop]
                args += [s for s in si]
                if self.integration_method == 'midpoint':
                    sn = specified_values[:, adjacent_start:adjacent_stop]
                    args += [s for s in sn]
            elif len(specified_values.shape) == 1 and specified_values.size != 0:
                assert specified_values.shape == \
                    (self.num_collocation_nodes,)
                si = specified_values[current_start:current_stop]
                args += [si]
                if self.integration_method == 'midpoint':
                    sn = specified_values[adjacent_start:adjacent_stop]
                    args += [sn]
            args += [c for c in constant_values]
            args += [interval_value]
            num_constraints = state_values.shape[1] - 1
            # TODO : Move this to an attribute of the class so that it is
            # only initialized once and just reuse it on each evaluation of
            # this function.
            result = np.empty((num_constraints, state_values.shape[0]))
            # Transpose so all constraints for state 1 come first, then state
            # 2, etc., matching the layout documented above.
            return f(result, *args).T.flatten()
        self._multi_arg_con_func = constraints
def jacobian_indices(self):
"""Returns the row and column indices for the non-zero values in the
constraint Jacobian.
Returns
-------
jac_row_idxs : ndarray, shape(2 * n + q + r,)
The row indices for the non-zero values in the Jacobian.
jac_col_idxs : ndarray, shape(n,)
The column indices for the non-zero values in the Jacobian.
"""
N = self.num_collocation_nodes
n = self.num_states
num_constraint_nodes = N - 1
if self.integration_method == 'backward euler':
num_partials = n * (2 * n + self.num_unknown_input_trajectories +
self.num_unknown_parameters)
elif self.integration_method == 'midpoint':
num_partials = n * (2 * n + 2 *
self.num_unknown_input_trajectories +
self.num_unknown_parameters)
num_non_zero_values = num_constraint_nodes * num_partials
if self.instance_constraints is not None:
ins_row_idxs, ins_col_idxs = \
self._instance_constraints_jacobian_indices()
num_non_zero_values += len(ins_row_idxs)
jac_row_idxs = np.empty(num_non_zero_values, dtype=int)
jac_col_idxs = np.empty(num_non_zero_values, dtype=int)
"""
The symbolic derivative matrix for a single constraint node follows
these patterns:
Backward Euler
--------------
i: ith, p: ith-1
For example:
x1i = the first state at the ith constraint node
uqi = the qth state at the ith constraint node
uqn = the qth state at the ith+1 constraint node
[x1] [x1i, ..., xni, x1p, ..., xnp, u1i, .., uqi, p1, ..., pr]
[. ]
[. ]
[. ]
[xn]
Midpoint
--------
i: ith, n: ith+1
[x1] [x1i, ..., xni, x1n, ..., xnn, u1i, .., uqi, u1n, ..., uqn, p1, ..., pp]
[. ]
[. ]
[. ]
[xn]
Each of these matrices are evaulated at N-1 constraint nodes and
then the 3D matrix is flattened into a 1d array. The backward euler
uses nodes 1 <= i <= N-1 and the midpoint uses 0 <= i <= N - 2. So
the flattened arrays looks like:
M = N-1
P = N-2
Backward Euler
--------------
i=1 x1 | [x11, ..., xn1, x10, ..., xn0, u11, .., uq1, p1, ..., pr,
x2 | x11, ..., xn1, x10, ..., xn0, u11, .., uq1, p1, ..., pr,
... | ...,
xn | x11, ..., xn1, x10, ..., xn0, u11, .., uq1, p1, ..., pr,
i=2 x1 | x12, ..., xn2, x11, ..., xn1, u12, .., uq2, p1, ..., pr,
x2 | x12, ..., xn2, x11, ..., xn1, u12, .., uq2, p1, ..., pr,
... | ...,
xn | x12, ..., xn2, x11, ..., xn1, u12, .., uq2, p1, ..., pr,
| ...,
i=M x1 | x1M, ..., xnM, x1P, ..., xnP, u1M, .., uqM, p1, ..., pr,
x2 | x1M, ..., xnM, x1P, ..., xnP, u1M, .., uqM, p1, ..., pr,
... | ...,
xn | x1M, ..., xnM, x1P, ..., xnP, u1M, .., uqM, p1, ..., pr]
Midpoint
--------
i=0 x1 | [x10, ..., xn0, x11, ..., xn1, u10, .., uq0, u11, .., uq1, p1, ..., pr,
x2 | x10, ..., xn0, x11, ..., xn1, u10, .., uq0, u11, .., uq1, p1, ..., pr,
... | ...,
xn | x10, ..., xn0, x11, ..., xn1, u10, .., uq0, u11, .., uq1, p1, ..., pr,
i=1 x1 | x11, ..., xn1, x12, ..., xn2, u11, .., uq1, u12, .., uq2, p1, ..., pr,
x2 | x11, ..., xn1, x12, ..., xn2, u11, .., uq1, u12, .., uq2, p1, ..., pr,
... | ...,
xn | x11, | |
this round."
if type(prize) == int:
print playerOrder[j + 1] + " now takes possession of The Wheel."
else:
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
if type(prize) == int:
break
if prize == "bankrupt":
print ""
print playerOrder[j], "spun for BANKRUPT, bringing his total prize for this round to $0."
playerOrder_val_round[j][0] = 0
print "Possession of The Wheel passes to", playerOrder[(j + 1)] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if prize == "loseATurn":
print ""
print playerOrder[j], "spun for LOSE A TURN!"
"Sorry, " + playerOrder[j] + ". Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if selection == 0:
guess = get_guessWord()
if guess == hidden_word:
incom_word = guess
break
else:
print "Sorry, " + playerOrder[j] + ". That is not the correct puzzle solution."
print "Possession of the Wheel passes to " + playerOrder[j + 1] + "."
print ""
raw_input(string.center(("-" * 80), 80))
break
if incom_word == hidden_word:
## print "j is equal to:", j
playerOrder_val[j][0] = playerOrder_val_round[j][0] + playerOrder_val[j][0]
print "Congratulations,", playerOrder[j] + ". You correctly solved the puzzle:", string.upper(hidden_word) + "."
break
## if incom_word == hidden_word:
## break
break
return playerOrder_val
def round_three(playerOrder, playerOrder_val):
## print "playerOrder_val is equal to:", playerOrder_val
game_round = 1
hidden_word = choose_word(wordlist).lower()
playerOrder_val_round = [[0, 0], [0, 0], [0, 0]]
alpha = string.ascii_lowercase
disp_word = "_ " * len(hidden_word)
incom_word = "_" * len(hidden_word)
## print "The hidden_word is:", hidden_word
counter = 0
countDown = 11
while countDown > 0:
## for i in range(counter):
## counter -= 1
## print "counter is equal to:", counter
print ""
print "The third round puzzle is:", disp_word
for j in [0, 1, 2, 0, 1, 2, 0, 1, 2, 0]:
## counter -= 1
## print "counter is equal to:", counter
possession = True
if countDown == 0:
break
while possession == True:
countDown -= 1
if countDown == 0:
break
## print "counter is equal to:", counter
selection = 0
if counter > 0:
print disp_word
counter += 1
print ""
print "Remaining letters are:", str(string.upper(alpha))
print ""
selection = get_playerSelection(playerOrder, hidden_word, disp_word, j, playerOrder_val_round)
if selection == 3:
print ""
print "You chose to buy a vowel."
print ""
playerOrder_val_round[j][0] = (playerOrder_val_round[j][0] - 250)
guess = get_guessVowel()
guess = string.lower(guess)
if guess in alpha:
alpha = alpha.replace(guess, "")
else:
print ""
print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
print playerOrder[j + 1] + " now takes possession of The Wheel."
print ""
## possession = False
break
print ""
print string.center(("-" * 80), 80)
print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
raw_input(string.center(("-" * 80), 80))
print ""
if guess in hidden_word:
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 0 or letter_app == 1:
if letter_app == 0:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
print ""
possession = False
break
if letter_app == 1:
print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
else:
print "Good guess,", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
print ""
print string.center(("-" * 80), 80)
print ""
else:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print ""
possession = False
break
if selection == 1:
prize = get_prize(game_round)
subPrize = prize
if (type(prize) is int) or (prize == "freePlay"):
freePlay_choice = 0
if type(prize) is int:
print ""
print playerOrder[j] + " spun for $" + str(prize) + "!"
print ""
if prize is "freePlay":
print ""
print playerOrder[j], "spun for a FREE PLAY!"
print playerOrder[j] + ", you may solve or guess a letter (including vowels) without penalty."
print ""
selection_freePlay = get_freePlayChoice(playerOrder, j)
subPrize = 500
if selection_freePlay == 1:
guess = get_guessfreePlay()
if selection_freePlay == 2:
guess_word = get_guessWord()
if guess_word == hidden_word:
incom_word = guess_word
possession = True
break
else:
print ""
print "Sorry, that is not the solution to the puzzle."
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
guess = 1
if type(prize) is int:
guess = get_guessConsonant()
if type(guess) == str:
guess = string.lower(guess)
if guess != 1:
if guess in alpha:
alpha = alpha.replace(guess, "")
print ""
print string.center(("-" * 80), 80)
print string.center(("Vanna, does the puzzle contain any '" + guess.upper() + "'s?"), 80)
raw_input(string.center(("-" * 80), 80))
print ""
if guess in hidden_word or prize == "freePlay":
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
disp_word = disp_word[0:(i * 2)] + guess + disp_word[((i * 2) + 1):]
incom_word = incom_word[0:i] + guess + incom_word[(i + 1):]
letter_app = 0
for i in range(len(hidden_word)):
if hidden_word[i] == guess:
letter_app += 1
if letter_app == 0:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
if prize == "freePlay":
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
else:
print "Possession of The Wheel passes to " + playerOrder[j + 1] + "."
print string.center(("-" * 80), 80)
print ""
possession = False
break
if letter_app == 1:
print disp_word
print ""
print "Good guess,", playerOrder[j] + "! There is 1", guess.upper(), "in the puzzle!"
print "That adds $" + str(subPrize) + " to your total prize score!"
print ""
playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
print ""
raw_input(string.center(("-" * 80), 80))
print ""
possession = True
if letter_app >= 2:
print disp_word
print ""
print "Good guess:", playerOrder[j] + "! There are", letter_app, "'" + guess.upper() + "'s in the puzzle!"
print "That adds $" + str(subPrize * letter_app) + " to your total prize score!"
print ""
playerOrder_val_round[j][0] = playerOrder_val_round[j][0] + (subPrize * letter_app)
print string.center(("-" * 80), 80)
print ""
print string.center((playerOrder[j] + "'s total prize score is now $" + str(playerOrder_val_round[j][0]) + "!"), 80)
print ""
raw_input(string.center(("-" * 80), 80))
print ""
possession = True
if incom_word == hidden_word:
break
else:
print "I'm sorry", playerOrder[j] + ", but there are no '" + guess.upper() + "'s in the puzzle."
if prize == "freePlay":
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
else:
print "Possession of The Wheel passes to " + playerOrder[j] + "."
possession = False
break
else:
print ""
print "Sorry, '" + string.upper(guess) + "' has already been called in this round."
if type(prize) == int:
print playerOrder[j + 1] + " now takes possession of The Wheel."
else:
print "Your Free Play spin, however, means that you keep possession of The Wheel."
print ""
if type(prize) == int:
break
if prize == "bankrupt":
print ""
print playerOrder[j], "spun for BANKRUPT, bringing his total prize for this round to $0."
playerOrder_val_round[j][0] = 0
print "Possession of The Wheel passes to", playerOrder[(j + 1)] + "."
print ""
raw_input(string.center(("-" * 80), 80))
print ""
break
if prize == "loseATurn":
print ""
print playerOrder[j], "spun for LOSE A TURN!"
"Sorry, " + playerOrder[j] + ". Possession | |
"""
System tests.
<NAME> <<EMAIL>>
pytest tmpdir docs:
http://doc.pytest.org/en/latest/tmpdir.html#the-tmpdir-fixture
"""
import copy
import shutil
import re
from pathlib import Path
import textwrap
import click.testing
from mailmerge.__main__ import main
from . import utils
def test_no_options(tmpdir):
    """Verify help message when called with no options.

    Run mailmerge at the CLI with no options inside an empty temporary
    directory so that no default input files are found.
    """
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, [])
    assert outcome.exit_code == 1
    assert 'Error: can\'t find template "mailmerge_template.txt"' in outcome.output
    assert "https://github.com/awdeorio/mailmerge" in outcome.output
def test_sample(tmpdir):
    """Verify --sample creates sample input files."""
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, ["--sample"])
    assert not outcome.exception
    assert outcome.exit_code == 0
    # All three sample input files should now exist.
    for filename in ("mailmerge_template.txt",
                     "mailmerge_database.csv",
                     "mailmerge_server.conf"):
        assert Path(tmpdir/filename).exists()
    # The CLI should report each created file.
    for message in ("Created sample template",
                    "Created sample database",
                    "Created sample config"):
        assert message in outcome.output
def test_sample_clobber_template(tmpdir):
    """Verify --sample won't clobber template if it already exists."""
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        # Pre-create the file so --sample must refuse to overwrite it.
        Path("mailmerge_template.txt").touch()
        outcome = cli.invoke(main, ["--sample"])
    assert outcome.exit_code == 1
    assert "Error: file exists: mailmerge_template.txt" in outcome.output
def test_sample_clobber_database(tmpdir):
    """Verify --sample won't clobber database if it already exists."""
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        # Pre-create the file so --sample must refuse to overwrite it.
        Path("mailmerge_database.csv").touch()
        outcome = cli.invoke(main, ["--sample"])
    assert outcome.exit_code == 1
    assert "Error: file exists: mailmerge_database.csv" in outcome.output
def test_sample_clobber_config(tmpdir):
    """Verify --sample won't clobber config if it already exists."""
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        # Pre-create the file so --sample must refuse to overwrite it.
        Path("mailmerge_server.conf").touch()
        outcome = cli.invoke(main, ["--sample"])
    assert outcome.exit_code == 1
    assert "Error: file exists: mailmerge_server.conf" in outcome.output
def test_defaults(tmpdir):
    """When no options are provided, use default input file names."""
    cli = click.testing.CliRunner()
    # First generate the default input files.
    with tmpdir.as_cwd():
        sample_outcome = cli.invoke(main, ["--sample"])
    assert not sample_outcome.exception
    assert sample_outcome.exit_code == 0
    # Then run with no options: the defaults should be picked up.
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, [])
    assert not outcome.exception
    assert outcome.exit_code == 0
    for expected in ("message 1 sent",
                     "Limit was 1 message",
                     "This was a dry run"):
        assert expected in outcome.output
def test_bad_limit(tmpdir):
    """Verify --limit with bad value."""
    # Simple template
    Path(tmpdir/"mailmerge_template.txt").write_text(textwrap.dedent("""\
        TO: {{email}}
        FROM: <EMAIL>
        Hello world
    """), encoding="utf8")
    # Simple database with two entries
    Path(tmpdir/"mailmerge_database.csv").write_text(textwrap.dedent("""\
        email
        <EMAIL>
        <EMAIL>
    """), encoding="utf8")
    # Simple unsecure server config
    Path(tmpdir/"mailmerge_server.conf").write_text(textwrap.dedent("""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
    """), encoding="utf8")
    # A negative limit should be rejected by click's option validation.
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, ["--dry-run", "--limit", "-1"])
    assert outcome.exit_code == 2
    assert "Error: Invalid value" in outcome.output
def test_limit_combo(tmpdir):
    """Verify --limit 1 --no-limit results in no limit."""
    # Simple template
    Path(tmpdir/"mailmerge_template.txt").write_text(textwrap.dedent("""\
        TO: {{email}}
        FROM: <EMAIL>
        Hello world
    """), encoding="utf8")
    # Simple database with two entries
    Path(tmpdir/"mailmerge_database.csv").write_text(textwrap.dedent("""\
        email
        <EMAIL>
        <EMAIL>
    """), encoding="utf8")
    # Simple unsecure server config
    Path(tmpdir/"mailmerge_server.conf").write_text(textwrap.dedent("""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
    """), encoding="utf8")
    # --no-limit should win: both database rows produce a message.
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, ["--no-limit", "--limit", "1"])
    assert not outcome.exception
    assert outcome.exit_code == 0
    assert "message 1 sent" in outcome.output
    assert "message 2 sent" in outcome.output
    assert "Limit was 1" not in outcome.output
def test_template_not_found(tmpdir):
    """Verify error when template input file not found."""
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, ["--template", "notfound.txt"])
    assert outcome.exit_code == 1
    assert "Error: can't find template" in outcome.output
def test_database_not_found(tmpdir):
    """Verify error when database input file not found."""
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        # Template must exist so the failure is specifically the database.
        Path("mailmerge_template.txt").touch()
        outcome = cli.invoke(main, ["--database", "notfound.csv"])
    assert outcome.exit_code == 1
    assert "Error: can't find database" in outcome.output
def test_config_not_found(tmpdir):
    """Verify error when config input file not found."""
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        # Template and database must exist so the failure is the config.
        Path("mailmerge_template.txt").touch()
        Path("mailmerge_database.csv").touch()
        outcome = cli.invoke(main, ["--config", "notfound.conf"])
    assert outcome.exit_code == 1
    assert "Error: can't find config" in outcome.output
def test_help():
    """Verify -h or --help produces a help message."""
    cli = click.testing.CliRunner()
    long_form = cli.invoke(main, ["--help"])
    assert long_form.exit_code == 0
    assert "Usage:" in long_form.stdout
    assert "Options:" in long_form.stdout
    short_form = cli.invoke(main, ["-h"])  # Short option is an alias
    assert long_form.stdout == short_form.stdout
def test_version():
    """Verify --version produces a version."""
    cli = click.testing.CliRunner()
    outcome = cli.invoke(main, ["--version"])
    assert not outcome.exception
    assert outcome.exit_code == 0
    assert "version" in outcome.output
def test_bad_template(tmpdir):
    """Template mismatch with database header should produce an error."""
    # Template has a bad key
    Path(tmpdir/"mailmerge_template.txt").write_text(textwrap.dedent("""\
        TO: {{error_not_in_database}}
        SUBJECT: Testing mailmerge
        FROM: <EMAIL>
        Hello world
    """), encoding="utf8")
    # Normal database
    Path(tmpdir/"mailmerge_database.csv").write_text(textwrap.dedent("""\
        email
        <EMAIL>
    """), encoding="utf8")
    # Normal, unsecure server config
    Path(tmpdir/"mailmerge_server.conf").write_text(textwrap.dedent("""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
    """), encoding="utf8")
    # Run mailmerge, which should exit 1
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, [])
    assert outcome.exit_code == 1
    # Verify the undefined template variable is reported.
    assert "template.txt: 'error_not_in_database' is undefined" in outcome.output
def test_bad_database(tmpdir):
    """Database read error should produce a sane error."""
    # Normal template
    Path(tmpdir/"mailmerge_template.txt").write_text(textwrap.dedent("""\
        TO: <EMAIL>
        FROM: <EMAIL>
        {{message}}
    """), encoding="utf8")
    # Database with unmatched quote
    Path(tmpdir/"mailmerge_database.csv").write_text(textwrap.dedent("""\
        message
        "hello world
    """), encoding="utf8")
    # Normal, unsecure server config
    Path(tmpdir/"mailmerge_server.conf").write_text(textwrap.dedent("""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
    """), encoding="utf8")
    # Run mailmerge, which should exit 1
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, [])
    assert outcome.exit_code == 1
    # Verify the CSV parse error is reported with its location.
    assert "database.csv:1: unexpected end of data" in outcome.output
def test_bad_config(tmpdir):
    """Config containing an error should produce an error."""
    # Normal template
    Path(tmpdir/"mailmerge_template.txt").write_text(textwrap.dedent("""\
        TO: <EMAIL>
        FROM: <EMAIL>
    """), encoding="utf8")
    # Normal database
    Path(tmpdir/"mailmerge_database.csv").write_text(textwrap.dedent("""\
        dummy
        asdf
    """), encoding="utf8")
    # Server config is missing host
    Path(tmpdir/"mailmerge_server.conf").write_text(textwrap.dedent("""\
        [smtp_server]
        port = 25
    """), encoding="utf8")
    # Run mailmerge, which should exit 1
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, [])
    assert outcome.exit_code == 1
    # Verify the missing option is reported.
    assert "server.conf: No option 'host' in section: 'smtp_server'" in \
        outcome.output
def test_attachment(tmpdir):
    """Verify attachments feature output."""
    # Two attachment files referenced by the template
    Path(tmpdir/"attachment1.txt").write_text("Hello world\n", encoding="utf8")
    Path(tmpdir/"attachment2.txt").write_text("Hello mailmerge\n", encoding="utf8")
    # Template with attachment header
    Path(tmpdir/"mailmerge_template.txt").write_text(textwrap.dedent("""\
        TO: {{email}}
        FROM: <EMAIL>
        ATTACHMENT: attachment1.txt
        ATTACHMENT: attachment2.txt
        Hello world
    """), encoding="utf8")
    # Simple database
    Path(tmpdir/"mailmerge_database.csv").write_text(textwrap.dedent("""\
        email
        <EMAIL>
    """), encoding="utf8")
    # Simple unsecure server config
    Path(tmpdir/"mailmerge_server.conf").write_text(textwrap.dedent("""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
    """), encoding="utf8")
    # Run mailmerge
    cli = click.testing.CliRunner()
    with tmpdir.as_cwd():
        outcome = cli.invoke(main, ["--output-format", "text"])
    assert not outcome.exception
    assert outcome.exit_code == 0
    # Verify the body and both attachments appear as message parts.
    assert ">>> message part: text/plain" in outcome.output
    assert "Hello world" in outcome.output  # message
    assert ">>> message part: attachment attachment1.txt" in outcome.output
    assert ">>> message part: attachment attachment2.txt" in outcome.output
def test_utf8_template(tmpdir):
    """Message is utf-8 encoded when only the template contains utf-8 chars."""
    # Template with UTF-8 characters and emoji
    template_path = Path(tmpdir/"mailmerge_template.txt")
    template_path.write_text(textwrap.dedent("""\
        TO: {{email}}
        FROM: <EMAIL>
        Laȝamon 😀 klâwen
    """), encoding="utf8")
    # Simple database without utf-8 characters
    database_path = Path(tmpdir/"mailmerge_database.csv")
    database_path.write_text(textwrap.dedent("""\
        email
        <EMAIL>
    """), encoding="utf8")
    # Simple unsecure server config
    config_path = Path(tmpdir/"mailmerge_server.conf")
    config_path.write_text(textwrap.dedent("""\
        [smtp_server]
        host = open-smtp.example.com
        port = 25
    """), encoding="utf8")
    # Run mailmerge
    runner = click.testing.CliRunner()
    result = runner.invoke(main, [
        "--template", template_path,
        "--database", database_path,
        "--config", config_path,
        "--dry-run",
        "--output-format", "text",
    ])
    assert not result.exception
    assert result.exit_code == 0
    # Remove the Date string, which will be different each time.
    # BUG FIX: re.sub()'s fourth positional argument is *count*, not *flags*.
    # Passing re.MULTILINE (== 8) positionally silently became count=8; the
    # flag must be passed as a keyword.  (The deepcopy of the immutable
    # output string was also unnecessary and has been dropped.)
    stdout = re.sub(r"Date:.+", "Date: REDACTED", result.output,
                    flags=re.MULTILINE)
    # Verify output
    assert stdout == textwrap.dedent("""\
        >>> message 1
        TO: <EMAIL>
        FROM: <EMAIL>
        MIME-Version: 1.0
        Content-Type: text/plain; charset="utf-8"
        Content-Transfer-Encoding: base64
        Date: REDACTED
        Laȝamon 😀 klâwen
        >>> message 1 sent
        >>> Limit was 1 message. To remove the limit, use the --no-limit option.
        >>> This was a dry run. To send messages, use the --no-dry-run option.
    """)  # noqa: E501
def test_utf8_database(tmpdir):
"""Message is utf-8 encoded when only the databse contains utf-8 chars."""
# Simple template without UTF-8 characters
template_path = Path(tmpdir/"mailmerge_template.txt")
template_path.write_text(textwrap.dedent("""\
TO: <EMAIL>
FROM: <EMAIL>
{{message}}
"""), encoding="utf8")
# Database with utf-8 characters and emoji
database_path = Path(tmpdir/"mailmerge_database.csv")
database_path.write_text(textwrap.dedent("""\
message
Laȝamon 😀 klâwen
"""), encoding="utf8")
# Simple unsecure server config
config_path = Path(tmpdir/"mailmerge_server.conf")
config_path.write_text(textwrap.dedent("""\
[smtp_server]
host = open-smtp.example.com
port = 25
"""), encoding="utf8")
# Run mailmerge
runner = click.testing.CliRunner()
with tmpdir.as_cwd():
result = runner.invoke(main, ["--output-format", "text"])
assert not result.exception
assert result.exit_code == 0
# Remove the Date string, which will be different each time
stdout = copy.deepcopy(result.output)
stdout = re.sub(r"Date:.+", "Date: REDACTED", stdout, re.MULTILINE)
# Verify | |
import json
import thread
from appscale.tools.agents.base_agent import AgentConfigurationException
from appscale.tools.agents.base_agent import AgentRuntimeException
from appscale.tools.agents.base_agent import BaseAgent
from appscale.tools.agents.factory import InfrastructureAgentFactory
from utils import utils
from utils.persistent_dictionary import PersistentDictionary
from utils.persistent_dictionary import PersistentStoreFactory
class InfrastructureManager:
"""
InfrastructureManager class is the main entry point to the AppScale
Infrastructure Manager implementation. An instance of this class can
be used to start new virtual machines in a specified cloud environment
and terminate virtual machines when they are no longer required. Instances
of this class also keep track of the virtual machines spawned by them
and hence each InfrastructureManager instance can be queried to obtain
information about any virtual machines spawned by each of them in the
past.
This implementation is completely cloud infrastructure agnostic
and hence can be used to spawn/terminate instances on a wide range of
cloud (IaaS) environments. All the cloud environment specific operations
are delegated to a separate cloud agent and the InfrastructureManager
initializes cloud agents on demand by looking at the 'infrastructure'
parameter passed into the methods of this class.
"""
# Default reasons which might be returned by this module
REASON_BAD_SECRET = 'bad secret'
REASON_BAD_VM_COUNT = 'bad vm count'
REASON_BAD_ARGUMENTS = 'bad arguments'
REASON_OPERATION_ID_NOT_FOUND = 'operation_id not found'
REASON_NONE = 'none'
# Parameters required by InfrastructureManager
PARAM_OPERATION_ID = 'operation_id'
PARAM_INFRASTRUCTURE = 'infrastructure'
PARAM_NUM_VMS = 'num_vms'
# States a particular request could be in.
STATE_PENDING = 'pending'
STATE_SUCCESS = 'success'
STATE_FAILED = 'failed'
# A list of parameters required to query the InfrastructureManager about
# the state of a run_instances request.
DESCRIBE_INSTANCES_REQUIRED_PARAMS = (PARAM_OPERATION_ID,)
# A list of parameters required to initiate a VM deployment process
RUN_INSTANCES_REQUIRED_PARAMS = (
PARAM_INFRASTRUCTURE,
PARAM_NUM_VMS
)
# A list of parameters required to initiate a VM termination process
TERMINATE_INSTANCES_REQUIRED_PARAMS = ( PARAM_INFRASTRUCTURE, )
def __init__(self, params=None, blocking=False):
    """
    Create a new InfrastructureManager instance.

    A blocking manager does not return from a run/terminate operation until
    it has completed, which is handy for testing and verification. The
    default non-blocking mode instead kicks the work off on a background
    thread, since real cloud operations can take a long time.

    Args:
      params    Optional dictionary of parameters. When given it must at
                least contain the 'store_type' parameter, used to build the
                persistent store backing the operation-state dictionary.
      blocking  Whether to operate in blocking mode. Defaults to False.
    """
    self.blocking = blocking
    self.secret = utils.get_secret()
    self.agent_factory = InfrastructureAgentFactory()
    if params is None:
        # No store configuration supplied: keep operation state in a
        # default (non-persistent) dictionary.
        self.operation_ids = PersistentDictionary()
    else:
        backing_store = PersistentStoreFactory().create_store(params)
        self.operation_ids = PersistentDictionary(backing_store)
def describe_operation(self, parameters, secret):
    """
    Look up the stored status of a previously issued run_instances or
    terminate_instances operation.

    Args:
      parameters  A dictionary (or a valid JSON string) containing an
                  'operation_id' parameter, as issued by run_instances or
                  terminate_instances on this same InfrastructureManager.
      secret      A previously established secret.

    Returns:
      On a bad secret or a missing/unknown 'operation_id', a dictionary
      with 'success' set to False and 'reason' describing the failure.
      Otherwise, the stored status dictionary for the operation: 'success',
      'state' (pending/success/failed), 'reason' (set on failure), and for
      run_instances operations only, 'vm_info' (IP addresses of spawned
      machines, or None while pending/failed).

    Raises:
      TypeError  If the inputs are not of the expected types.
      ValueError If the input JSON string (parameters) cannot be parsed.
    """
    parameters, secret = self.__validate_args(parameters, secret)
    if secret != self.secret:
        return self.__generate_response(False, self.REASON_BAD_SECRET)
    for required in self.DESCRIBE_INSTANCES_REQUIRED_PARAMS:
        if not utils.has_parameter(required, parameters):
            return self.__generate_response(False, 'no ' + required)
    operation_id = parameters[self.PARAM_OPERATION_ID]
    # NOTE(review): has_key here is a method of the project's
    # PersistentDictionary, not the removed Python 2 dict API — confirm.
    if not self.operation_ids.has_key(operation_id):
        return self.__generate_response(False, self.REASON_OPERATION_ID_NOT_FOUND)
    return self.operation_ids.get(operation_id)
def run_instances(self, parameters, secret):
    """
    Kick off a new virtual machine deployment.

    The parameter map must carry 'infrastructure' (names the cloud platform
    and selects the agent used to talk to it) and 'num_vms' (how many
    machines to start), plus any platform-specific settings required by the
    chosen agent. In blocking mode this method returns only after the
    deployment finishes; otherwise the deployment runs on a background
    thread and this method returns immediately.

    Args:
      parameters  A dictionary (or a valid JSON string) with the keys
                  'infrastructure', 'num_vms' and any other cloud platform
                  specific parameters.
      secret      A previously established secret.

    Returns:
      A dictionary containing an 'operation_id' key on success. On a bad
      secret, a missing required parameter, an invalid VM count, or an
      agent configuration problem, a dictionary with 'success' set to
      False and 'reason' set to an error message.

    Raises:
      TypeError  If the inputs are not of the expected types.
      ValueError If the input JSON string (parameters) cannot be parsed.
    """
    parameters, secret = self.__validate_args(parameters, secret)
    utils.log('Received a request to run instances.')
    if secret != self.secret:
        utils.log('Incoming secret {0} does not match the current secret {1} - '\
                  'Rejecting request.'.format(secret, self.secret))
        return self.__generate_response(False, self.REASON_BAD_SECRET)
    for required in self.RUN_INSTANCES_REQUIRED_PARAMS:
        if not utils.has_parameter(required, parameters):
            return self.__generate_response(False, 'no ' + required)
    num_vms = int(parameters[self.PARAM_NUM_VMS])
    if num_vms <= 0:
        utils.log('Invalid VM count: {0}'.format(num_vms))
        return self.__generate_response(False, self.REASON_BAD_VM_COUNT)
    infrastructure = parameters[self.PARAM_INFRASTRUCTURE]
    agent = self.agent_factory.create_agent(infrastructure)
    try:
        agent.assert_required_parameters(parameters, BaseAgent.OPERATION_RUN)
    except AgentConfigurationException as exception:
        return self.__generate_response(False, str(exception))
    # Record the operation as pending before any work starts, so the id can
    # be queried via describe_operation right away.
    operation_id = utils.get_random_alphanumeric()
    self.operation_ids.put(operation_id, {
        'success': True,
        'reason': 'received run request',
        'state': self.STATE_PENDING,
        'vm_info': None
    })
    utils.log('Generated operation id {0} for this run '
              'instances request.'.format(operation_id))
    spawn_args = (agent, num_vms, parameters, operation_id)
    if self.blocking:
        self.__spawn_vms(*spawn_args)
    else:
        # Deployments are slow; hand the work off to a background thread.
        thread.start_new_thread(self.__spawn_vms, spawn_args)
    utils.log('Successfully started run instances request {0}.'.format(
        operation_id))
    return self.__generate_response(True, self.REASON_NONE,
                                    {'operation_id': operation_id})
def terminate_instances(self, parameters, secret):
"""
Terminate a virtual machine using the provided parameters.
The input parameter map must contain an 'infrastructure' parameter which
will be used to instantiate a suitable cloud agent. Any additional
environment specific parameters should also be available in the same
map.
If this InfrastructureManager instance has been created in the blocking mode,
this method will not return until the VM deployment is complete. Otherwise
this method simply starts the VM termination process and returns immediately.
Args:
parameters A dictionary of parameters containing the required
'infrastructure' parameter and any other platform
dependent required parameters. Alternatively one
may provide a valid JSON string instead of a dictionary
object.
secret A previously established secret
Returns:
If the secret is valid and all the required parameters are available in
the input parameter map, this method will return a dictionary containing
a special 'operation_id' key. If the secret is invalid or a required
parameter is missing, this method will return a different | |
<filename>tests/data23/recipe-577507.py
##
# This module provides a powerful 'switch'-like dispatcher system.
# Values for switch cases can be anything comparable via '==', a string
# for use on the left-hand side of the 'in' operator, or a regular expression.
# Iterables of these types can also be used.
__author__ = '<NAME>'
import re
class SwitchError(Exception):
    """Raised for duplicate case registrations or unmatched switch values."""
    pass

# Sentinel types used to decide which matching strategies apply.
CPAT_TYPE = type(re.compile('.'))
STR_TYPE = type('')
LIST_TYPE = type([])
TUPLE_TYPE = type(())


class Switch(object):
    """A 'switch'-like dispatcher.

    Case values may be anything comparable via '==' (and usable as a dict
    key), a string for the left-hand side of the 'in' operator, or a
    regular expression; an iterable of such values registers each contained
    value under the same handler.
    """

    def __init__(self):
        self.exactCases = {}
        self.inCases = []
        self.patternCases = []
        self.defaultHandler = None

    @staticmethod
    def _iterCaseValues(caseValue):
        # Normalize a scalar case value into an iterable of case values so
        # that every registration decorator can treat both forms uniformly.
        return caseValue if hasattr(caseValue, '__iter__') else [caseValue]

    def _findInCase(self, switchValue):
        """Return (substring, handler) for the first registered 'in' case
        whose substring occurs in switchValue, or (None, None)."""
        for needle, handler in self.inCases:
            if needle in switchValue:
                return needle, handler
        return None, None

    def _findRegExCase(self, switchValue):
        """Return (matchObject, handler) for the first registered pattern
        that re.search-matches switchValue, or (None, None)."""
        for pattern, handler in self.patternCases:
            found = pattern.search(switchValue)
            if found is not None:
                return found, handler
        return None, None

    def switch(self, switchValue, *args, **kwargs):
        """Dispatch on switchValue and return the chosen handler's result.

        Matching order: exact cases first; then, for strings, lists and
        tuples, the 'in' cases in registration order; then, for strings
        only, the regex cases in registration order; finally the default
        handler. SwitchError is raised when nothing matches at all. The
        handler receives the matched case value (the substring for 'in'
        cases, the re match object for regex cases, otherwise switchValue
        itself) followed by any extra positional/keyword arguments.
        """
        handler = None
        caseValue = None
        valueType = type(switchValue)
        try:
            handler = self.exactCases.get(switchValue)
            caseValue = switchValue
        except TypeError:
            # Unhashable values simply cannot have an exact-match entry.
            pass
        if handler is None and self.inCases and \
                valueType in (STR_TYPE, LIST_TYPE, TUPLE_TYPE):
            caseValue, handler = self._findInCase(switchValue)
        if handler is None and self.patternCases and valueType == STR_TYPE:
            caseValue, handler = self._findRegExCase(switchValue)
        if handler is None:
            handler = self.defaultHandler
            caseValue = switchValue
        if handler is None:
            raise SwitchError("Unknown case value %r" % switchValue)
        return handler(caseValue, *args, **kwargs)

    def case(self, caseValue):
        """Decorator: register a handler for an exact-match case value (or
        an iterable of them). Raises SwitchError on duplicates."""
        def register(caseHandler):
            for value in self._iterCaseValues(caseValue):
                if value in self.exactCases:
                    raise SwitchError("Duplicate exact case value '%s'" % \
                                      value)
                self.exactCases[value] = caseHandler
            return caseHandler
        return register

    def caseRegEx(self, caseValue):
        """Decorator: register a handler for a regex pattern (compiled or
        source string), or an iterable of patterns. Raises SwitchError on
        duplicate patterns."""
        def register(caseHandler):
            for value in self._iterCaseValues(caseValue):
                if type(value) != CPAT_TYPE:
                    value = re.compile(value)
                for existing, _ in self.patternCases:
                    if value.pattern == existing.pattern:
                        raise SwitchError("Duplicate regex case value '%s'" % \
                                          value.pattern)
                self.patternCases.append((value, caseHandler))
            return caseHandler
        return register

    def caseIn(self, caseValue):
        """Decorator: register a handler for an 'in' substring case (or an
        iterable of them). Raises SwitchError on duplicates."""
        def register(caseHandler):
            for value in self._iterCaseValues(caseValue):
                for existing, _ in self.inCases:
                    if value == existing:
                        raise SwitchError("Duplicate 'in' case value '%s'" % \
                                          value)
                self.inCases.append((value, caseHandler))
            return caseHandler
        return register

    def default(self, caseHandler):
        """Decorator: register the fallback handler used when no case matches."""
        self.defaultHandler = caseHandler
        return caseHandler
if __name__ == '__main__': # pragma: no cover
# Example uses
# Instantiate a switch object.
mySwitch = Switch()
# Register some cases and case handlers, using the handy-dandy
# decorators.
# A default handler
@mySwitch.default
def gotDefault(value, *args, **kwargs):
print("Default handler: I got unregistered value %r, "\
"with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A single numeric case value.
@mySwitch.case(0)
def gotZero(value, *args, **kwargs):
print("gotZero: I got a %d, with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A range of numeric case values.
@mySwitch.case(list(range(5, 10)))
def gotFiveThruNine(value, *args, **kwargs):
print("gotFiveThruNine: I got a %d, with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A string case value, for an exact match.
@mySwitch.case('Guido')
def gotGuido(value, *args, **kwargs):
print("gotGuido: I got '%s', with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A string value for use with the 'in' operator.
@mySwitch.caseIn('lo')
def gotLo(value, *args, **kwargs):
print("gotLo: I got '%s', with args: %r and kwargs: %r" % \
(value, args, kwargs))
return value
# A regular expression pattern match in a string.
# You can also pass in a pre-compiled regular expression.
@mySwitch.caseRegEx(r'\b([Pp]y\w*)\b')
def gotPyword(matchObj, *args, **kwargs):
print("gotPyword: I got a matchObject where group(1) is '%s', "\
"with args: %r and kwargs: %r" % \
(matchObj.group(1), args, kwargs))
return matchObj
# And lastly, you can pass a iterable to case, caseIn, and
# caseRegEx.
@mySwitch.case([ 99, 'yo', 200 ])
def gotStuffInSeq(value, *args, **kwargs):
print("gotStuffInSeq: I got %r, with args: %r and kwargs: %r" % \
(value, args, kwargs))
| |
# from https://github.com/Subaru-PFS/drp_stella/blob/\
# 6cceadfc8721fcb1c7eb1571cf4b9bc8472e983d/src/SpectralPsf.cc
# // Binning by an odd factor requires the centroid at the center of a pixel.
# // Binning by an even factor requires the centroid on the edge of a pixel.
# the definitions used in primary image
# we separate if the image shape is odd or even, but at the moment there is no difference
if np.modf(shape_of_oversampled_image / 2)[0] == 0.0:
# 'shape is an even number'
shift_x_mod = np.array(
[-(np.round(primary_offset_axis_1) - primary_offset_axis_1),
-np.round(primary_offset_axis_1)])
shift_y_mod = np.array(
[-(np.round(primary_offset_axis_0) - primary_offset_axis_0),
-np.round(primary_offset_axis_0)])
else:
# 'shape is an odd number'
shift_x_mod = np.array(
[-(np.round(primary_offset_axis_1) - primary_offset_axis_1),
-np.round(primary_offset_axis_1)])
shift_y_mod = np.array(
[-(np.round(primary_offset_axis_0) - primary_offset_axis_0),
-np.round(primary_offset_axis_0)])
image_integer_offset = image[center_position +
int(shift_y_mod[1]) - 1 -
shape_of_oversampled_image:center_position +
int(shift_y_mod[1]) +
shape_of_oversampled_image + 1,
center_position +
int(shift_x_mod[1]) - 1 -
shape_of_oversampled_image: center_position +
int(shift_x_mod[1]) +
shape_of_oversampled_image + 1]
if simulation_00:
image_integer_offset = image[center_position +
int(shift_y_mod[1]) - 1 -
shape_of_oversampled_image:center_position +
int(shift_y_mod[1]) +
shape_of_oversampled_image + 1 + 1,
center_position +
int(shift_x_mod[1]) - 1 -
shape_of_oversampled_image: center_position +
int(shift_x_mod[1]) +
shape_of_oversampled_image + 1 + 1]
print('image_integer_offset shape: ' + str(image_integer_offset.shape))
image_integer_offset_lsst = lsst.afw.image.image.ImageD(image_integer_offset.astype('float64'))
oversampled_Image_LSST_apply_frac_offset = lsst.afw.math.offsetImage(
image_integer_offset_lsst, shift_x_mod[0], shift_y_mod[0], algorithmName='lanczos5', buffer=5)
single_primary_realization_oversampled = oversampled_Image_LSST_apply_frac_offset.array[1:-1, 1:-1]
assert single_primary_realization_oversampled.shape[0] == shape_of_sci_image * oversampling
single_primary_realization = resize(
single_primary_realization_oversampled, (shape_of_sci_image, shape_of_sci_image), ())
###################
# This part is skipped if there is only primary source in the image
# go through secondary loop if the flux ratio is not zero
# (TODO: if secondary too far outside the image, do not go through secondary)
if ratio_secondary != 0:
# overloading the definitions used in primary image
if np.modf(shape_of_oversampled_image / 2)[0] == 0.0:
# print('shape is an even number')
shift_x_mod = np.array(
[-(np.round(secondary_offset_axis_1) - secondary_offset_axis_1),
-np.round(secondary_offset_axis_1)])
shift_y_mod = np.array(
[-(np.round(secondary_offset_axis_0) - secondary_offset_axis_0),
-np.round(secondary_offset_axis_0)])
else:
# print('shape is an odd number')
shift_x_mod = np.array(
[-(np.round(secondary_offset_axis_1) - secondary_offset_axis_1),
-np.round(secondary_offset_axis_1)])
shift_y_mod = np.array(
[-(np.round(secondary_offset_axis_0) - secondary_offset_axis_0),
-np.round(secondary_offset_axis_0)])
image_integer_offset = image[center_position +
int(shift_y_mod[1]) - 1 - shape_of_oversampled_image:
center_position + int(shift_y_mod[1]) +
shape_of_oversampled_image + 2,
center_position + int(shift_x_mod[1]) -
1 - shape_of_oversampled_image:
center_position + int(shift_x_mod[1]) +
shape_of_oversampled_image + 2]
image_integer_offset_lsst = lsst.afw.image.image.ImageD(image_integer_offset.astype('float64'))
oversampled_Image_LSST_apply_frac_offset = lsst.afw.math.offsetImage(
image_integer_offset_lsst, shift_y_mod[0], shift_x_mod[0], algorithmName='lanczos5', buffer=5)
single_secondary_realization_oversampled =\
oversampled_Image_LSST_apply_frac_offset.array[1:-1, 1:-1]
single_secondary_realization = resize(
single_secondary_realization_oversampled, (shape_of_sci_image, shape_of_sci_image), ())
inverted_mask = ~mask_image.astype(bool)
###################
# create complete_realization which is just primary if no secondary source
# if there is secondary source, add two images together
if ratio_secondary != 0:
complete_realization = single_primary_realization + ratio_secondary * single_secondary_realization
complete_realization_renormalized = complete_realization * \
(np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))
else:
complete_realization = single_primary_realization
complete_realization_renormalized = complete_realization * \
(np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))
###################
# find chi values and save the results
if not return_full_result:
chi_2_almost_multi_values = self.create_chi_2_almost_Psf_position(
complete_realization_renormalized,
sci_image,
var_image,
mask_image,
use_only_chi=use_only_chi,
use_center_of_light=use_center_of_light,
simulation_00=simulation_00)
if self.verbosity == 1:
print(
'chi2 within shgo with use_only_chi ' +
str(use_only_chi) +
' and use_center_of_light ' +
str(use_center_of_light) +
' ' + str(x) + ' / ' + str(chi_2_almost_multi_values))
return chi_2_almost_multi_values
else:
if ratio_secondary != 0:
# print('ratio_secondary 2nd loop: '+str(ratio_secondary))
single_primary_realization_renormalized = single_primary_realization * \
(np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))
single_secondary_realization_renormalized = ratio_secondary * single_secondary_realization * \
(np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))
else:
# print('ratio_secondary 2nd loop 0: '+str(ratio_secondary))
single_primary_realization_renormalized = single_primary_realization * \
(np.sum(sci_image[inverted_mask]) * v_flux / np.sum(complete_realization[inverted_mask]))
single_secondary_realization_renormalized = np.zeros(
single_primary_realization_renormalized.shape)
if self.save == 1:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'image', image)
if ratio_secondary != 0:
np.save(self.TESTING_FINAL_IMAGES_FOLDER + 'image_full_for_secondary', image)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_secondary_realization',
single_secondary_realization)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_primary_realization',
single_primary_realization)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_primary_realization_renormalized_within_create_complete_realization',
single_primary_realization_renormalized)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_secondary_realization_renormalized_within_create_complete_realization',
single_secondary_realization_renormalized)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'complete_realization_renormalized_within_create_complete_realization',
complete_realization_renormalized)
# TODO: should I modify this function to remove distance from physcial center of
# mass when using that option
chi_2_almost_multi_values = self.create_chi_2_almost_Psf_position(
complete_realization_renormalized,
sci_image,
var_image,
mask_image,
use_only_chi=use_only_chi,
use_center_of_light=use_center_of_light,
simulation_00=simulation_00)
# save the best oversampled image
if simulation_00:
if self.verbosity == 1:
print('saving oversampled simulation_00 image')
# print('I have to implement that again')
print(
'saving at ' +
self.TESTING_FINAL_IMAGES_FOLDER +
'single_primary_realization_oversampled')
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'single_primary_realization_oversampled_to_save',
single_primary_realization_oversampled)
np.save(
self.TESTING_FINAL_IMAGES_FOLDER +
'complete_realization_renormalized_to_save',
single_primary_realization_oversampled)
return chi_2_almost_multi_values,\
single_primary_realization_renormalized, single_secondary_realization_renormalized,\
complete_realization_renormalized
def create_chi_2_almost_Psf_position(self, modelImg, sci_image, var_image, mask_image,
                                     use_only_chi=False, use_center_of_light=False, simulation_00=False):
    """Measure how well the model image fits the science image.

    Parameters
    ----------
    modelImg: `np.array`, (N, N)
        model image
    sci_image: `np.array`, (N, N)
        science image
    var_image: `np.array`, (N, N)
        variance image
    mask_image: `np.array`, (N, N)
        mask image; non-zero pixels are excluded from the comparison
    use_only_chi: `bool`
        if True, return mean |chi|; if False, return mean chi**2
    use_center_of_light: `bool`
        if True, return the distance between the centers of light of the
        science and model images (in pixels) instead of a chi statistic
    simulation_00: `bool`
        if True together with use_center_of_light, compare the model's
        center of light against the geometric center of a 21x21 stamp
        rather than against the science image's centroid

    Returns
    ----------
    measure_of_quality: `float`
        mean chi**2, mean |chi|, or the center-of-light distance in pixels

    Notes
    ----------
    Called by create_complete_realization
    """
    keep = ~mask_image.astype(bool)
    var_masked = var_image * keep
    sci_masked = sci_image * keep
    model_masked = modelImg * keep
    if not use_center_of_light:
        # chi / chi**2 statistic, averaged over unmasked non-NaN pixels.
        # Masked pixels have variance zeroed above, so they divide to
        # NaN/inf and the NaN ones are dropped by the filter below.
        if use_only_chi:
            stat = np.abs((sci_masked - model_masked))**1 / np.sqrt(var_masked)
        else:
            stat = (sci_masked - model_masked)**2 / var_masked
        return np.mean(stat[~np.isnan(stat)])
    if simulation_00 is False or simulation_00 is None:
        if self.verbosity == 1:
            print('sim00=False and center of light =true')
        model_center = np.array(find_centroid_of_flux(model_masked))
        sci_center = np.array(find_centroid_of_flux(sci_masked))
        return np.sqrt(np.sum((model_center - sci_center)**2))
    # simulation_00 together with use_center_of_light: the target is the
    # geometric center of a 21x21 stamp, not the science image's centroid.
    if self.verbosity == 1:
        print('sim00 = True and center of light = True')
    target_center = np.array(np.array(np.ones((21, 21)).shape) / 2 - 0.5)
    model_center = np.array(find_centroid_of_flux(model_masked))
    return np.sqrt(np.sum((model_center - target_center)**2))
def fill_crop(self, img, pos, crop):
    '''Fill `crop` with values from `img` at `pos`, clipping at edges.

    Copies the overlap between `img` and a crop-sized window anchored at
    `pos` into the matching region of `crop`; the parts of the window that
    fall outside `img` are left untouched in `crop`.
    *Note:* negative values in `pos` are interpreted as-is,
    not as "from the end".
    Taken from https://stackoverflow.com/questions/41153803/zero-padding-slice-past-end-of-array-in-numpy #noqa:E501
    '''
    img_shape = np.array(img.shape, dtype=int)
    pos = np.array(pos, dtype=int)
    crop_shape = np.array(crop.shape, dtype=int)
    end = pos + crop_shape
    # Destination region inside `crop` (clipped where the window hangs
    # off the start or end of `img`).
    crop_low = np.clip(-pos, a_min=0, a_max=crop_shape)
    crop_high = crop_shape - np.clip(end - img_shape, a_min=0, a_max=crop_shape)
    crop_slices = tuple(slice(lo, hi) for lo, hi in zip(crop_low, crop_high))
    # Source region inside `img`.
    src_start = np.clip(pos, a_min=0, a_max=img_shape)
    src_end = np.clip(end, a_min=0, a_max=img_shape)
    img_slices = tuple(slice(lo, hi) for lo, hi in zip(src_start, src_end))
    try:
        crop[crop_slices] = img[img_slices]
    except TypeError:
        # Best-effort: keep going if the slice assignment is malformed.
        print('TypeError in fill_crop function')
        pass
def Ifun16Ne(lambdaV, lambda0, Ne):
    """Construct Lorentzian scattering kernel.

    Parameters
    ----------
    lambdaV: `float`
        wavelength at which to compute the grating effect
    lambda0: `float`
        reference wavelength
    Ne: `int`
        number of effective grating lines of the spectrograph

    Returns
    ----------
    value_of_scatter: `float`
        strength of the kernel at the lambdaV wavelength, normalized so
        that it equals 1 at lambdaV == lambda0
    """
    # Squared half-width of the Lorentzian, set by the grating resolution.
    half_width_sq = (lambda0 / (Ne * np.pi * np.sqrt(2)))**2
    return half_width_sq / ((lambdaV - lambda0)**2 + half_width_sq)
def sky_scale(pupil_plane_size, lam, scale_unit=galsim.arcsec):
    """Return the image scale for this aperture at given wavelength.
    @param pupil_plane_size Size of the pupil plane; presumably in meters,
           matching the nm->m conversion of lam below -- TODO confirm.
    @param lam Wavelength in nanometers.
    @param scale_unit Units in which to return result [default: galsim.arcsec]
    @returns Image scale
    """
    # lam * 1e-9 converts nm -> m; lambda / D is the diffraction angle in
    # radians, converted to scale_unit via galsim's angle-unit arithmetic.
    return (lam * 1e-9) / pupil_plane_size * galsim.radians / scale_unit
def find_centroid_of_flux(image, mask=None):
"""Find center of flux in an image
function giving the tuple of the position of weighted average of
the flux in a square image
indentical result as calculateCentroid from drp_stella.images
@input image poststamp image for which to find center
@input mask mask, same size as the image
returns tuple with x and y center, in units of pixels
"""
if mask is None:
mask = np.ones(image.shape)
x_center = []
y_center = []
# if there are nan values (most likely cosmics), replace them with max value in the rest of the image
# careful, this can seriously skew the results if not used for this purpose
| |
import numpy as np
import ctypes as c
import os
# output params :
# 0 objective function
# 1 camber angle down
# 2 up
# 3 toe angle down
# 4 up
# 5 caster angle
# 6 roll centre height
# 7 caster trail
# 8 scrub radius
# 9 kingpin angle
# 10 anti squat / anti dive drive
# 11 anti rise / anti lift brake
# 12 half track change down
# 13 wheelbase change down
# 14 half track change up
# 15 wheelbase change up
# 16 distance lca3 to wcn-spn line
# 17 distance uca3 to wcn-spn line
# 18 distance tr2 to wcn-spn line
# 19 distance lca3 to plane with wcn-spn normal through wcn point
# 20 distance uca3 to plane with wcn-spn normal through wcn point
# 21 distance tr2 to plane with wcn-spn normal through wcn point
# wheel travel from rebound to bump, from downmost position w.r.t. chassis to
# upmost
class Suspension():
"""creates quarter suspension defined by XYZ cs where X points front, Y to the right side and Z down"""
wheel_radius = 210
wheelbase = 1530
cog_height = 300
drive_bias = 1
brake_bias = 0.4
suspension_position = 1 # 0 for front, 1 for rear
drive_position = 1 # 0 for outboard, 1 for inboard
brake_position = 0 # 0 for outboard, 1 for inboard
vertical_movement = 30
steering_movement = 10
vert_incr = 1 # 1 for optimisation testing, 1 or greater for movement testing
steer_incr = 0 # 0 for optimisation testing, 0 or greater for movement testing
precision = 0.001
# OUTPUT PARAMETERS
# optimisation output parameters
output_params_optimisation = []
output_params_optimisation_c = (c.c_float * 21)(*output_params_optimisation)
# suspension movement output parameters
camberAngle = []
camberAngle_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*camberAngle)
toeAngle = []
toeAngle_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*toeAngle)
casterAngle = []
casterAngle_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*casterAngle)
rcHeight = []
rcHeight_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*rcHeight)
casterTrail = []
casterTrail_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*casterTrail)
scrubRadius = []
scrubRadius_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*scrubRadius)
kingpinAngle = []
kingpinAngle_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*kingpinAngle)
antiDrive = []
antiDrive_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*antiDrive)
antiBrake = []
antiBrake_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*antiBrake)
halfTrackChange = []
halfTrackChange_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*halfTrackChange)
wheelbaseChange = []
wheelbaseChange_c = (c.c_float * ((vert_incr * 2 + 1) * (steer_incr * 2 + 1)))(*wheelbaseChange)
const_output_params_movement = []
const_output_params_movement_c = (c.c_float * 6)(*const_output_params_movement)
# point coordinates from CAD
# -2038.666, -411.709, -132.316
# -2241.147, -408.195, -126.205
# -2135, -600, -140
# -2040.563, -416.249, -275.203
# -2241.481, -417.314, -270.739
# -2153, -578, -315
# -2234.8, -411.45, -194.6
# -2225, -582, -220
# -2143.6, -620.5, -220.07
# -2143.6, -595.5, -219.34
# CAD suspension characteristics for optimisation
# -2.65 camber lo pos
# -0.98 camber up pos
# -0.07 toe lo pos
# 0.05 toe up pos
# 5.87 caster angle
# 55.97 roll centre height
# 21.96 caster trail
# -10.3 scrub radius
# 7.17 kingpin angle
# 16.9 anti drive
# 5.48 anti brake
# -4.93 half track change lo pos
# 0.76 half track change up pos
# 0.77 wheelbase change lo pos
# -0.74 wheelbase change up pos
# 79.9 lca3 free radius
# 96.59 uca3 free radius
# 81.41 tr2 free radius
# -22.83 lca3 wcn distance
# -39.71 uca3 wcn distance
# -38.49 tr2 wcn distance
# hps boundaries
hps_boundaries = [-2038.666,-490,-335,-180,-80, # 0 lca1x_opt, 1 lca1y_lo, 2 lca1y_up, 3 lca1z_lo, 4 lca1z_up
-2241.147,-490,-335,-180,-80, # 5 lca2x_opt, 6 lca2y_lo, 7 lca2y_up, 8 lca2z_lo, 9 lca2z_up
-2160,-2100,-630,-570,-170,-110, # 10 lca3x_lo, 11 lca3x_up, 12 lca3y_lo, 13 lca3y_up, 14 lca3z_lo, 15
# lca3z_up
-2040.563,-490,-335,-325,-225, # 16 uca1x_opt, 17 uca1y_lo, 18 uca1y_up, 19 uca1z_lo, 20 uca1z_up
-2241.481,-490,-335,-325,-225, # 21 uca2x_opt, 22 uca2y_lo, 23 uca2y_up, 24 uca2z_lo, 25 uca2z_up
-2180,-2120,-610,-550,-345,-285, # 26 uca3x_lo, 27 uca3x_up, 28 uca3y_lo, 29 uca3y_up, 30 uca3z_lo, 31
# uca3z_up
-2280,-2190,-485,-335,-245,-145, # 32 tr1x_lo, 33 tr1x_up, 34 tr1y_lo, 35 tr1y_up, 36 tr1z_lo, 37 tr1z_up
-2270,-2180,-610,-550,-280,-160, # 38 tr2x_lo, 39 tr2x_up, 40 tr2y_lo, 41 tr2y_up, 42 tr2z_lo, 43 tr2z_up
-2143.6, -620.5, -220.07, # indices: 44, 45, 46, # wcnx_opt, wcny_opt, wcnz_opt
-2143.6, -595.5, -219.34 # indices: 47, 48, 49 # spnx_opt, spny_opt, spnz_opt
]
# INPUT FOR OPTIMISATION.PY
# derived suspension characteristics boundaries
# Flat list of [lo, hi] pairs, one pair per derived characteristic; the
# index comments on each line give the positions of the pair in the list.
characteristics_boundaries = [-2.7, -2.6,  # indices: 0, 1, # camber down pos lo hi lim
                              -1, -0.9,  # indices: 2, 3, # camber up pos lo hi lim
                              -0.08, 0,  # indices: 4, 5, # toe down pos lo hi lim
                              0, 0.06,  # indices: 6, 7, # toe up pos lo hi lim
                              4, 15,  # indices: 8, 9, # caster angle lo hi lim
                              50, 65,  # indices: 10, 11, # roll centre height lo hi lim
                              10, 25,  # indices: 12, 13, # caster trail lo hi lim
                              -15, 8,  # indices: 14, 15, # scrub radius lo hi lim
                              3, 8,  # indices: 16, 17, # kingpin angle lo hi lim
                              10, 18,  # indices: 18, 19, # anti drive lo hi lim
                              0, 20,  # indices: 20, 21, # anti brake lo hi lim
                              -10, 0,  # indices: 22, 23, # half track change down pos lo hi lim
                              0, 3,  # indices: 24, 25, # half track change up pos lo hi lim
                              -1.5, 1.5,  # indices: 26, 27, # wheelbase change down pos lo hi lim
                              -1.5, 1.5,  # indices: 28, 29, # wheelbase change up pos lo hi lim
                              60, 100,  # indices: 30, 31, # inside wheel free radius LCA3 lo hi lim
                              60, 100,  # indices: 32, 33, # inside wheel free radius UCA3 lo hi lim
                              60, 100,  # indices: 34, 35, # inside wheel free radius TR2 lo hi lim
                              -100, -20,  # indices: 36, 37, # minimum distance between plane defined by wcn
                              # and line wcn-spn and LCA3 (mm) lo hi lim
                              -100, -20,  # indices: 38, 39, # minimum distance between plane defined by wcn
                              # and line wcn-spn and UCA3 (mm) lo hi lim
                              -100, -20,  # indices: 40, 41 # minimum distance between plane defined by wcn
                              # and line wcn-spn and TR2 (mm) lo hi lim
                              ]
# determines objective function shape
# One peak width per characteristic, same ordering as
# characteristics_boundaries pairs / characteristics_target_values.
# NOTE(review): 0.10000000149011612 is 0.1 round-tripped through float32;
# presumably 0.1 was intended -- confirm with the producer of these values.
peak_width_values = [
    # 10, 10, 10, 10, 10, 10, 10, 10, 10,10,10,10, 10, 10, 10,10, 10,10, 10, 10, 10
    0.10000000149011612,
    0.10000000149011612,
    10,
    10,
    0.10000000149011612,
    10,
    200000,
    10,
    10,
    10,
    10,
    10,
    10,
    10,
    10,
    10,
    10,
    10,
    10,
    10,
    10
]
# One peak flatness (exponent-like shape factor) per characteristic,
# same ordering as the lists above.
peak_flatness_values = [
    # 2, 2, 2, 2, 6, 2, 6, 2, 2,2,2,2, 2, 2, 2,2, 2,2, 2, 2, 2
    2,
    2,
    2,
    2,
    3,
    2,
    6,
    2,
    2,
    2,
    2,
    2,
    2,
    2,
    2,
    2,
    2,
    2,
    2,
    2,
    2
]
# INPUT FOR DLL FILE OBJECTIVE FUNCTION
# Target value per characteristic, same ordering as the lists above.
characteristics_target_values = [
    # -2.65, -0.95, 0, 0, 4, 50, 10, -15, 3, 10, 10, 0, 0, 0, 0, 70, 70, 70, -80, -80, -80
    -2.65,
    -0.98,
    -0.074,
    0.048,
    5.87,
    55.97,
    21.955,
    -10.3,
    7.165,
    16.9,
    5.47,
    -4.93,
    0.76,
    0.77,
    -0.74,
    79.9,
    96.588,
    81.41,
    -22.83,
    -39.71,
    -38.49
]
# INPUT FOR DLL FILE OBJECTIVE FUNCTION
characteristics_weight_factors = [
#1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
0.4032258093357086 ,
0.4032258093357086 ,
0.02016128972172737 ,
0.02016128972172737 ,
0.012096770107746124 ,
0.008064515888690948 ,
0.02016128972172737 ,
| |
"""This serves as a main function to run a worker."""
from qip.distributed.proto import *
from qip.distributed.proto.conversion import *
from qip.distributed.worker.worker_backends import SocketServerBackend
from qip.distributed.worker.worker_logger import WorkerLogger, PrintLogger
from qip.distributed.formatsock import FormatSocket
from qip.backend import CythonBackend, StateType
import socket
from threading import Thread, Lock
from typing import Mapping, Tuple, Callable
import ssl
import sys
class WorkerInstance:
    """A class created to serve a particular circuit + state.

    Each instance owns one block of the distributed state: the rows
    [inputstartindex, inputendindex) it reads from and the rows
    [outputstartindex, outputendindex) it writes to. Workers whose input
    and output ranges differ (off-diagonal workers) connect to the
    diagonal partners owning the matching ranges so state can be
    synchronized between operations.
    """

    def __init__(self, serverapi: SocketServerBackend, workerpool: 'WorkerPoolServer',
                 setup: WorkerSetup, logger: WorkerLogger = None):
        """
        Create the WorkerInstance
        :param serverapi: backend class to communicate with the host that processes are done.
        :param workerpool: pool server used to create/keep sockets to partner workers.
        :param setup: Setup provided by manager
        :param logger: progress logger; defaults to PrintLogger when None.
        """
        if logger is None:
            self.logger = PrintLogger()
        else:
            self.logger = logger
        self.serverapi = serverapi
        self.n = setup.n
        self.job_id = setup.state_handle
        self.inputstartindex = setup.state_index_start
        self.inputendindex = setup.state_index_end
        self.outputstartindex = setup.output_index_start
        self.outputendindex = setup.output_index_end
        self.pool = workerpool

        # Initial state: per-group qubit indices plus either a feed vector
        # or a basis-state index (see get_state_value).
        indexgroups = list(pbindices_to_indices(state.indices) for state in setup.states)
        feedstates = [get_state_value(state) for state in setup.states]

        # Contact other workers and create a backend.
        # Should be changed if worker allocations change.
        if self.inputstartindex != self.outputstartindex:
            # Off-diagonal worker: connect to the diagonal partners
            # (input range == output range) that match either of our ranges.
            for partner in setup.partners:
                if partner.state_index_start != partner.output_index_start:
                    continue
                if partner.state_index_start == self.inputstartindex or partner.state_index_start == self.outputstartindex:
                    self.pool.make_connection(self.job_id, self.inputstartindex, self.inputendindex,
                                              self.outputstartindex, self.outputendindex, partner)

        self.logger.making_state(self.job_id, self.inputstartindex, self.inputendindex,
                                 self.outputstartindex, self.outputendindex)
        self.state = CythonBackend.make_state(self.n, indexgroups, feedstates,
                                              inputstartindex=self.inputstartindex, inputendindex=self.inputendindex,
                                              outputstartindex=self.outputstartindex, outputendindex=self.outputendindex,
                                              statetype=pbstatetype_to_statetype(setup.statetype))

    def run(self):
        """Serve operations for this job until a 'close' operation arrives.

        Measurement and probability operations report their results directly
        via report_probability / report_top_probs and skip the generic
        report_done; every other operation is acknowledged with report_done.

        :raises NotImplementedError: if an operation of unknown type arrives.
        """
        while True:
            self.logger.waiting_for_operation(self.job_id)
            operation = self.serverapi.receive_operation()
            self.logger.running_operation(self.job_id, operation)
            self.logger(operation)

            if operation.HasField('close'):
                break
            elif operation.HasField('kronprod'):
                # Apply a kron product of small matrices to the state slice.
                kronprod = operation.kronprod
                mats = {}
                for matop in kronprod.matrices:
                    indices, mat = pbmatop_to_matop(matop)
                    mats[indices] = mat
                self.state.kronselect_dot(mats, input_offset=self.inputstartindex, output_offset=self.outputstartindex)
            elif operation.HasField('total_prob'):
                # Probability when measuring all bits (0xFFFFFFFF)
                p = self.state.total_prob()
                self.logger("Reporting p={}".format(p))
                self.serverapi.report_probability(operation.job_id, measured_prob=p)
                continue
            elif operation.HasField('measure'):
                indices = pbindices_to_indices(operation.measure.indices)
                if operation.measure.soft:
                    # Soft measurement: sample without collapsing the state.
                    measured = None
                    if operation.measure.HasField('measure_result'):
                        measured = operation.measure.measure_result.measured_bits
                    m, p = self.state.soft_measure(indices, measured=measured, input_offset=self.inputstartindex)
                elif operation.measure.reduce:
                    # Reducing measurement: collapse and shrink the state.
                    measured = None
                    measured_prob = None
                    if operation.measure.HasField('measure_result'):
                        measured = operation.measure.measure_result.measured_bits
                        measured_prob = operation.measure.measure_result.measured_prob
                    m, p = self.state.reduce_measure(indices, measured=measured, measured_prob=measured_prob,
                                                     input_offset=self.inputstartindex,
                                                     output_offset=self.outputstartindex)
                elif operation.measure.top_k:
                    top_indices, top_float = self.state.measure_probabilities(indices, top_k=operation.measure.top_k)
                    # An escape hatch from an escape hatch, I know.
                    self.serverapi.report_top_probs(self.job_id, top_indices, top_float)
                    continue
                else:
                    # Plain measurement: collapse in place.
                    measured = None
                    measured_prob = None
                    if operation.measure.HasField('measure_result'):
                        measured = operation.measure.measure_result.measured_bits
                        measured_prob = operation.measure.measure_result.measured_prob
                    m, p = self.state.measure(indices, measured=measured, measured_prob=measured_prob,
                                              input_offset=self.inputstartindex,
                                              output_offset=self.outputstartindex)
                # For any measurement, report the bits and probability then continue (don't report_done).
                self.logger("Reporting m={}\tp={}".format(m, p))
                self.serverapi.report_probability(operation.job_id, measured_bits=m, measured_prob=p)
                continue
            elif operation.HasField('sync'):
                # This logic assumes all workers given equal share, if ever changed then this must be fixed.
                if self.inputstartindex == self.outputstartindex and self.inputendindex == self.outputendindex:
                    # Diagonal worker.
                    # If diagonal overwrite is on then don't need to receive anything from other workers.
                    if not operation.sync.diagonal_overwrite:
                        # Receive output from everything which outputs to same region, add to current input
                        self.logger.receiving_state(self.job_id)
                        self.pool.receive_state_increments_from_all(self.job_id, self.state,
                                                                    self.outputstartindex, self.outputendindex)
                    # Send current input to everything which takes input from same region.
                    if operation.sync.HasField('set_up_to') and operation.sync.set_up_to:
                        self.logger.sending_state(self.job_id)
                        self.pool.send_state_up_to(self.job_id, self.state, self.inputstartindex, self.inputendindex,
                                                   operation.sync.set_up_to)
                    else:
                        self.logger.sending_state(self.job_id)
                        self.pool.send_state_to_all(self.job_id, self.state, self.inputstartindex, self.inputendindex)
                else:
                    # Off-diagonal worker.
                    # If diagonal overwrite is on then don't send anything, just receive.
                    if not operation.sync.diagonal_overwrite:
                        # Swap input and output:
                        # send current output to worker along diagonal with in/out equal to our output.
                        self.logger.sending_state(self.job_id)
                        self.pool.send_state_to_one(self.job_id, self.state,
                                                    self.outputstartindex, self.outputendindex,
                                                    self.outputstartindex, self.outputendindex)
                    should_receive = True
                    if operation.sync.HasField('set_up_to') and operation.sync.set_up_to:
                        should_receive = False
                        if operation.sync.set_up_to >= self.inputendindex and operation.sync.set_up_to >= self.outputendindex:
                            should_receive = True
                    if should_receive:
                        # Receive new input from worker with in/out equal to our input. Set to current input.
                        self.logger.receiving_state(self.job_id)
                        self.pool.receive_state_from_one(self.job_id, self.state,
                                                         self.inputstartindex, self.inputendindex,
                                                         self.inputstartindex, self.inputendindex)
            else:
                # BUGFIX: was `raise NotImplemented(...)` -- NotImplemented is
                # a non-callable singleton, so that line raised TypeError
                # instead of the intended exception.
                raise NotImplementedError("Unknown operation: {}".format(operation))

            # If didn't override report system (see measurement), report done.
            self.logger.done_running_operation(self.job_id, operation)
            self.serverapi.report_done(operation.job_id)

        # 'close' received: tear down partner connections and free the slice.
        self.logger.closing_state(self.job_id)
        self.pool.close_connections(self.job_id)
        del self.state
def get_state_value(state: State) -> Union[numpy.ndarray, int]:
    """Extract the initial value of a protobuf State: the decoded vector
    when one was supplied, otherwise the basis-state index."""
    if not state.HasField('vector'):
        return state.index
    return pbvec_to_vec(state.vector)
class WorkerPoolServer(Thread):
    def __init__(self, hostname: str = '0.0.0.0', port: int = 0, logger: WorkerLogger = None):
        """
        Create a server and pool object for finding connections to other workers.
        :param hostname: address to contact this worker
        :param port: port to bind to (0 default means choose any open).
        :param logger: progress logger; defaults to PrintLogger when None.
        """
        super().__init__()
        if logger is None:
            self.logger = PrintLogger()
        else:
            self.logger = logger
        # Bind immediately so self.port is known before run() starts
        # accepting connections.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((hostname, port))
        self.addr = hostname
        self.port = self.sock.getsockname()[1]  # don't use port in case it was 0
        # map from (job_id, inputstart, inputend) to [(worker_socket, outputstart, outputend)]
        self.inputrange_workers = {}
        # map from (job_id, outputstart, outputend) to [(worker_socket, inputstart, inputend)]
        self.outputrange_workers = {}
        # Full range: (job_id, inputstart, inputend, outputstart, outputend) -> worker_socket
        self.workers = {}
        # Guards the three maps above; they are mutated from the accept thread.
        self.workerlock = Lock()
def run(self):
self.logger.starting_server()
self.sock.listen(5)
self.logger.accepting_connections()
while True:
sock, _ = self.sock.accept()
sock = FormatSocket(sock)
workersetup = WorkerPartner.FromString(sock.recv())
self.logger.accepted_connection()
fullkey = (workersetup.job_id, workersetup.state_index_start, workersetup.state_index_end,
workersetup.output_index_start, workersetup.output_index_end)
inputkey = (workersetup.job_id, workersetup.state_index_start, workersetup.state_index_end)
outputkey = (workersetup.job_id, workersetup.output_index_start, workersetup.output_index_end)
with self.workerlock:
if inputkey not in self.inputrange_workers:
self.inputrange_workers[inputkey] = []
if outputkey not in self.outputrange_workers:
self.outputrange_workers[outputkey] = []
self.workers[fullkey] = sock
self.inputrange_workers[inputkey].append((sock, workersetup.output_index_start, workersetup.output_index_end))
self.outputrange_workers[outputkey].append((sock, workersetup.state_index_start, workersetup.state_index_end))
def send_state_to_one(self, job_id: str, state: CythonBackend, inputstart: int, inputend: int,
outputstart: int, outputend: int, sock: Optional[FormatSocket] = None):
sockkey = (job_id, inputstart, inputend, outputstart, outputend)
if sock is None:
with self.workerlock:
if sockkey in self.workers:
sock = self.workers[sockkey]
if sock is not None:
self.logger("Sending state to {}".format(sockkey))
self.send_state(job_id, state, sock)
else:
self.logger.log_error("Cannot send - No socket for {}".format(sockkey))
def send_state_to_all(self, job_id: str, state: CythonBackend, start: int, end: int,
inputdefined=True):
socks = []
rangekey = (job_id, start, end)
if inputdefined:
with self.workerlock:
if rangekey in self.inputrange_workers:
socks = self.inputrange_workers[rangekey]
else:
with self.workerlock:
if rangekey in self.outputrange_workers:
socks = self.outputrange_workers[rangekey]
self.logger("Sending state to {} workers...".format(len(socks)))
for i, (sock, _, _) in enumerate(socks):
self.logger("\tSending to worker #{}".format(i))
self.send_state(job_id, state, sock)
def send_state_up_to(self, job_id: str, state: CythonBackend, inputstart: int, inputend: int, threshold: int = 0):
rangekey = (job_id, inputstart, inputend)
with self.workerlock:
# We are either alone or broken.
if rangekey not in self.inputrange_workers:
return
for sock, outputstart, outputend in self.inputrange_workers[rangekey]:
if outputstart >= threshold:
continue
self.send_state(job_id, state, sock)
def receive_state_from_one(self, job_id: str, state: CythonBackend, inputstart: int, inputend: int,
outputstart: int, outputend: int):
sock = None
rangekey = (job_id, inputstart, inputend, outputstart, outputend)
with self.workerlock:
if rangekey in self.workers:
sock = self.workers[rangekey]
if sock is not None:
self.logger("Receiving state from {}".format(rangekey))
self.receive_state(job_id, state, sock, overwrite=True)
else:
self.logger.log_error("Cannot receive - No socket for {}".format(rangekey))
def receive_state_increments_from_all(self, job_id: str, state: CythonBackend,
start: int, end: int, inputdefined=False):
socks = []
rangekey = (job_id, start, end)
if inputdefined:
with self.workerlock:
if rangekey in self.inputrange_workers:
socks = self.inputrange_workers[rangekey]
else:
with self.workerlock:
if rangekey in self.outputrange_workers:
socks = self.outputrange_workers[rangekey]
self.logger("Receiving state from {} workers...".format(len(socks)))
for sock, _, _ in socks:
self.receive_state(job_id, state, sock, overwrite=False)
# TODO rewrite as cython/c extension to improve speed.
    def send_state(self, job_id: str, state: StateType, sock: socket):
        """Stream this worker's state slice to a partner over ``sock``.

        Windowed protocol: the receiver first sends a SyncAccept advertising
        its chunk_size and max_inflight; we send up to max_inflight SyncState
        chunks, then block for the next SyncAccept ack (which may carry
        updated window parameters) before sending more.

        :param job_id: job whose state is sent; every ack must match it.
        :param state: backend state exposing get_state_size() and
            get_relative_range().
        :param sock: FormatSocket-style connection to the partner worker.
        :raises Exception: if an ack arrives carrying a different job id.
        """
        syncaccept = SyncAccept.FromString(sock.recv())
        if syncaccept.job_id != job_id:
            raise Exception("Job ids do not match: {}/{}".format(job_id, syncaccept.job_id))
        inflight = 0
        last_index = 0
        total_size = state.get_state_size()
        while last_index < total_size:
            # Fill the window: send chunks until max_inflight are unacked
            # or the whole slice has been sent.
            while inflight < syncaccept.max_inflight and last_index < total_size:
                end_index = last_index + min(total_size - last_index, syncaccept.chunk_size)
                syncstate = SyncState()
                syncstate.job_id = job_id
                syncstate.rel_start_index = last_index
                vec_to_pbvec(state.get_relative_range(last_index, end_index), pbvec=syncstate.data)
                inflight += 1
                last_index = end_index
                # Mark the final chunk so the receiver knows when to stop.
                syncstate.done = last_index == total_size
                sock.send(syncstate.SerializeToString())
            # Overwrite syncaccept to allow live changes to inflight and chunk_size.
            syncaccept = SyncAccept.FromString(sock.recv())
            inflight -= 1
            if syncaccept.job_id != job_id:
                raise Exception("Job ids do not match: {}/{}".format(job_id, syncaccept.job_id))
# TODO rewrite as cython/c extension to improve speed.
def receive_state(self, job_id: str, state: CythonBackend, sock: socket, overwrite: bool = False,
chunk_size: int = 2048, max_inflight: int = 4):
syncaccept = SyncAccept()
syncaccept.job_id = job_id
syncaccept.chunk_size = chunk_size
syncaccept.max_inflight = max_inflight
# Let the other side know we are ready to receive and how to send info.
sock.send(syncaccept.SerializeToString())
running = True
while running:
syncstate = SyncState.FromString(sock.recv())
relstart = syncstate.rel_start_index
data = pbvec_to_vec(syncstate.data)
# Break abstraction barrier now
# TODO: unbreak it
if overwrite:
state.overwrite_relative_range(relstart, relstart+len(data), data)
# state.state[relstart:relstart+len(data)] = data
else:
state.addto_relative_range(relstart, relstart+len(data), data)
# state.state[relstart:relstart+len(data)] += data
sock.send(syncaccept.SerializeToString())
running = not syncstate.done
def make_connection(self, job_id: str, myinputstart: int, myinputend: int, myoutputstart: int, myoutputend: int,
partner: WorkerPartner):
self.logger("Connecting to: {}".format(str(partner)))
wp = WorkerPartner()
wp.job_id = job_id
wp.state_index_start = myinputstart
wp.state_index_end = myinputend
wp.output_index_start = myoutputstart
wp.output_index_end = myoutputend
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((partner.addr, partner.port))
sock = | |
= active_selection
logging.info('Toolkit: __init__ got active selection: {0}'
''.format(self.active_selection))
super(BaseListItemViewsOperationTool, self).__init__(app=app,
editor_widget=editor_widget,
command_widget=command_widget,
**kwargs)
def create_editor_widgets(self):
editor_widgets = collections.OrderedDict()
editor_widgets['line_tracer'] = LineTracer()
editor_widgets['line_tracer'].line_color = self.line_color
editor_widgets['line_tracer'].bind(points=self.final_select_applicable_objects)
if self.active_selection:
editor_widgets['line_tracer'].bind(on_touch_down=self.process_active_selection_start)
editor_widgets['line_tracer'].bind(on_touch_move=self.process_active_selection_move)
editor_widgets['line_tracer'].bind(on_touch_up=self.process_active_selection_end)
else:
logging.info('Toolkit: active selection not requested')
return editor_widgets
##########################################################################
# Active selection behavior
# _active_selection_object_touched = DictProperty(None, allownone=True)
# '''Maintains the list of objects that have been passed through and selected
# by the traced line itself. For these, we are sure they will get selected.'''
#
# _active_selection_object_in = DictProperty(None, allownone=True)
# '''Maintains the list of objects that are currently affected
# by the complement of the selection line (straigt line from start
# to end of current line), but have not been directly touched.'''
#
# _active_selection_object_passed = DictProperty(None, allownone=True)
# '''Maintains the list of objects that would currently get selected, if
# the on_touch_up() signal came, although they have not been directly
# touched and the complement line is not currently touching them.'''
_active_selection_slow_mode = BooleanProperty(False)
'''Slow mode: only check for active selection once every 10 touch_move
events.'''
_active_selection_slow_mode_threshold = NumericProperty(30000)
'''Empirically measured slow mode threshold.'''
_active_selection_slow_mode_counter = NumericProperty(1)
_active_selection_slow_mode_modulo = NumericProperty(10)
'''Active selection in slow mode should not take more than 0.001
seconds per move event. E.g., if one selection is taking 0.01 s,
then it has to be spread over 10 events. If it takes 0.5 s, it has
to be spread over 50 events.
The time is measured every time active selection is run in slow mode.
The slow_mode_modulo is adjusted accordingly and the counter is reset.
'''
_active_selection_target_time_per_event = NumericProperty(0.01)
'''The desired amortized time taken by the active selection
computation per on_touch_move event.'''
def process_active_selection_start(self, tracer, touch):
# Unselect all
if self.forgetful:
for v in self.available_views:
if v.is_selected:
v.dispatch('on_release')
def process_active_selection_move(self, tracer, touch):
# This has the problem that it only includes the collided
# objects, not those "inside"
# for v in self.available_views:
# if v.collide_point(touch.x, touch.y):
# if not v.is_selected:
# v.dispatch('on_release')
points = touch.ud['line'].points
m_points = self.editor_to_model_points(points)
# logging.debug('Active selection: {0} points in line'.format(len(m_points)))
self._process_active_selection_slow_mode(m_points)
if self._active_selection_slow_mode:
if self._active_selection_slow_mode_counter % self._active_selection_slow_mode_modulo == 0:
# Let's try running "experimental selection"
# logging.info('Active selection: checking in slow mode')
_t_start = time.clock()
self.provisional_select_applicable_objects(instance=None,
points=touch.ud['line'].points)
_t_end = time.clock()
time_taken = (_t_end - _t_start)
# Set new modulo so that the expected time per event is 0.001.
# Later on, this may cause noticeable lag in the selection, but
# hopefully not so much in the lasso.
self._active_selection_slow_mode_modulo = max(1, min(
int(old_div(time_taken, self._active_selection_target_time_per_event)), 30))
logging.debug('Active selection: time take: {0}, setting modulo to {1}'
''.format(time_taken, self._active_selection_slow_mode_modulo))
self._active_selection_slow_mode_counter = 1
else:
self._active_selection_slow_mode_counter += 1
else:
logging.debug('Active selection: checking in normal mode')
self.provisional_select_applicable_objects(instance=None,
points=touch.ud['line'].points)
    def process_active_selection_end(self, tracer, touch):
        """No-op on touch up: the final selection is triggered separately via
        the tracer's ``points`` binding (final_select_applicable_objects)."""
        pass
    def _process_active_selection_slow_mode(self, points):
        """Decide whether active selection runs in throttled (slow) mode,
        based on the bounding box of the traced points.

        NOTE(review): the area-threshold check below is commented out, so
        ``is_slow`` is currently always True -- slow mode is permanently on.
        Confirm whether that is intentional before re-enabling the check.
        """
        m_points_x, m_points_y = list(zip(*points))
        xmin, xmax = min(m_points_x), max(m_points_x)
        ymin, ymax = min(m_points_y), max(m_points_y)
        # logging.debug('Active selection: checking slow mode with range'
        #               ' ({0}, {1}), ({2}, {3})'
        #               ''.format(xmin, xmax, ymin, ymax))
        is_slow = True
        # if (xmax - xmin) * (ymax - ymin) < self._active_selection_slow_mode_threshold:
        #     is_slow = False
        if self._active_selection_slow_mode != is_slow:
            logging.debug('Active selection: slow mode: changing to {0}'.format(is_slow))
            self._active_selection_slow_mode_counter = 1
            self._active_selection_slow_mode = is_slow
##########################################################################
# Computing the selection from a set of points
def provisional_select_applicable_objects(self, instance, points):
self.select_applicable_objects(instance, points, do_clear_tracer=False)
def final_select_applicable_objects(self, instance, points):
self.select_applicable_objects(instance, points, do_clear_tracer=True)
    def select_applicable_objects(self, instance, points, do_clear_tracer=True):
        """Apply this tool's operation to all objects overlapped by the
        traced polygon. Subclasses must implement this.

        :param instance: the widget that fired the bound event.
        :param points: traced polygon points, in editor coordinates.
        :param do_clear_tracer: if True, clear the traced line afterwards.
        :raises NotImplementedError: always, in this base class.
        """
        raise NotImplementedError()
    @property
    def list_view(self):
        """The list view whose rendered item views this tool operates on.
        Subclasses must override.

        :raises NotImplementedError: always, in this base class.
        """
        raise NotImplementedError()
@property
def available_views(self):
return [c for c in self.list_view.container.children[:]]
    def apply_operation(self, item_view):
        """Override this method in child Tools to make this actually
        do something to the overlapping CropObjectViews.

        :param item_view: one rendered item view overlapped by the selection.
        """
        pass
class CropObjectViewsSelectTool(BaseListItemViewsOperationTool):
    """Select the activated CropObjectViews.

    Traces a lasso; every CropObjectView whose model object overlaps the
    lassoed region gets toggled to selected via its ``on_release`` handler.
    """

    def __init__(self, ignore_staff=False, **kwargs):
        """
        :param ignore_staff: if True, CropObjects whose clsname is one of
            the staff classes are never selected by the lasso.
        """
        super(CropObjectViewsSelectTool, self).__init__(**kwargs)
        self.ignore_staff = ignore_staff

    forgetful = BooleanProperty(True)
    '''If True, will always forget prior selection. If False, will
    be "additive".'''

    line_color = ListProperty([1.0, 0.5, 1.0])

    def select_applicable_objects(self, instance, points, do_clear_tracer=True):
        """Select all CropObjectViews overlapped by the traced polygon.

        :param instance: the widget that fired the bound event (unused).
        :param points: traced polygon points, in editor coordinates.
        :param do_clear_tracer: if True, clear the traced line when done.
        """
        # BUGFIX: removed the dead timing instrumentation that called
        # time.clock() -- removed in Python 3.8 -- whose only consumer
        # (a logging call) was already commented out.
        #
        # Build a binary mask in model coordinates from the traced polygon.
        # Possible speedup: discard points that cannot have any further
        # effect on object selection/deselection. The polygon()
        # implementation is already algorithmically efficient.
        m_points = self.editor_to_model_points(points)
        model_mask = self.model_mask_from_points(m_points)

        # Find all CropObjects that overlap the mask.
        objids = [objid for objid, c in self._model.cropobjects.items()
                  if image_mask_overlaps_cropobject(model_mask, c,
                                                    use_cropobject_mask=self.use_mask_to_determine_selection)]
        if self.ignore_staff:
            objids = [objid for objid in objids
                      if self._model.cropobjects[objid].clsname not in _CONST.STAFF_CROPOBJECT_CLSNAMES]

        if do_clear_tracer:
            logging.info('select_applicable_objects: clearing tracer')
            self.editor_widgets['line_tracer'].clear()

        # In forgetful mode, unselect everything selected so far.
        if self.forgetful:
            for v in self.available_views:
                if v.is_selected:
                    v.dispatch('on_release')

        # Mark the views of overlapped objects as selected.
        applicable_views = [v for v in self.available_views
                            if v.objid in objids]
        logging.info('select_applicable_objects: found {0} objects'
                     ''.format(len(applicable_views)))
        for c in applicable_views:
            self.apply_operation(c)

    def apply_operation(self, item_view):
        """Toggle the view to selected via its ``on_release`` handler."""
        if not item_view.is_selected:
            item_view.dispatch('on_release')

    @property
    def list_view(self):
        """The CropObject list view whose rendered children are selectable."""
        return self.app_ref.cropobject_list_renderer.view

    def _filter_polygon_points_to_relevant_for_selection(self, m_points):
        """We can filter out some points in the polygon if we can be
        sure that if we leave them out, the final selection will be
        the same as if we left them in. Currently a no-op."""
        return m_points
class EdgeViewsSelectTool(BaseListItemViewsOperationTool):
    """Selects all edges that lead to/from CropObjects overlapped
    by the selection.

    An edge is considered overlapped when the straight line between the
    middles of its endpoint CropObjects crosses the lassoed region.
    """

    line_color = ListProperty([1.0, 0.0, 0.0])

    def select_applicable_objects(self, instance, points, do_clear_tracer=True):
        """Select every edge view whose endpoint-midpoint line crosses the
        lasso mask.

        :param instance: the widget that fired the bound event (unused).
        :param points: traced polygon points, in editor coordinates.
        :param do_clear_tracer: if True, clear the traced line when done.

        BUGFIX: removed the leftover hard-coded debug branch for objids
        224/225, which called the deprecated ``logging.warn`` and indexed
        the numpy mask with potentially non-integer midpoint coordinates
        (an IndexError on modern numpy).
        """
        # Get the model mask
        m_points = self.editor_to_model_points(points)
        model_mask = self.model_mask_from_points(m_points)

        # Collect the (start, end) objid pairs of overlapping edges.
        # NOTE(review): this is a little hack-ish, because the assumptions
        # about what is up and what is left may be wrong -- verify the
        # coordinate convention expected by image_mask_overlaps_model_edge.
        objid_pairs = []
        for e in self.available_views:
            c_start = self._model.cropobjects[e.start_objid]
            c_end = self._model.cropobjects[e.end_objid]
            if image_mask_overlaps_model_edge(model_mask,
                                              c_start.middle,
                                              c_end.middle):
                objid_pairs.append((e.start_objid, e.end_objid))

        if do_clear_tracer:
            self.editor_widgets['line_tracer'].clear()

        # Mark their views as selected
        applicable_views = [v for v in self.available_views
                            if v.edge in objid_pairs]
        for c in applicable_views:
            self.apply_operation(c)

    def apply_operation(self, item_view):
        """Toggle the edge view to selected via its ``on_release`` handler."""
        if not item_view.is_selected:
            item_view.dispatch('on_release')

    @property
    def list_view(self):
        """The graph (edge) list view whose rendered children are selectable."""
        return self.app_ref.graph_renderer.view
class CropObjectViewsParseTool(CropObjectViewsSelectTool):
    """Lasso-select CropObjectViews and immediately run the parser on the
    resulting selection."""
    def select_applicable_objects(self, instance, points, do_clear_tracer=True):
        # First select the overlapped views exactly like the parent tool...
        super(CropObjectViewsParseTool, self).select_applicable_objects(instance, points,
                                                                        do_clear_tracer=do_clear_tracer)
        # ...then ask the list view to parse whatever is now selected.
        self.list_view.parse_current_selection()
###############################################################################
# NOT IMPLEMENTED
class NoteSelectTool(AddSymbolTool):
    """Given a bounding box, splits it into a stem and notehead bounding box.
    [NOT IMPLEMENTED]"""

    current_cropobject_selection = ObjectProperty(None)

    def create_editor_widgets(self):
        """Build the bounding-box tracer and wire it to process_note.

        BUGFIX: the original forgot to return ``editor_widgets``, so the
        tool never received its editor widget dict.
        """
        editor_widgets = collections.OrderedDict()
        editor_widgets['bbox_tracer'] = ConnectedComponentBoundingBoxTracer()
        editor_widgets['bbox_tracer'].bind(current_finished_bbox=self.process_note)
        return editor_widgets

    def process_note(self, *args):
        """Split the traced bounding box into stem + notehead. Not implemented.

        BUGFIX: accepts the positional arguments a bound Kivy property
        callback receives, so invoking the tool fails with the intended
        NotImplementedError instead of a signature-mismatch TypeError.
        """
        raise NotImplementedError()
        # Unreachable sketch of the intended behavior, kept for reference:
        # current_postprocessed_bbox = self.editor_widgets['bbox_tracer'].current_postprocessed_bbox
        # self.current_cropobject_selection = current_postprocessed_bbox
###############################################################################
# Image processing tools
class RegionBinarizeTool(MUSCIMarkerTool):
    """Binarize the region in the bounding box using Otsu binarization."""

    def __init__(self, retain_foreground, **kwargs):
        """
        :param retain_foreground: if True, keep the original grayscale values
            of above-threshold pixels; otherwise set them to white (255).
        """
        super(RegionBinarizeTool, self).__init__(**kwargs)
        self.retain_foreground = retain_foreground

    def create_editor_widgets(self):
        """Build the bounding-box tracer and bind it to binarize()."""
        editor_widgets = collections.OrderedDict()
        editor_widgets['bbox_tracer'] = BoundingBoxTracer()
        editor_widgets['bbox_tracer'].bind(
            current_finished_bbox=self.binarize)
        return editor_widgets

    def binarize(self, instance, pos):
        """Binarize the selected region and update the annotated image.

        The Otsu threshold is computed over the nonzero pixels only, so the
        zero-valued background does not skew it.

        :param instance: the tracer widget that fired the event (unused).
        :param pos: dict with 'top', 'left', 'bottom', 'right' editor coords.
        """
        # BUGFIX: time.clock() was removed in Python 3.8; use perf_counter
        # when available (the `or` keeps the time.clock lookup lazy).
        clock = getattr(time, 'perf_counter', None) or time.clock

        # Get model bbox
        ed_t, ed_l, ed_b, ed_r = pos['top'], pos['left'], \
                                 pos['bottom'], pos['right']
        m_t, m_l, m_b, m_r = self.editor_to_model_bbox(ed_t, ed_l, ed_b, ed_r)
        m_t, m_l, m_b, m_r = bbox_to_integer_bounds(m_t, m_l, m_b, m_r)

        _binarization_start = clock()

        # Crop and binarize. The `* 1` forces copies, so the model image is
        # not modified in place before update_image() is called.
        image = self.app_ref.annot_model.image * 1
        crop = image[m_t:m_b, m_l:m_r] * 1
        if crop.sum() == 0:
            # All-zero region: Otsu has nothing to threshold.
            logging.info('RegionBinarizeTool: selected single-color region,'
                         ' cannot binarize anything.')
            self.editor_widgets['bbox_tracer'].clear()
            return
        # Threshold computed only over the nonzero pixels.
        nnz_crop = crop.ravel()[numpy.flatnonzero(crop)]
        nnz_crop_threshold = threshold_otsu(nnz_crop)
        crop[crop < nnz_crop_threshold] = 0
        if not self.retain_foreground:
            crop[crop >= nnz_crop_threshold] = 255
        output_crop = crop
        image[m_t:m_b, m_l:m_r] = output_crop

        _update_start = clock()

        # Update image
        self.app_ref.update_image(image)

        _binarization_end = clock()
        logging.info('RegionBinarizeTool: binarization took {0:.3f} s,'
                     ' image update took {1:.3f} s'
                     ''.format(_update_start - _binarization_start,
                               _binarization_end - _update_start))

        # Clean up
        self.editor_widgets['bbox_tracer'].clear()
class BackgroundLassoTool(LassoBoundingBoxSelectTool):
"""Set the selected area as image background."""
def on_current_cropobject_model_selection(self, instance, pos):
# Ask | |
are ordered by time, with newest backups first. If you do not specify a BackupId or ServerName, the command returns all backups.
This operation is synchronous.
A ``ResourceNotFoundException`` is thrown when the backup does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeBackups>`_
**Request Syntax**
::
response = client.describe_backups(
BackupId='string',
ServerName='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'Backups': [
{
'BackupArn': 'string',
'BackupId': 'string',
'BackupType': 'AUTOMATED'|'MANUAL',
'CreatedAt': datetime(2015, 1, 1),
'Description': 'string',
'Engine': 'string',
'EngineModel': 'string',
'EngineVersion': 'string',
'InstanceProfileArn': 'string',
'InstanceType': 'string',
'KeyPair': 'string',
'PreferredBackupWindow': 'string',
'PreferredMaintenanceWindow': 'string',
'S3DataSize': 123,
'S3DataUrl': 'string',
'S3LogUrl': 'string',
'SecurityGroupIds': [
'string',
],
'ServerName': 'string',
'ServiceRoleArn': 'string',
'Status': 'IN_PROGRESS'|'OK'|'FAILED'|'DELETING',
'StatusDescription': 'string',
'SubnetIds': [
'string',
],
'ToolsVersion': 'string',
'UserArn': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Backups** *(list) --*
Contains the response to a ``DescribeBackups`` request.
- *(dict) --*
Describes a single backup.
- **BackupArn** *(string) --*
The ARN of the backup.
- **BackupId** *(string) --*
The generated ID of the backup. Example: ``myServerName-yyyyMMddHHmmssSSS``
- **BackupType** *(string) --*
The backup type. Valid values are ``automated`` or ``manual`` .
- **CreatedAt** *(datetime) --*
The time stamp when the backup was created in the database. Example: ``2016-07-29T13:38:47.520Z``
- **Description** *(string) --*
A user-provided description for a manual backup. This field is empty for automated backups.
- **Engine** *(string) --*
The engine type that is obtained from the server when the backup is created.
- **EngineModel** *(string) --*
The engine model that is obtained from the server when the backup is created.
- **EngineVersion** *(string) --*
The engine version that is obtained from the server when the backup is created.
- **InstanceProfileArn** *(string) --*
The EC2 instance profile ARN that is obtained from the server when the backup is created. Because this value is stored, you are not required to provide the InstanceProfileArn again if you restore a backup.
- **InstanceType** *(string) --*
The instance type that is obtained from the server when the backup is created.
- **KeyPair** *(string) --*
The key pair that is obtained from the server when the backup is created.
- **PreferredBackupWindow** *(string) --*
The preferred backup period that is obtained from the server when the backup is created.
- **PreferredMaintenanceWindow** *(string) --*
The preferred maintenance period that is obtained from the server when the backup is created.
- **S3DataSize** *(integer) --*
This field is deprecated and is no longer used.
- **S3DataUrl** *(string) --*
This field is deprecated and is no longer used.
- **S3LogUrl** *(string) --*
The Amazon S3 URL of the backup's log file.
- **SecurityGroupIds** *(list) --*
The security group IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ServerName** *(string) --*
The name of the server from which the backup was made.
- **ServiceRoleArn** *(string) --*
The service role ARN that is obtained from the server when the backup is created.
- **Status** *(string) --*
The status of a backup while in progress.
- **StatusDescription** *(string) --*
An informational message about backup status.
- **SubnetIds** *(list) --*
The subnet IDs that are obtained from the server when the backup is created.
- *(string) --*
- **ToolsVersion** *(string) --*
The version of AWS OpsWorks CM-specific tools that is obtained from the server when the backup is created.
- **UserArn** *(string) --*
The IAM user ARN of the requester for manual backups. This field is empty for automated backups.
- **NextToken** *(string) --*
This is not currently implemented for ``DescribeBackups`` requests.
:type BackupId: string
:param BackupId:
Describes a single backup.
:type ServerName: string
:param ServerName:
Returns backups for the server with the specified ServerName.
:type NextToken: string
:param NextToken:
This is not currently implemented for ``DescribeBackups`` requests.
:type MaxResults: integer
:param MaxResults:
This is not currently implemented for ``DescribeBackups`` requests.
:rtype: dict
:returns:
"""
pass
def describe_events(self, ServerName: str, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Describes events for a specified server. Results are ordered by time, with newest events first.
This operation is synchronous.
A ``ResourceNotFoundException`` is thrown when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeEvents>`_
**Request Syntax**
::
response = client.describe_events(
ServerName='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'ServerEvents': [
{
'CreatedAt': datetime(2015, 1, 1),
'ServerName': 'string',
'Message': 'string',
'LogUrl': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **ServerEvents** *(list) --*
Contains the response to a ``DescribeEvents`` request.
- *(dict) --*
An event that is related to the server, such as the start of maintenance or backup.
- **CreatedAt** *(datetime) --*
The time when the event occurred.
- **ServerName** *(string) --*
The name of the server on or for which the event occurred.
- **Message** *(string) --*
A human-readable informational or status message.
- **LogUrl** *(string) --*
The Amazon S3 URL of the event's log file.
- **NextToken** *(string) --*
NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call ``DescribeEvents`` again, and assign the token from the previous results as the value of the ``nextToken`` parameter. If there are no more results, the response object's ``nextToken`` parameter value is ``null`` . Setting a ``nextToken`` value that was not returned in your previous results causes an ``InvalidNextTokenException`` to occur.
:type ServerName: string
:param ServerName: **[REQUIRED]**
The name of the server for which you want to view events.
:type NextToken: string
:param NextToken:
NextToken is a string that is returned in some command responses. It indicates that not all entries have been returned, and that you must run at least one more request to get remaining items. To get remaining results, call ``DescribeEvents`` again, and assign the token from the previous results as the value of the ``nextToken`` parameter. If there are no more results, the response object\'s ``nextToken`` parameter value is ``null`` . Setting a ``nextToken`` value that was not returned in your previous results causes an ``InvalidNextTokenException`` to occur.
:type MaxResults: integer
:param MaxResults:
To receive a paginated response, use this parameter to specify the maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a ``NextToken`` value that you can assign to the ``NextToken`` request parameter to get the next set of results.
:rtype: dict
:returns:
"""
pass
def describe_node_association_status(self, NodeAssociationStatusToken: str, ServerName: str) -> Dict:
"""
Returns the current status of an existing association or disassociation request.
A ``ResourceNotFoundException`` is thrown when no recent association or disassociation request with the specified token is found, or when the server does not exist. A ``ValidationException`` is raised when parameters of the request are not valid.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/opsworkscm-2016-11-01/DescribeNodeAssociationStatus>`_
**Request Syntax**
::
response = client.describe_node_association_status(
NodeAssociationStatusToken='string',
ServerName='string'
)
**Response Syntax**
::
{
'NodeAssociationStatus': 'SUCCESS'|'FAILED'|'IN_PROGRESS',
'EngineAttributes': [
{
'Name': 'string',
'Value': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **NodeAssociationStatus** *(string) --*
The status of the association or disassociation request.
**Possible values:**
* ``SUCCESS`` | |
    # Set to True on devices that directly access hardware, and therefore
    # should have their actions simulated.
hardware_access = True
# This is set by NICOS to indicate that do-methods should be intercepted
# and their result simulated. Combines hardware_access and device mode
# at runtime.
_sim_intercept = True
# Autogenerated inventory of the class' user methods.
methods = {}
# Loop delay defaults. To be set to low values in the test suite.
_base_loop_delay = 0.1
_long_loop_delay = 0.5
    def __init__(self, name, **config):
        """Create the device and register it with the session.

        Registers *name* in the session's device registry (unless the device
        is temporary), stores the lower-cased configuration, sets up the
        bookkeeping containers and the per-device logger, and finally calls
        :meth:`init`.  If :meth:`init` raises, the half-constructed device is
        shut down again before the original exception is re-raised.

        :param name: unique device name; duplicate names raise
            ``ProgrammingError``
        :param config: device configuration; keys are stored lower-cased
        """
        # register self in device registry
        if not self.temporary:
            if name in session.devices:
                raise ProgrammingError('device with name %s already exists' %
                                       name)
            session.devices[name] = self
            session.device_case_map[name.lower()] = name
        self._name = name
        # _config: device configuration (all parameter names lower-case)
        self._config = {name.lower(): value for (name, value) in config.items()}
        # _params: parameter values from config
        self._params = {'name': name}
        # _infoparams: cached list of parameters to get on info()
        self._infoparams = []
        # _adevs: "attached" device instances
        self._adevs = {}
        # superdevs: reverse adevs for dependency tracking
        self._sdevs = set()
        # keep an explicit record of controllers
        self._controllers = set()
        # execution mode
        self._mode = session.mode
        # initialize a logger for the device; assigned via __dict__ to bypass
        # the parameter checking in __setattr__
        self.__dict__['log'] = session.getLogger(name)
        try:
            # initialize device
            self.init()
        except BaseException as err:
            # best-effort cleanup: a failed init() must not leave a
            # half-initialized device behind
            try:
                self.shutdown()
            except Exception:
                self.log.warning('could not shutdown after creation failed',
                                 exc=1)
            raise err
attribute_whitelist = {
'valuetype', # for all devices
'arraydesc', # for image producers
'autodevices', # for HasAutoDevices
}
def __setattr__(self, name, value):
# disallow modification of any public attributes that are not
# parameters or otherwise properties
if name[0] != '_' and name not in self.attribute_whitelist:
obj = getattr(self.__class__, name, Ellipsis)
if obj is Ellipsis:
raise UsageError(self, 'device has no parameter %s, use '
'ListParams(%s) to show all' % (name, self))
elif inspect.isroutine(obj):
raise UsageError(self, '%s cannot be assigned; it is a device '
'method' % name)
elif not isinstance(obj, property):
# this should also be forbidden at some point, but for now just
# emit a warning to make sure it doesn't break the world
self.log.warning('Setting a non-parameter attribute %s.%s: '
'this is deprecated and will be an error '
'in a future version.', self, name)
object.__setattr__(self, name, value)
    def __str__(self):
        """Return the registered device name."""
        return self._name
def __repr__(self):
if not self.description:
return '<device %s (a %s.%s)>' % (self._name,
self.__class__.__module__,
self.__class__.__name__)
return '<device %s "%s" (a %s.%s)>' % (self._name,
self.description,
self.__class__.__module__,
self.__class__.__name__)
    def __reduce__(self):
        # Used for pickling the device e.g. when sending between daemon and
        # GUI; the device reduces to its plain name string on the other side.
        return (str, (self._name,))
    def doReadName(self):
        # the 'name' parameter always reflects the registered device name
        return self._name
def doReadClasses(self):
return [c.__module__ + '.' + c.__name__ for c in self.__class__.__mro__]
    def doUpdateLoglevel(self, value):
        # Propagate a change of the 'loglevel' parameter to the device logger.
        if session.sessiontype == POLLER:
            # suppress debug/info messages from ordinary devices in the poller
            self.log.setLevel(loggers.WARNING)
        else:
            self.log.setLevel(loggers.loglevels[value])
    def _attachDevices(self):
        """Validate and create attached devices.

        For each entry in ``attached_devices``: validates that it is an
        ``Attach`` object, resolves the configured device name(s) via the
        session, registers this device as a super-device (and controller,
        where applicable) of each attached device, and stores the result both
        in ``self._adevs`` and as an ``_attached_<name>`` attribute.

        :raises ProgrammingError: if an entry is not an ``Attach`` object
        :raises ConfigurationError: if a resolved device has the wrong class
        """
        for aname, entry in sorted(self.attached_devices.items()):
            if not isinstance(entry, Attach):
                raise ProgrammingError(self, 'attached device entry for %r is '
                                       'invalid; the value should be a '
                                       'nicos.core.Attach object' % aname)
            value = self._config.pop(aname, None)
            devlist = []
            class_needed = entry.devclass
            if self._mode == SIMULATION:
                # need to relax this instance check for simulation mode;
                # aliases are not yet set correctly when the devices are
                # created
                class_needed = object
            for i, devname in enumerate(entry.check(self, aname, value)):
                # a missing-but-allowed device is represented by None
                if (devname is None) or \
                   (devname not in session.configured_devices and entry.missingok):
                    devlist.append(None)
                    continue
                try:
                    dev = session.getDevice(devname, class_needed, source=self)
                except UsageError:
                    raise ConfigurationError(
                        self, 'device %r item %d has wrong type (should be %s)' %
                        (aname, i + 1, entry.devclass.__name__)) from None
                devlist.append(dev)
            # register the reverse dependency on every attached device
            for dev in devlist:
                if dev is not None:
                    dev._sdevs.add(self._name)
                    if isinstance(self, IsController):
                        dev._controllers.add(self._name)
            # set via __dict__ to bypass the __setattr__ parameter check
            self.__dict__['_attached_%s' % aname] = self._adevs[aname] = \
                devlist[0] if entry.single else devlist
def init(self):
"""Initialize the object; this is called by the NICOS system when the
device instance has been created.
This method first initializes all attached devices (creating them if
necessary), then initializes parameters.
.. XXX expand parameter init procedure
.. method:: doPreinit(mode)
This method, if present, is called before parameters are initialized
(except for parameters that have the ``preinit`` property set to
true).
This allows to initialize a hardware connection if it is necessary
for the various ``doRead...()`` methods of other parameters that
read the current parameter value from the hardware.
.. method:: doInit(mode)
This method, if present, is called after all parameters have been
initialized. It is the correct place to set up additional
attributes, or to perform initial (read-only!) communication with
the hardware.
.. note:: ``doPreinit()`` and ``doInit()`` are called regardless of the
current execution mode. This means that if one of these methods
does hardware access, it must be done only if ``mode != SIMULATION``.
"""
self._cache = None
self._subscriptions = []
self._attachDevices()
self._cache = self._getCache()
lastconfig = None
if self._cache:
lastconfig = self._cache.get('_lastconfig_', self._name, None)
old_classes = self._cache.get(self, 'classes')
new_classes = self.doReadClasses()
if old_classes != new_classes and self._mode == MASTER:
self._cache.put(self, 'classes', new_classes)
def _init_param(param, paraminfo):
param = param.lower()
# mandatory parameters must be in config, regardless of cache
if paraminfo.mandatory and param not in self._config:
raise ConfigurationError(self, 'missing configuration '
'parameter %r' % param)
# Ellipsis representing "no value" since None is a valid value for
# some parameters
value = Ellipsis
# try to get from cache
if self._cache:
value = self._cache.get(self, param, Ellipsis)
if param == 'name': # clean up legacy, wrong values
self._cache.put(self, 'name', self._name)
value = self._name
if value is not Ellipsis:
try:
value = self._validateType(value, param, paraminfo)
except ConfigurationError as e:
self.log.warning('value of %s from cache (%r) is '
'invalid: %r using default handling '
'instead.', param, value, e)
value = Ellipsis
if value is not Ellipsis:
if param in self._ownparams:
self._params[param] = value
return
if param in self._config:
cfgvalue = self._validateType(self._config[param], param)
if cfgvalue != value:
valuestr = self.formatParam(param, value)
cfgvstr = self.formatParam(param, cfgvalue)
prefercache = paraminfo.prefercache
if prefercache is None:
prefercache = paraminfo.settable
if lastconfig and lastconfig.get(param) != cfgvalue:
self.log.warning(
"value of '%s' from cache (%s) differs from "
'configured value (%s), using configured '
'since it was changed in the setup file',
param, valuestr, cfgvstr)
value = cfgvalue
self._cache.put(self, param, value)
elif prefercache:
self.log.warning(
"value of '%s' from cache (%s) differs from "
'configured value (%s), using cached',
param, valuestr, cfgvstr)
else:
self.log.warning(
"value of '%s' from cache (%s) differs from "
'configured value (%s), using configured',
param, valuestr, cfgvstr)
value = cfgvalue
self._cache.put(self, param, value)
elif not paraminfo.settable and paraminfo.prefercache is False:
# parameter is in cache, but not in config: if it is not
# settable and has a default, use that (since most probably
# the default is intended to be used but has changed)
defvalue = paraminfo.default
if defvalue != value:
defvalue = self._validateType(defvalue, param,
paraminfo)
if defvalue != value:
valuestr = self.formatParam(param, value)
defvstr = self.formatParam(param, paraminfo.default)
self.log.warning(
"value of '%s' from cache (%s) differs from "
'default value (%s), using default',
param, valuestr, defvstr)
value = paraminfo.default
self._cache.put(self, param, value)
umethod = getattr(self, 'doUpdate' + param.title(), None)
if umethod:
umethod(value)
self._params[param] = value
else:
self._initParam(param, paraminfo)
notfromcache.append(param)
if param in self._config and paraminfo.internal:
self.log.warning(
"'%s' is configured in a setup file although "
"declared as internal parameter",
param
)
if paraminfo.category is not None:
if paraminfo.category not in ALLOWED_CATEGORIES:
self.log.error('parameter %s uses illegal category %r!',
param, paraminfo.category)
else:
self._infoparams.append((paraminfo.category, param,
paraminfo.unit))
# end of _init_param()
notfromcache = []
later = []
for param, paraminfo in self.parameters.items():
if paraminfo.preinit:
_init_param(param, paraminfo)
else:
later.append((param, paraminfo))
if hasattr(self, 'doPreinit'):
self.doPreinit(self._mode)
for param, paraminfo in later:
_init_param(param, paraminfo)
# warn about parameters that weren't present in cache
if self._cache and notfromcache:
self.log.info('these parameters were not present in cache: %s',
', '.join(notfromcache))
self._infoparams.sort()
# subscribe to parameter value updates, if a doUpdate method exists
if self._cache:
for param in self.parameters:
umethod = getattr(self, 'doUpdate' + param.title(), None)
if umethod:
def updateparam(key, value, time, umethod=umethod):
umethod(value)
self._cache.addCallback(self, param, updateparam)
self._subscriptions.append((param, updateparam))
if self._cache:
| |
<gh_stars>0
# -*- coding:utf-8 -*-
__author__ = 'q00222<EMAIL>'
import os
import aws_util
import json
import log as logger
import aws_access_cloud_data_handler as data_handler
_install_conf = os.path.join("/home/hybrid_cloud/conf",
'aws_access_cloud_install.conf')
def _read_install_conf():
    """Load the installer configuration from ``_install_conf``.

    :returns: the parsed JSON dict, or ``None`` (after logging an error)
        when the configuration file does not exist.
    """
    if not os.path.exists(_install_conf):
        logger.error("read %s : No such file." % _install_conf)
        return None
    # BUG FIX: opened read-only -- the original used 'r+', which needlessly
    # requires write permission for a pure read; also parse the stream
    # directly instead of read()+loads.
    with open(_install_conf, 'r') as fd:
        return json.load(fd)
def _write_install_conf():
    """Write the default installer configuration file and return its content.

    The file lists the image names and VM flavors used for every component
    of the cascaded cloud (cascaded, vpn, hynode, v2v, ceph).
    """
    defaults = {
        "cascaded_image": "OpenStack-B111T-v0.994",
        "cascaded_vm_type": "c3.xlarge",
        "vpn_image": "hybrid-cloud-vpn-cascaded_v0.1",
        "vpn_vm_type": "t2.micro",
        "hynode_image": "hypernode_v3_019",
        "v2v_image": "V2V_Gateway",
        "v2v_vm_type": "t2.micro",
        "ceph_image": "ceph_template_10.22",
        "ceph_vm_type": "t2.micro",
    }
    with open(_install_conf, 'w+') as fd:
        fd.write(json.dumps(defaults, indent=4))
    return defaults
class AWSCascadedInstaller(object):
    def __init__(self, cloud_id, access_key=None, secret_key=None,
                 region=None, az=None):
        """Prepare an installer for the AWS cascaded cloud *cloud_id*.

        Creates the low-level AWS helper, loads image/flavor names from the
        install configuration file, initializes all resource bookkeeping
        attributes to None/empty, and finally restores any state previously
        persisted for this cloud via the data handler.

        :param cloud_id: identifier of the cascaded cloud being installed
        :param access_key: AWS access key id
        :param secret_key: AWS secret access key
        :param region: AWS region name
        :param az: AWS availability zone
        """
        self.cloud_id = cloud_id
        self.region = region
        self.az = az
        self.access_key = access_key
        self.secret_key = secret_key
        self.installer = aws_util.AWSInstaller(access_key,
                                               secret_key, region,
                                               az)
        # image names and VM flavors for every component.
        # NOTE(review): _read_install_conf() returns None when the config
        # file is missing, which would raise a TypeError on the subscripts
        # below -- confirm the file is guaranteed to exist at this point.
        install_conf = _read_install_conf()
        self.cascaded_image = install_conf["cascaded_image"]
        self.cascaded_vm_type = install_conf["cascaded_vm_type"]
        self.vpn_image = install_conf["vpn_image"]
        self.vpn_vm_type = install_conf["vpn_vm_type"]
        self.hynode_image = install_conf["hynode_image"]
        self.v2v_image = install_conf["v2v_image"]
        self.v2v_vm_type = install_conf["v2v_vm_type"]
        self.ceph_image = install_conf["ceph_image"]
        self.ceph_vm_type = install_conf["ceph_vm_type"]
        # VPC / subnet / routing state (filled in by install_vpc)
        self.vpc_id = None
        self.debug_subnet_cidr = None
        self.debug_subnet_id = None
        self.base_subnet_cidr = None
        self.base_subnet_id = None
        self.api_subnet_cidr = None
        self.api_subnet_id = None
        self.tunnel_subnet_cidr = None
        self.tunnel_subnet_id = None
        self.ceph_subnet_cidr = None
        self.ceph_subnet_id = None
        self.gateway_id = None
        self.rtb_id = None
        # cascaded VM state (filled in by install_cascaded)
        self.cascaded_vm = None
        self.cascaded_vm_id = None
        self.cascaded_debug_ip = None
        self.cascaded_debug_interface_id = None
        self.cascaded_base_ip = None
        self.cascaded_base_interface_id = None
        self.cascaded_api_ip = None
        self.cascaded_api_interface_id = None
        self.cascaded_tunnel_ip = None
        self.cascaded_tunnel_interface_id = None
        self.cascaded_eip_public_ip = None
        self.cascaded_eip_allocation_id = None
        # VPN VM state (filled in by install_vpn)
        self.vpn_vm = None
        self.vpn_vm_id = None
        self.vpn_api_ip = None
        self.vpn_tunnel_ip = None
        self.vpn_eip_public_ip = None
        self.vpn_eip_allocation_id = None
        self.vpn_api_interface_id = None
        self.vpn_tunnel_interface_id = None
        # V2V gateway state (filled in by install_v2v_gateway)
        self.v2v_vm_id = None
        self.v2v_ip = None
        # hynode image id (filled in by query_hynode_image_id)
        self.hynode_image_id = None
        # ceph cluster state (filled in by install_ceph_cluster)
        self.ceph_deploy_vm_id = None
        self.ceph_deploy_ip = None
        self.ceph_deploy_interface_id = None
        self.ceph_node1_vm_id = None
        self.ceph_node1_ip = None
        self.ceph_node1_interface_id = None
        self.ceph_node2_vm_id = None
        self.ceph_node2_ip = None
        self.ceph_node2_interface_id = None
        self.ceph_node3_vm_id = None
        self.ceph_node3_ip = None
        self.ceph_node3_interface_id = None
        # external network elastic IPs: public_ip -> allocation_id
        self.ext_net_eips = {}
        # restore any previously persisted state for this cloud
        self._read_aws_access_cloud()
    def _read_aws_access_cloud(self):
        """Restore previously persisted resource state for this cloud.

        Reads the stored record via the data handler and copies every section
        that is present ("vpc", "cascaded", "vpn", "v2v_gateway", "hynode",
        "ceph_cluster", "ext_net_eips") into the corresponding instance
        attributes.  Missing sections leave the attributes at their defaults,
        which makes the install_* methods idempotent across runs.
        """
        cloud_info = data_handler.get_aws_access_cloud(self.cloud_id)
        if not cloud_info:
            return
        if "vpc" in cloud_info.keys():
            vpc_info = cloud_info["vpc"]
            self.vpc_id = vpc_info["vpc_id"]
            self.debug_subnet_cidr = vpc_info["debug_subnet_cidr"]
            self.debug_subnet_id = vpc_info["debug_subnet_id"]
            self.base_subnet_cidr = vpc_info["base_subnet_cidr"]
            self.base_subnet_id = vpc_info["base_subnet_id"]
            self.api_subnet_cidr = vpc_info["api_subnet_cidr"]
            self.api_subnet_id = vpc_info["api_subnet_id"]
            self.tunnel_subnet_cidr = vpc_info["tunnel_subnet_cidr"]
            self.tunnel_subnet_id = vpc_info["tunnel_subnet_id"]
            self.ceph_subnet_cidr = vpc_info["ceph_subnet_cidr"]
            self.ceph_subnet_id = vpc_info["ceph_subnet_id"]
            self.gateway_id = vpc_info["gateway_id"]
            self.rtb_id = vpc_info["rtb_id"]
        if "cascaded" in cloud_info.keys():
            cascaded_info = cloud_info["cascaded"]
            self.cascaded_vm_id = cascaded_info["vm_id"]
            self.cascaded_debug_ip = cascaded_info["debug_ip"]
            self.cascaded_debug_interface_id = cascaded_info["debug_interface_id"]
            self.cascaded_base_ip = cascaded_info["base_ip"]
            self.cascaded_base_interface_id = cascaded_info["base_interface_id"]
            self.cascaded_api_ip = cascaded_info["api_ip"]
            self.cascaded_api_interface_id = cascaded_info["api_interface_id"]
            self.cascaded_tunnel_ip = cascaded_info["tunnel_ip"]
            self.cascaded_tunnel_interface_id = cascaded_info["tunnel_interface_id"]
            self.cascaded_eip_public_ip = cascaded_info["eip_public_ip"]
            self.cascaded_eip_allocation_id = cascaded_info["eip_allocation_id"]
        if "vpn" in cloud_info.keys():
            vpn_info = cloud_info["vpn"]
            self.vpn_vm_id = vpn_info["vm_id"]
            self.vpn_api_ip = vpn_info["api_ip"]
            self.vpn_tunnel_ip = vpn_info["tunnel_ip"]
            self.vpn_eip_public_ip = vpn_info["eip_public_ip"]
            self.vpn_eip_allocation_id = vpn_info["eip_allocation_id"]
            self.vpn_api_interface_id = vpn_info["api_interface_id"]
            self.vpn_tunnel_interface_id = vpn_info["tunnel_interface_id"]
        if "v2v_gateway" in cloud_info.keys():
            v2v_info = cloud_info["v2v_gateway"]
            self.v2v_vm_id = v2v_info["vm_id"]
            self.v2v_ip = v2v_info["ip"]
        if "hynode" in cloud_info.keys():
            hynode_info = cloud_info["hynode"]
            # NOTE(review): package_aws_access_cloud_info() emits this value
            # under the key "ami_id" -- confirm which key is persisted.
            self.hynode_image_id = hynode_info["image_id"]
        if "ceph_cluster" in cloud_info.keys():
            ceph_cluster_info = cloud_info["ceph_cluster"]
            self.ceph_deploy_vm_id = ceph_cluster_info["deploy_vm_id"]
            self.ceph_deploy_ip = ceph_cluster_info["deploy_ip"]
            self.ceph_node1_vm_id = ceph_cluster_info["node1_vm_id"]
            self.ceph_node1_ip = ceph_cluster_info["node1_ip"]
            self.ceph_node2_vm_id = ceph_cluster_info["node2_vm_id"]
            self.ceph_node2_ip = ceph_cluster_info["node2_ip"]
            self.ceph_node3_vm_id = ceph_cluster_info["node3_vm_id"]
            self.ceph_node3_ip = ceph_cluster_info["node3_ip"]
        if "ext_net_eips" in cloud_info.keys():
            self.ext_net_eips = cloud_info["ext_net_eips"]
def _get_cascaded_ip(self, debug_cidr_block, base_cidr_block,
api_cidr_block, tunnel_cidr_block):
ip_list = debug_cidr_block.split(".")
self.cascaded_debug_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "12"])
ip_list = base_cidr_block.split(".")
self.cascaded_base_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "12"])
ip_list = api_cidr_block.split(".")
self.cascaded_api_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "150"])
ip_list = tunnel_cidr_block.split(".")
self.cascaded_tunnel_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "12"])
def _get_vpn_ip(self, api_cidr_block, tunnel_cidr_block):
ip_list = api_cidr_block.split(".")
self.vpn_api_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "254"])
ip_list = tunnel_cidr_block.split(".")
self.vpn_tunnel_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "254"])
def _get_v2v_ip(self, api_cidr_block):
ip_list = api_cidr_block.split(".")
self.v2v_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "253"])
def _get_ceph_cluster_ip(self, ceph_cidr_block):
ip_list = ceph_cidr_block.split(".")
self.ceph_deploy_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "249"])
self.ceph_node1_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "250"])
self.ceph_node2_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "251"])
self.ceph_node3_ip = ".".join(
[ip_list[0], ip_list[1], ip_list[2], "252"])
    def install_vpc(self, vpc_cidr, debug_cidr, base_cidr,
                    api_cidr, tunnel_cidr, ceph_cidr, green_ips):
        """Create the VPC with its subnets, gateway and default route.

        Idempotent: returns immediately when a VPC id is already known.
        Security-group rules are opened for each address in *green_ips*,
        and the resulting resource ids are persisted via the data handler.
        """
        if self.vpc_id:
            return
        # install vpc
        vpc = self.installer.create_vpc(vpc_cidr)
        self.vpc_id = vpc.id
        self.installer.associate_dhcp_options("default", vpc.id)
        # add internet gateway
        self.gateway_id = self.installer.create_internet_gateway()
        self.installer.attach_internet_gateway(self.gateway_id, self.vpc_id)
        # get the route table id; every VPC has only one route table
        route_tables = self.installer.get_all_route_tables(self.vpc_id)
        self.rtb_id = route_tables[0].id
        self.installer.create_route(self.rtb_id, "0.0.0.0/0",
                                    gateway_id=self.gateway_id)
        # install subnets
        self.debug_subnet_cidr = debug_cidr
        self.debug_subnet_id = self.installer.create_subnet(self.vpc_id,
                                                            debug_cidr)
        self.base_subnet_cidr = base_cidr
        self.base_subnet_id = self.installer.create_subnet(self.vpc_id,
                                                           base_cidr)
        self.api_subnet_cidr = api_cidr
        self.api_subnet_id = self.installer.create_subnet(self.vpc_id,
                                                          api_cidr)
        self.tunnel_subnet_cidr = tunnel_cidr
        self.tunnel_subnet_id = self.installer.create_subnet(self.vpc_id,
                                                             tunnel_cidr)
        # NOTE(review): no subnet is created from ceph_cidr; the ceph subnet
        # aliases the api subnet -- confirm this is intentional.
        self.ceph_subnet_cidr = ceph_cidr
        self.ceph_subnet_id = self.api_subnet_id
        # open access for each trusted ("green") address
        for ip in green_ips:
            self.add_security("%s/32" % ip)
        data_handler.write_vpc(self.cloud_id,
                               self.vpc_id,
                               self.debug_subnet_cidr, self.debug_subnet_id,
                               self.base_subnet_cidr, self.base_subnet_id,
                               self.api_subnet_cidr, self.api_subnet_id,
                               self.tunnel_subnet_cidr, self.tunnel_subnet_id,
                               self.ceph_subnet_cidr, self.ceph_subnet_id,
                               self.gateway_id, self.rtb_id)
    def install_cascaded(self, debug_cidr_block, base_cidr_block,
                         api_cidr_block, tunnel_cidr_block):
        """Create the cascaded OpenStack VM with one NIC per subnet.

        Idempotent: returns immediately when the VM already exists.  After
        creation, the interface ids are recovered by matching each created
        interface's private IP against the derived addresses, an elastic IP
        is associated with the api interface, and the state is persisted.
        """
        if self.cascaded_vm_id:
            return
        # install cascaded vm
        self._get_cascaded_ip(debug_cidr_block, base_cidr_block,
                              api_cidr_block, tunnel_cidr_block)
        cascaded_debug_en = aws_util.AWSInterface(
            self.debug_subnet_id, self.cascaded_debug_ip)
        cascaded_base_en = aws_util.AWSInterface(
            self.base_subnet_id, self.cascaded_base_ip)
        cascaded_api_en = aws_util.AWSInterface(
            self.api_subnet_id, self.cascaded_api_ip)
        cascaded_tunnel_en = aws_util.AWSInterface(
            self.tunnel_subnet_id, self.cascaded_tunnel_ip)
        self.cascaded_vm = self.installer.create_vm(self.cascaded_image,
                                                    self.cascaded_vm_type,
                                                    cascaded_debug_en,
                                                    cascaded_base_en,
                                                    cascaded_api_en,
                                                    cascaded_tunnel_en)
        self.cascaded_vm_id = self.cascaded_vm.id
        # map the created interfaces back to their roles by private IP
        for interface in self.cascaded_vm.interfaces:
            if self.cascaded_api_ip == interface.private_ip_address:
                self.cascaded_api_interface_id = interface.id
                continue
            if self.cascaded_base_ip == interface.private_ip_address:
                self.cascaded_base_interface_id = interface.id
                continue
            if self.cascaded_debug_ip == interface.private_ip_address:
                self.cascaded_debug_interface_id = interface.id
                continue
            if self.cascaded_tunnel_ip == interface.private_ip_address:
                self.cascaded_tunnel_interface_id = interface.id
                continue
        # expose the api interface through an elastic (public) IP
        cascaded_eip = self.installer.allocate_elastic_address()
        self.installer.associate_elastic_address(
            eip=cascaded_eip,
            network_interface_id=self.cascaded_api_interface_id)
        self.cascaded_eip_public_ip = cascaded_eip.public_ip
        self.cascaded_eip_allocation_id = cascaded_eip.allocation_id
        # persist so a later run can resume without re-creating the VM
        data_handler.write_cascaded(self.cloud_id,
                                    self.cascaded_vm_id,
                                    self.cascaded_eip_public_ip,
                                    self.cascaded_eip_allocation_id,
                                    self.cascaded_debug_ip,
                                    self.cascaded_debug_interface_id,
                                    self.cascaded_base_ip,
                                    self.cascaded_base_interface_id,
                                    self.cascaded_api_ip,
                                    self.cascaded_api_interface_id,
                                    self.cascaded_tunnel_ip,
                                    self.cascaded_tunnel_interface_id)
    def install_vpn(self, api_cidr_block, tunnel_cidr_block,
                    local_api_cidr, local_tunnel_cidr):
        """Create the VPN VM bridging the local and AWS api/tunnel networks.

        Idempotent: returns immediately when the VM already exists.  The VPN
        VM gets one NIC on the api subnet and one on the tunnel subnet, an
        elastic IP on the api interface, and routes towards the local
        api/tunnel CIDR blocks.
        """
        if self.vpn_vm_id:
            return
        # install vpn vm
        self._get_vpn_ip(api_cidr_block, tunnel_cidr_block)
        vpn_api_en = aws_util.AWSInterface(
            self.api_subnet_id, self.vpn_api_ip)
        vpn_tunnel_en = aws_util.AWSInterface(
            self.tunnel_subnet_id, self.vpn_tunnel_ip)
        self.vpn_vm = self.installer.create_vm(self.vpn_image,
                                               self.vpn_vm_type,
                                               vpn_api_en, vpn_tunnel_en)
        self.vpn_vm_id = self.vpn_vm.id
        # map the created interfaces back to their roles by private IP
        for interface in self.vpn_vm.interfaces:
            if self.vpn_api_ip == interface.private_ip_address:
                self.vpn_api_interface_id = interface.id
            elif self.vpn_tunnel_ip == interface.private_ip_address:
                self.vpn_tunnel_interface_id = interface.id
        # disable source/destination checking on both interfaces --
        # presumably so the VPN VM can forward traffic that is not addressed
        # to itself (TODO confirm)
        self.installer.disable_network_interface_sdcheck(
            self.vpn_api_interface_id)
        self.installer.disable_network_interface_sdcheck(
            self.vpn_tunnel_interface_id)
        vpn_eip = self.installer.allocate_elastic_address()
        self.installer.associate_elastic_address(
            eip=vpn_eip, network_interface_id=self.vpn_api_interface_id)
        self.vpn_eip_public_ip = vpn_eip.public_ip
        self.vpn_eip_allocation_id = vpn_eip.allocation_id
        # route the local-side networks through this VPN
        self.add_route("api", local_api_cidr)
        self.add_route("tunnel", local_tunnel_cidr)
        data_handler.write_vpn(self.cloud_id,
                               self.vpn_vm_id,
                               self.vpn_eip_public_ip,
                               self.vpn_eip_allocation_id,
                               self.vpn_api_ip, self.vpn_tunnel_ip,
                               self.vpn_api_interface_id,
                               self.vpn_tunnel_interface_id)
def install_v2v_gateway(self, api_cidr_block):
if self.v2v_vm_id:
return
# install v2v gateway
self._get_v2v_ip(api_cidr_block)
v2v_en = aws_util.AWSInterface(self.api_subnet_id, self.v2v_ip)
v2v_vm = self.installer.create_vm(self.v2v_image,
self.v2v_vm_type,
v2v_en)
self.v2v_vm_id = v2v_vm.id
data_handler.write_v2v_gateway(self.cloud_id,
self.v2v_vm_id, self.v2v_ip)
def install_ceph_cluster(self, api_cidr_block):
if self.ceph_node1_ip:
return
# install ceph cluster
self._get_ceph_cluster_ip(api_cidr_block)
ceph_deploy_en = aws_util.AWSInterface(
self.ceph_subnet_id, self.ceph_deploy_ip)
ceph_deploy_vm = self.installer.create_vm(
self.ceph_image, self.ceph_vm_type, ceph_deploy_en)
self.ceph_deploy_vm_id = ceph_deploy_vm.id
ceph_node1_en = aws_util.AWSInterface(
self.ceph_subnet_id, self.ceph_node1_ip)
ceph_node1_vm = self.installer.create_vm(
self.ceph_image, self.ceph_vm_type, ceph_node1_en)
self.ceph_node1_vm_id = ceph_node1_vm.id
ceph_node2_en = aws_util.AWSInterface(
self.ceph_subnet_id, self.ceph_node2_ip)
ceph_node2_vm = self.installer.create_vm(
self.ceph_image, self.ceph_vm_type, ceph_node2_en)
self.ceph_node2_vm_id = ceph_node2_vm.id
ceph_node3_en = aws_util.AWSInterface(
self.ceph_subnet_id, self.ceph_node3_ip)
ceph_node3_vm = self.installer.create_vm(
self.ceph_image, self.ceph_vm_type, ceph_node3_en)
self.ceph_node3_vm_id = ceph_node3_vm.id
data_handler.write_ceph_cluster(
self.cloud_id,
self.ceph_deploy_vm_id, self.ceph_deploy_ip,
self.ceph_node1_vm_id, self.ceph_node1_ip,
self.ceph_node2_vm_id, self.ceph_node2_ip,
self.ceph_node3_vm_id, self.ceph_node3_ip)
def query_hynode_image_id(self):
if self.hynode_image_id:
return
if not self.hynode_image:
self.hynode_image_id = self.installer.query_image_id(
self.hynode_image)
data_handler.write_hynode(self.cloud_id, self.hynode_image_id)
def allocate_ext_net_eip(self):
if self.ext_net_eips:
return
first_eip = None
try:
first_eip = self.installer.allocate_elastic_address()
except Exception as e:
logger.error("allocate elastic address error, check the account. "
"error: %s" % e.message)
if not first_eip:
logger.error("allocate elastic address error, check the account.")
self.ext_net_eips[first_eip.public_ip] = first_eip.allocation_id
fist_8bit = first_eip.public_ip.split('.')[0]
while True:
try:
eip = self.installer.allocate_elastic_address()
fist_8bit_ip = eip.public_ip.split('.')[0]
if fist_8bit == fist_8bit_ip:
self.ext_net_eips[eip.public_ip] = eip.allocation_id
else:
self.installer.release_elastic_address(eip.public_ip)
except:
break
data_handler.write_ext_net_eip(
cloud_id=self.cloud_id, ext_net_eips=self.ext_net_eips)
def package_aws_access_cloud_info(self):
if not self.vpc_id:
return None
info = {"vpc":
{"vpc_id": self.vpc_id,
"debug_subnet_cidr": self.debug_subnet_cidr,
"debug_subnet_id": self.debug_subnet_id,
"base_subnet_cidr": self.base_subnet_cidr,
"base_subnet_id": self.base_subnet_id,
"api_subnet_cidr": self.api_subnet_cidr,
"api_subnet_id": self.api_subnet_id,
"tunnel_subnet_cidr": self.tunnel_subnet_cidr,
"tunnel_subnet_id": self.tunnel_subnet_id,
"ceph_subnet_cidr": self.ceph_subnet_cidr,
"ceph_subnet_id": self.ceph_subnet_id,
"gateway_id": self.gateway_id,
"rtb_id": self.rtb_id},
"cascaded":
{"vm_id": self.cascaded_vm_id,
"debug_ip": self.cascaded_debug_ip,
"debug_interface_id": self.cascaded_debug_interface_id,
"base_ip": self.cascaded_base_ip,
"base_interface_id": self.cascaded_base_interface_id,
"api_ip": self.cascaded_api_ip,
"api_interface_id": self.cascaded_api_interface_id,
"tunnel_ip": self.cascaded_tunnel_ip,
"tunnel_interface_id": self.cascaded_tunnel_interface_id,
"eip_public_ip": self.cascaded_eip_public_ip,
"eip_allocation_id": self.cascaded_eip_allocation_id},
"vpn":
{"vm_id": self.vpn_vm_id,
"api_ip": self.vpn_api_ip,
"api_interface_id": self.vpn_api_interface_id,
"tunnel_ip": self.vpn_tunnel_ip,
"tunnel_interface_id": self.vpn_tunnel_interface_id,
"eip_public_ip": self.vpn_eip_public_ip,
"eip_allocation_id": self.vpn_eip_allocation_id},
"ceph_cluster":
{"deploy_vm_id": self.ceph_deploy_vm_id,
"deploy_ip": self.ceph_deploy_ip,
"node1_vm_id": self.ceph_node1_vm_id,
"node1_ip": self.ceph_node1_ip,
"node2_vm_id": self.ceph_node2_vm_id,
"node2_ip": self.ceph_node2_ip,
"node3_vm_id": self.ceph_node3_vm_id,
"node3_ip": self.ceph_node3_ip},
"v2v_gateway":
{"vm_id": self.v2v_vm_id,
"ip": self.v2v_ip},
"hynode":
{"ami_id": self.hynode_image_id},
"ext_net_eips": self.ext_net_eips.keys()}
return info
def rollback(self):
if self.cascaded_eip_public_ip is not None:
self.installer.disassociate_elastic_address(
self.cascaded_eip_public_ip)
self.installer.release_elastic_address(
self.cascaded_eip_allocation_id)
self.cascaded_eip_public_ip = None
self.cascaded_eip_allocation_id = None
if self.vpn_eip_allocation_id is not None:
self.installer.disassociate_elastic_address(
self.vpn_eip_public_ip)
self.installer.release_elastic_address(
self.vpn_eip_allocation_id)
self.vpn_eip_public_ip = None
self.vpn_eip_allocation_id = None
if self.cascaded_vm_id is not None:
self.installer.terminate_instance(self.cascaded_vm_id)
self.cascaded_vm_id = None
if self.vpn_vm_id is not None:
self.installer.terminate_instance(self.vpn_vm_id)
self.vpn_vm_id = None
if self.v2v_vm_id is not None:
self.installer.terminate_instance(self.v2v_vm_id)
self.v2v_vm_id = None
if self.ceph_deploy_vm_id is not None:
self.installer.terminate_instance(self.ceph_deploy_vm_id)
self.ceph_deploy_vm_id = None
if self.ceph_node1_vm_id is not None:
self.installer.terminate_instance(self.ceph_node1_vm_id)
self.ceph_node1_vm_id = None
if self.ceph_node2_vm_id is not None:
self.installer.terminate_instance(self.ceph_node2_vm_id)
self.ceph_node2_vm_id = None
if self.ceph_node3_vm_id is not None:
self.installer.terminate_instance(self.ceph_node3_vm_id)
self.ceph_node3_vm_id = None
if self.gateway_id is not None:
self.installer.detach_internet_gateway(self.gateway_id, self.vpc_id)
self.installer.delete_internet_gateway(self.gateway_id)
self.gateway_id = None
if self.debug_subnet_id is not None:
self.installer.delete_subnet(self.debug_subnet_id)
self.debug_subnet_id = | |
Filters')
imageFiltersWindow.attributes('-toolwindow', 1) # Makes window framing small, like a toolbox/widget.
# Calculate the spawning position of the new window
rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
imageFiltersWindow.geometry( '+' + str(rootDistanceFromScreenLeft + 70) + '+' + str(rootDistanceFromScreenTop + 70) )
Gui.root.imageFiltersWindow = imageFiltersWindow
mainFrame = Tk.Frame(imageFiltersWindow)
ttk.Label(mainFrame, text='Only show textures that meet this criteria:').pack(padx=10, pady=4)
widthTuple = imageFilters['widthFilter']
row1 = Tk.Frame(mainFrame)
ttk.Label(row1, text='Width: ').pack(side='left')
widthComparator = Tk.StringVar()
widthComparator.set( widthTuple[0] )
Tk.OptionMenu(row1, widthComparator, '<', '<=', '=', '>', '>=').pack(side='left')
widthValue = Tk.StringVar()
widthValue.set( widthTuple[1] )
Tk.Entry(row1, textvariable=widthValue, width=6).pack(side='left')
row1.pack(padx=10, pady=4)
heightTuple = imageFilters['heightFilter']
row2 = Tk.Frame(mainFrame)
ttk.Label(row2, text='Height: ').pack(side='left')
heightComparator = Tk.StringVar()
heightComparator.set( heightTuple[0] )
Tk.OptionMenu(row2, heightComparator, '<', '<=', '=', '>', '>=').pack(side='left')
heightValue = Tk.StringVar()
heightValue.set( heightTuple[1] )
Tk.Entry(row2, textvariable=heightValue, width=6).pack(side='left')
row2.pack(padx=10, pady=4)
aspectRatioTuple = imageFilters['aspectRatioFilter']
row3 = Tk.Frame(mainFrame)
ttk.Label(row3, text='Aspect Ratio: ').pack(side='left')
aspectRatioComparator = Tk.StringVar()
aspectRatioComparator.set( aspectRatioTuple[0] )
Tk.OptionMenu(row3, aspectRatioComparator, '<', '<=', '=', '>', '>=').pack(side='left')
aspectRatioValue = Tk.StringVar()
aspectRatioValue.set( aspectRatioTuple[1] )
Tk.Entry(row3, textvariable=aspectRatioValue, width=6).pack(side='left')
row3.pack(padx=10, pady=4)
imageTypeTuple = imageFilters['imageTypeFilter']
row4 = Tk.Frame(mainFrame)
ttk.Label(row4, text='Texture Type: ').pack(side='left')
imageTypeComparator = Tk.StringVar()
imageTypeComparator.set( imageTypeTuple[0] )
Tk.OptionMenu(row4, imageTypeComparator, '<', '<=', '=', '>', '>=').pack(side='left')
imageTypeValue = Tk.StringVar()
imageTypeValue.set( imageTypeTuple[1] )
Tk.Entry(row4, textvariable=imageTypeValue, width=6).pack(side='left')
row4.pack(padx=10, pady=4)
offsetTuple = imageFilters['offsetFilter']
row5 = Tk.Frame(mainFrame)
ttk.Label(row5, text='Offset (location in file): ').pack(side='left')
offsetComparator = Tk.StringVar()
offsetComparator.set( offsetTuple[0] )
Tk.OptionMenu(row5, offsetComparator, '<', '<=', '=', '>', '>=').pack(side='left')
offsetValue = Tk.StringVar()
offsetValue.set( offsetTuple[1] )
Tk.Entry(row5, textvariable=offsetValue, width=10).pack(side='left')
row5.pack(padx=10, pady=4)
# Button functions
def close():
    # Tear down the filters window and clear the reference on the root,
    # so the rest of the GUI knows the window is no longer open.
    Gui.root.imageFiltersWindow.destroy()
    Gui.root.imageFiltersWindow = None
imageFiltersWindow.protocol('WM_DELETE_WINDOW', close) # Overrides the 'X' close button.
def save():
    """Validate the filter entries and persist them to the settings file.

    Each filter value must be a plain number or blank; the aspect ratio
    may additionally be a fraction like "4:3" or "4/3".  Valid filters
    are stored in the imageFilters dict and written to settings.ini via
    the settings parser.  Returns True when everything saved, False when
    the settings file is missing or some entries were invalid (in which
    case the invalid ones are reported to the user).
    """
    if not os.path.exists( settingsFile ):
        msg( 'Unable to find the settings file. Reloading this window should recreate it.' )
        return False
    unsavedSettings = []
    with open( settingsFile, 'w') as theSettingsFile:
        # For each setting, if the value is a number or blank, update the value and its comparitor in the program and settings file.
        width = widthValue.get().replace(',', '')  # strip thousands separators
        if not isNaN(width) or width == '':
            imageFilters['widthFilter'] = ( widthComparator.get(), width )
            settings.set( 'Texture Search Filters', 'widthFilter', widthComparator.get() + '|' + width )
        else: unsavedSettings.append( 'width' )
        height = heightValue.get().replace(',', '')
        if not isNaN(height) or height == '':
            imageFilters['heightFilter'] = ( heightComparator.get(), height )
            settings.set( 'Texture Search Filters', 'heightFilter', heightComparator.get() + '|' + height )
        else: unsavedSettings.append( 'height' )
        aspectRatio = aspectRatioValue.get()
        try:
            # Make sure that the aspect ratio can be converted to a number.
            # "4:3" and "4/3" forms are accepted as well as plain floats.
            if ':' in aspectRatio:
                numerator, denomenator = aspectRatio.split(':')
                convertedAspectRatio = float(numerator) / float(denomenator)
            elif '/' in aspectRatio:
                numerator, denomenator = aspectRatio.split('/')
                convertedAspectRatio = float(numerator) / float(denomenator)
            elif aspectRatio != '': convertedAspectRatio = float(aspectRatio)
            if aspectRatio == '' or not isNaN( convertedAspectRatio ):
                imageFilters['aspectRatioFilter'] = ( aspectRatioComparator.get(), aspectRatio )
                settings.set( 'Texture Search Filters', 'aspectRatioFilter', aspectRatioComparator.get() + '|' + aspectRatio )
            else: unsavedSettings.append( 'aspect ratio' )
        # Narrowed from a bare 'except:' -- only conversion/parse failures
        # should mark the entry as unsaved, not e.g. KeyboardInterrupt.
        except (ValueError, ZeroDivisionError):
            unsavedSettings.append( 'aspect ratio' )
        imageType = imageTypeValue.get().replace('_', '')
        if not isNaN(imageType) or imageType == '':
            imageFilters['imageTypeFilter'] = ( imageTypeComparator.get(), imageType ) # str(int()) is in case the value was in hex
            settings.set( 'Texture Search Filters', 'imageTypeFilter', imageTypeComparator.get() + '|' + imageType )
        else: unsavedSettings.append( 'texture type' )
        offset = offsetValue.get().replace(',', '')
        if (validOffset(offset) and not isNaN(int(offset,16))) or offset == '':
            imageFilters['offsetFilter'] = ( offsetComparator.get(), offset )
            settings.set( 'Texture Search Filters', 'offsetFilter', offsetComparator.get() + '|' + offset )
        else: unsavedSettings.append( 'offset' )
        settings.write( theSettingsFile )
    if unsavedSettings != []:
        # Grammar fix: was "could not saved".
        msg('The filters for ' + grammarfyList( unsavedSettings ) + ' could not be saved. The entries must be a number or left blank, with the '
            'exception of aspect ratio (which may be a number, fraction, float (decimal), or a ratio like "4:3").')
        imageFiltersWindow.lift()
        return False
    else: return True
def saveNclose():
    # Save first; keep the window open on failure (or a missing settings
    # file) so the user's entries are not lost.
    if save():
        close()
def saveNreload():
    # Save the filters, then close this window and rescan the currently
    # loaded DAT so the texture list reflects the new filters.
    success = save()
    if success: # If the settings file wasn't found, don't close the window, so the settings aren't lost.
        close()
        clearDatTab()
        # NOTE(review): scanDat() is called here AND (per the comment below)
        # again by the tab-change event handler -- confirm this does not
        # trigger a double scan.
        scanDat()
        # Switch to the DAT Texture Tree tab
        Gui.mainTabFrame.select( Gui.datTab ) # scanDat will now be called by the onMainTabChanged event handler
def clear():
    """Reset every filter back to its default: comparator '=' and a blank value."""
    for comparator in (widthComparator, heightComparator, aspectRatioComparator,
                       imageTypeComparator, offsetComparator):
        comparator.set( '=' )
    for value in (widthValue, heightValue, aspectRatioValue,
                  imageTypeValue, offsetValue):
        value.set( '' )
# The buttons.
row6 = Tk.Frame( mainFrame, width=200 )
btnFrame = Tk.Frame(row6)
ttk.Button( btnFrame, text='Clear',command=clear ).pack( side='left', padx=5 )
ttk.Button( btnFrame, text='Save', command=saveNclose ).pack( side='right', padx=5 )
btnFrame.pack()
ttk.Button( row6, text='Save and Rescan Textures', command=saveNreload ).pack( fill='x', padx=5, pady=4 )
row6.pack( pady=4 )
mainFrame.pack()
def showHelpWindow():
    """Show (or re-show) the toolbox-style Help window.

    If the window already exists it is simply un-minimized; otherwise it
    is built from scratch, positioned relative to the main window, filled
    with clickable resource links, and a random pro-tip is appended.
    """
    if Gui.root.helpWindow != None: Gui.root.helpWindow.deiconify()
    else:
        loadSettings() # Persistent storage from settings.ini
        # Define the window
        helpWindow = Tk.Toplevel()
        helpWindow.title('Help')
        helpWindow.attributes('-toolwindow', 1) # Makes window framing small, like a toolbox/widget.
        helpWindow.resizable(width=False, height=False)
        helpWindow.wm_attributes('-topmost', 1) # Makes window stay topmost (main program still usable).
        Gui.root.helpWindow = helpWindow
        # Calculate the spawning position of the new window
        rootDistanceFromScreenLeft, rootDistanceFromScreenTop = getWindowGeometry( Gui.root )[2:]
        helpWindow.geometry( '+' + str(rootDistanceFromScreenLeft + 180) + '+' + str(rootDistanceFromScreenTop + 140) )
        helpWindow.focus()
        mainFrame = Tk.Frame(helpWindow)
        # Button functions
        def close():
            # Destroy the window and clear the reference so the next call rebuilds it.
            Gui.root.helpWindow.destroy()
            Gui.root.helpWindow = None
        helpWindow.protocol('WM_DELETE_WINDOW', close) # Overrides the 'X' close button.
        # Click handlers for the hyperlink-styled labels below.
        def gotoWorkshop( event ): webbrowser.open( 'http://smashboards.com/forums/melee-workshop.271/' )
        def gotoOfficialThread( event ): webbrowser.open( 'http://smashboards.com/threads/new-tools-for-texture-hacking.373777/' )
        def gotoHowToHackAnyTexture( event ): webbrowser.open( 'http://smashboards.com/threads/how-to-hack-any-texture.388956/' )
        def gotoMeleeHacksAndYou( event ): webbrowser.open( 'http://smashboards.com/threads/melee-hacks-and-you-updated-5-21-2015.247119/#post-4917885' )
        label = ttk.Label( mainFrame, text='- = The Melee Workshop = -', foreground='#00F', cursor='hand2' )
        label.bind( '<1>', gotoWorkshop )
        label.pack(pady=4)
        gridSection = Tk.Frame( mainFrame ) # These contents are grouped together so they can use the grid geometry manager rather than .pack()
        ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=0, columnspan=2 )
        label = ttk.Label( gridSection, text='Read Up on Program Usage', foreground='#00F', cursor='hand2' )
        label.bind( '<1>', showReadMeFile )
        label.grid( column=0, row=1 )
        ttk.Label( gridSection, text='For documentation on this program').grid( column=1, row=1 )
        ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=2, columnspan=2 )
        label = ttk.Label( gridSection, text="DTW's Official Thread", foreground='#00F', cursor='hand2' )
        label.bind('<1>', gotoOfficialThread)
        label.grid( column=0, row=3 )
        ttk.Label( gridSection, text='Questions, feature requests, and other discussion on '
            'this program can be posted here').grid( column=1, row=3 )
        ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=4, columnspan=2 )
        label = ttk.Label( gridSection, text='How to Hack Any Texture', foreground='#00F', cursor='hand2' )
        label.bind('<1>', gotoHowToHackAnyTexture)
        label.grid( column=0, row=5 )
        ttk.Label( gridSection, text="If for some reason your texture doesn't "
            "appear in this program, then you can fall back onto this thread").grid( column=1, row=5 )
        ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=6, columnspan=2 )
        label = ttk.Label( gridSection, text='OP of Melee Hacks and You', foreground='#00F', cursor='hand2' )
        label.bind('<1>', gotoMeleeHacksAndYou)
        label.grid( column=0, row=7 )
        ttk.Label( gridSection, text='The first post in this thread contains many '
            'resources on all subjects to help you get started').grid( column=1, row=7 )
        ttk.Label( gridSection, image=Gui.imageBank('helpWindowDivider') ).grid( column=0, row=8, columnspan=2 )
        # Wrap the description column and pad every cell uniformly.
        for label in gridSection.grid_slaves( column=1 ):
            label.config( wraplength=220 )
        for label in gridSection.winfo_children():
            label.grid_configure( ipady=4, padx=7 )
        gridSection.pack( padx=4 )
        # NOTE(review): relies on proTips having consecutive integer keys
        # starting at 1 -- randint is inclusive at both ends, so this is
        # valid only while the keys run exactly 1..len(proTips).
        ttk.Label( mainFrame, text='Random Pro-tip: ' + proTips[random.randint( 1, len(proTips) )], wraplength=380 ).pack( padx=4, pady=12 )
        mainFrame.pack()
proTips = {
1: ( "Did you know that you can drag-and-drop files directly onto "
"the program icon (the .exe file) or the GUI to open them?" ),
2: ( "There are multiple useful behaviors you can call upon when importing textures:"
"\n- When viewing the contents of a disc on the 'Disc File Tree' tab. The imported "
"texture's destination will be determined by the file's name. For example, "
'the file "MnSlMap.usd_0x38840_2.png" would be imported into the disc in the file "MnSlMap.usd" '
"at offset 0x38840. This can be very useful for bulk importing many textures at once."
"\n- Navigate to a specific texture in the 'DAT Texture Tree' tab, select a texture, and you "
'can import a texture to replace it with without concern for how the file is named.' ),
3: ( 'The color of the status message ("File Scan Complete", etc.) is purely used to indicate '
"whether or not there are changes that have yet to be saved. Green means everything has "
"been saved to disc/file. Red means there are changes that have not yet been saved." ),
4: ( "For CSPs (Character Select Portraits), if you're trying to mimic "
"the game's original CSP shadows, they are 10px down and 10px to the left." ),
5: ( "Use the boost to chase!" ),
6: ( "When working in GIMP and opting to use a palette, it's important that you delete "
"ALL hidden and unused layers BEFORE generating a palette for your texture. "
"This is because if other layers are present, even if not visible, GIMP "
"will take their colors into account to generate a palette. (If you have a lot of "
"layers, an easier method may be to create a 'New from Visible' layer, and then copy that "
"to a new, blank project.)" ),
7: ( "Did you know that if you hold SHIFT while right-clicking "
"on a file in Windows, there appears a context menu option called "
"'Copy as path'? This will copy the file's full path into your clipboard, "
"so you can then easily paste it into | |
disk space.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of all flavors with additional information.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['flavors'] = list()
for flavor in self.api.compute.flavors.values():
# use the class dict. it should work fine
# but use a copy so we don't modifiy the original
f = flavor.__dict__.copy()
# add additional expected stuff stay openstack compatible
f['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id)}]
f['OS-FLV-DISABLED:disabled'] = False
f['OS-FLV-EXT-DATA:ephemeral'] = 0
f['os-flavor-access:is_public'] = True
f['ram'] = flavor.memory
f['vcpus'] = flavor.cpu
f['swap'] = 0
f['disk'] = flavor.storage
f['rxtx_factor'] = 1.0
resp['flavors'].append(f)
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of servers." % __name__)
return str(ex), 500
def post(self, id):
    """Create a new flavor from the JSON request body and return it.

    :param id: tenant id, used for the 'href' link
    :type id: ``str``
    :return: Returns a flask response describing the created flavor.
    :rtype: :class:`flask.response`
    """
    LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
    flavor_spec = json.loads(request.data).get("flavor")
    LOG.warning("Create Flavor: %s" % str(flavor_spec))
    # Register the flavor in the emulator's internal dict.
    new_flavor = self.api.compute.add_flavor(
        flavor_spec.get("name"),
        flavor_spec.get("vcpus"),
        flavor_spec.get("ram"), "MB",
        flavor_spec.get("disk"), "GB")
    # Echo the incoming data back, enriched with the generated id and link.
    flavor_spec["id"] = new_flavor.id
    flavor_spec["links"] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
                                                                         self.api.port,
                                                                         id,
                                                                         new_flavor.id)}]
    return Response(json.dumps({"flavor": flavor_spec}), status=200,
                    mimetype="application/json")
class NovaListFlavorById(Resource):
    # Flask-RESTful resource for a single flavor: GET details / DELETE (faked).

    def __init__(self, api):
        self.api = api

    def get(self, id, flavorid):
        """
        Returns details about one flavor.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :param flavorid: Represents the flavor.
        :type flavorid: ``str``
        :return: Returns a flask response with detailed information about the flavor,
            or a 404 when no such flavor exists.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            resp = dict()
            resp['flavor'] = dict()
            # Look the flavor up by dict key first, then fall back to
            # scanning the values for a matching id attribute.
            flavor = self.api.compute.flavors.get(flavorid, None)
            if flavor is None:
                for f in self.api.compute.flavors.values():
                    if f.id == flavorid:
                        flavor = f
                        break
            if flavor is None:
                # Fix: previously this fell through to flavor.id, raising an
                # AttributeError that surfaced as a 500; an unknown flavor is
                # a 404 (matching the image endpoint's behavior).
                response = Response(
                    "Flavor with id or name %s does not exists." % flavorid, status=404)
                response.headers['Access-Control-Allow-Origin'] = '*'
                return response
            resp['flavor']['id'] = flavor.id
            resp['flavor']['name'] = flavor.name
            resp['flavor']['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
                                                                                    self.api.port,
                                                                                    id,
                                                                                    flavor.id)}]
            response = Response(json.dumps(resp), status=200,
                                mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response
        except Exception as ex:
            LOG.exception(u"%s: Could not retrieve flavor with id %s" %
                          (__name__, flavorid))
            return str(ex), 500

    def delete(self, id, flavorid):
        """
        Removes the given flavor.

        Does not really remove anything from the machine, just fakes an OK.
        """
        # Fix: log message previously said GET for this DELETE handler.
        LOG.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
        return Response("", status=204, mimetype="application/json")
class NovaListImages(Resource):
    # Flask-RESTful resource listing all usable images (id, name, link only).

    def __init__(self, api):
        self.api = api

    def get(self, id):
        """
        Creates a list of all usable images.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :return: Returns a flask response with a list of available images.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            image_list = []
            for image in self.api.compute.images.values():
                image_list.append({
                    'id': image.id,
                    # Strip the docker ":latest" tag from the display name.
                    'name': str(image.name).replace(":latest", ""),
                    'links': [{'href': "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
                                                                           self.api.port,
                                                                           id,
                                                                           image.id)}],
                })
            response = Response(json.dumps({'images': image_list}), status=200,
                                mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response
        except Exception as ex:
            LOG.exception(
                u"%s: Could not retrieve the list of images." % __name__)
            return str(ex), 500
class NovaListImagesDetails(Resource):
    # Flask-RESTful resource: like NovaListImages but with extra metadata.

    def __init__(self, api):
        self.api = api

    def get(self, id):
        """
        As List Images but with additional metadata.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :return: Returns a flask response with a list of images and their metadata.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            image_dicts = []
            for image in self.api.compute.images.values():
                # Copy the instance dict so the original image object stays untouched.
                entry = image.__dict__.copy()
                # Additional fields expected by OpenStack-compatible clients.
                entry['name'] = str(image.name).replace(":latest", "")
                entry['links'] = [{'href': "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
                                                                               self.api.port,
                                                                               id,
                                                                               image.id)}]
                entry['metadata'] = {
                    "architecture": "x86_64",
                    "auto_disk_config": "True",
                    "kernel_id": "nokernel",
                    "ramdisk_id": "nokernel"
                }
                image_dicts.append(entry)
            response = Response(json.dumps({'images': image_dicts}), status=200,
                                mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response
        except Exception as ex:
            LOG.exception(
                u"%s: Could not retrieve the list of images." % __name__)
            return str(ex), 500
class NovaListImageById(Resource):
    # Flask-RESTful resource for a single image: GET looks it up by id or
    # name, DELETE fakes removal.

    def __init__(self, api):
        self.api = api

    def get(self, id, imageid):
        """
        Gets an image by id from the emulator with openstack nova compliant return values.

        :param id: tenantid, we ignore this most of the time
        :type id: ``str``
        :param imageid: id or name of the image to look up
        :type imageid: ``str``
        :return: Returns a flask response with the information about one image,
            or a 404 if no image matches by id or name.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            resp = dict()
            i = resp['image'] = dict()
            # Match either the image id or its (human readable) name.
            for image in self.api.compute.images.values():
                if image.id == imageid or image.name == imageid:
                    i['id'] = image.id
                    i['name'] = image.name
                    return Response(json.dumps(resp), status=200,
                                    mimetype="application/json")
            response = Response(
                "Image with id or name %s does not exists." % imageid, status=404)
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response
        except Exception as ex:
            LOG.exception(u"%s: Could not retrieve image with id %s." %
                          (__name__, imageid))
            return str(ex), 500

    def delete(self, id, imageid):
        """
        Removes the given image.

        Does not really remove anything from the machine, just fakes an OK.
        """
        # NOTE(review): log message says GET although this handles DELETE.
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        return Response("", status=204, mimetype="application/json")
class NovaShowServerDetails(Resource):
    # Flask-RESTful resource for a single server: GET returns its details
    # (with flavor/image bookmark links), DELETE stops the compute instance.

    def __init__(self, api):
        self.api = api

    def get(self, id, serverid):
        """
        Returns detailed information about the specified server.

        :param id: tenant id, used for the 'href' link
        :type id: ``str``
        :param serverid: Specifies the requested server.
        :type serverid: ``str``
        :return: Returns a flask response with details about the server.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
        try:
            server = self.api.compute.find_server_by_name_or_id(serverid)
            if server is None:
                return Response(
                    "Server with id or name %s does not exists." % serverid, status=404)
            s = server.create_server_dict()
            s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
                                                                       self.api.port,
                                                                       id,
                                                                       server.id)}]
            # NOTE(review): a dangling server.flavor / server.image key would
            # raise KeyError here and surface as a 500 via the handler below.
            flavor = self.api.compute.flavors[server.flavor]
            s['flavor'] = {
                "id": flavor.id,
                "links": [
                    {
                        "href": "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
                                                                     self.api.port,
                                                                     id,
                                                                     flavor.id),
                        "rel": "bookmark"
                    }
                ]
            }
            image = self.api.compute.images[server.image]
            s['image'] = {
                "id": image.id,
                "links": [
                    {
                        "href": "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
                                                                    self.api.port,
                                                                    id,
                                                                    image.id),
                        "rel": "bookmark"
                    }
                ]
            }
            response = Response(json.dumps(
                {'server': s}), status=200, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response
        except Exception as ex:
            LOG.exception(
                u"%s: Could not retrieve the server details." % __name__)
            return str(ex), 500

    def delete(self, id, serverid):
        """
        Delete a server instance.

        :param id: tenant id, we ignore this most of the time
        :type id: ``str``
        :param serverid: The UUID of the server
        :type serverid: ``str``
        :return: Returns 204 if everything is fine.
        :rtype: :class:`flask.response`
        """
        LOG.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
        try:
            server = self.api.compute.find_server_by_name_or_id(serverid)
            if server is None:
                return Response('Could not find server.',
                                status=404, mimetype="application/json")
            self.api.compute.stop_compute(server)
            response = Response('', status=204, mimetype="application/json")
            response.headers['Access-Control-Allow-Origin'] = '*'
            return response
        except Exception as ex:
            # NOTE(review): message says "create" but this is the delete path.
            LOG.exception(u"%s: Could not create the server." % __name__)
            return str(ex), 500
class NovaInterfaceToServer(Resource):
def __init__(self, api):
self.api = api
def post(self, id, serverid):
"""
Add an interface to the specified server.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:param serverid: Specifies the server.
:type serverid: ``str``
:return: Returns a flask response with information about the attached interface.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response(
"Server with id or name %s does not exists." % serverid, status=404)
if server.emulator_compute is None:
LOG.error("The targeted container does not exist.")
return Response(
"The targeted container of %s does not exist." % serverid, status=404)
data = json.loads(request.data).get("interfaceAttachment")
resp = dict()
port = data.get("port_id", None)
net = data.get("net_id", None)
dc = self.api.compute.dc
network_dict = dict()
network = None
if net is not None and port is not None:
port = self.api.compute.find_port_by_name_or_id(port)
network = self.api.compute.find_network_by_name_or_id(net)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network_dict[network_dict['id']] = network.name
elif net is not None:
network = self.api.compute.find_network_by_name_or_id(net)
if network is None:
return Response(
"Network with id or name %s does not exists." % net, status=404)
port = self.api.compute.create_port("port:cp%s:fl:%s" %
(len(self.api.compute.ports), str(uuid.uuid4())))
port.net_name = network.name
port.ip_address = network.get_new_ip_address(port.name)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network_dict[network_dict['id']] = network.name
elif port is not None:
port = self.api.compute.find_port_by_name_or_id(port)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network = self.api.compute.find_network_by_name_or_id(
port.net_name)
network_dict[network_dict['id']] = network.name
else:
raise Exception(
"You can only attach | |
'properties.networkAcls', 'type': 'NetworkRuleSet'},
'is_hns_enabled': {'key': 'properties.isHnsEnabled', 'type': 'bool'},
}
def __init__(
    self,
    **kwargs
):
    """Initialize the model; only writable properties are read from kwargs."""
    super(StorageAccount, self).__init__(**kwargs)
    # Server-populated (read-only) properties are initialized to None and
    # are ignored on outgoing requests by the msrest serializer.
    self.sku = None
    self.kind = None
    self.identity = kwargs.get('identity', None)
    self.provisioning_state = None
    self.primary_endpoints = None
    self.primary_location = None
    self.status_of_primary = None
    self.last_geo_failover_time = None
    self.secondary_location = None
    self.status_of_secondary = None
    self.creation_time = None
    self.custom_domain = None
    self.secondary_endpoints = None
    self.encryption = None
    self.access_tier = None
    self.enable_https_traffic_only = kwargs.get('enable_https_traffic_only', False)
    self.network_rule_set = None
    self.is_hns_enabled = kwargs.get('is_hns_enabled', False)
class StorageAccountCheckNameAvailabilityParameters(msrest.serialization.Model):
    """The parameters used to check the availability of the storage account name.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The storage account name.
    :type name: str
    :ivar type: Required. The type of resource, Microsoft.Storage/storageAccounts. Default value:
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True, 'constant': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    # Constant value declared at class level; sent as-is with every request.
    type = "Microsoft.Storage/storageAccounts"

    def __init__(
        self,
        **kwargs
    ):
        super(StorageAccountCheckNameAvailabilityParameters, self).__init__(**kwargs)
        # 'name' is required -- a missing kwarg raises KeyError here.
        self.name = kwargs['name']
class StorageAccountCreateParameters(msrest.serialization.Model):
    """The parameters used when creating a storage account.

    All required parameters must be populated in order to send to Azure.

    :param sku: Required. Required. Gets or sets the sku name.
    :type sku: ~azure.mgmt.storage.v2018_02_01.models.Sku
    :param kind: Required. Required. Indicates the type of storage account. Possible values
     include: "Storage", "StorageV2", "BlobStorage".
    :type kind: str or ~azure.mgmt.storage.v2018_02_01.models.Kind
    :param location: Required. Required. Gets or sets the location of the resource. This will be
     one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia,
     etc.). The geo region of a resource cannot be changed once it is created, but if an identical
     geo region is specified on update, the request will succeed.
    :type location: str
    :param tags: A set of tags. Gets or sets a list of key value pairs that describe the resource.
     These tags can be used for viewing and grouping this resource (across resource groups). A
     maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no
     greater than 128 characters and a value with a length no greater than 256 characters.
    :type tags: dict[str, str]
    :param identity: The identity of the resource.
    :type identity: ~azure.mgmt.storage.v2018_02_01.models.Identity
    :param custom_domain: User domain assigned to the storage account. Name is the CNAME source.
     Only one custom domain is supported per storage account at this time. To clear the existing
     custom domain, use an empty string for the custom domain name property.
    :type custom_domain: ~azure.mgmt.storage.v2018_02_01.models.CustomDomain
    :param encryption: Provides the encryption settings on the account. If left unspecified the
     account encryption settings will remain the same. The default setting is unencrypted.
    :type encryption: ~azure.mgmt.storage.v2018_02_01.models.Encryption
    :param network_rule_set: Network rule set.
    :type network_rule_set: ~azure.mgmt.storage.v2018_02_01.models.NetworkRuleSet
    :param access_tier: Required for storage accounts where kind = BlobStorage. The access tier
     used for billing. Possible values include: "Hot", "Cool".
    :type access_tier: str or ~azure.mgmt.storage.v2018_02_01.models.AccessTier
    :param enable_https_traffic_only: Allows https traffic only to storage service if sets to true.
    :type enable_https_traffic_only: bool
    :param is_hns_enabled: Account HierarchicalNamespace enabled if sets to true.
    :type is_hns_enabled: bool
    """

    _validation = {
        'sku': {'required': True},
        'kind': {'required': True},
        'location': {'required': True},
    }

    # Maps python attribute names to their wire (JSON) keys and types.
    _attribute_map = {
        'sku': {'key': 'sku', 'type': 'Sku'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'Identity'},
        'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
        'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
        'network_rule_set': {'key': 'properties.networkAcls', 'type': 'NetworkRuleSet'},
        'access_tier': {'key': 'properties.accessTier', 'type': 'str'},
        'enable_https_traffic_only': {'key': 'properties.supportsHttpsTrafficOnly', 'type': 'bool'},
        'is_hns_enabled': {'key': 'properties.isHnsEnabled', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(StorageAccountCreateParameters, self).__init__(**kwargs)
        # Required parameters -- a missing kwarg raises KeyError here.
        self.sku = kwargs['sku']
        self.kind = kwargs['kind']
        self.location = kwargs['location']
        # Optional parameters default to None (or False for the booleans).
        self.tags = kwargs.get('tags', None)
        self.identity = kwargs.get('identity', None)
        self.custom_domain = kwargs.get('custom_domain', None)
        self.encryption = kwargs.get('encryption', None)
        self.network_rule_set = kwargs.get('network_rule_set', None)
        self.access_tier = kwargs.get('access_tier', None)
        self.enable_https_traffic_only = kwargs.get('enable_https_traffic_only', False)
        self.is_hns_enabled = kwargs.get('is_hns_enabled', False)
class StorageAccountKey(msrest.serialization.Model):
    """An access key for the storage account.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar key_name: Name of the key.
    :vartype key_name: str
    :ivar value: Base 64-encoded value of the key.
    :vartype value: str
    :ivar permissions: Permissions for the key -- read-only or full permissions. Possible values
     include: "Read", "Full".
    :vartype permissions: str or ~azure.mgmt.storage.v2018_02_01.models.KeyPermission
    """

    _validation = {
        'key_name': {'readonly': True},
        'value': {'readonly': True},
        'permissions': {'readonly': True},
    }

    _attribute_map = {
        'key_name': {'key': 'keyName', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        'permissions': {'key': 'permissions', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(StorageAccountKey, self).__init__(**kwargs)
        # All properties are read-only, hence server-populated only.
        self.key_name = None
        self.value = None
        self.permissions = None
class StorageAccountListKeysResult(msrest.serialization.Model):
    """The response from the ListKeys operation.

    All attributes are populated by the server; values supplied by the
    client are ignored when this model is sent in a request.

    :ivar keys: Gets the list of storage account keys and their properties for the specified
     storage account.
    :vartype keys: list[~azure.mgmt.storage.v2018_02_01.models.StorageAccountKey]
    """

    # The key list is read-only: only the service may set it.
    _validation = {
        'keys': {'readonly': True},
    }

    _attribute_map = {
        'keys': {'key': 'keys', 'type': '[StorageAccountKey]'},
    }

    def __init__(self, **kwargs):
        """Create an empty result; the server populates ``keys``."""
        super(StorageAccountListKeysResult, self).__init__(**kwargs)
        self.keys = None
class StorageAccountListResult(msrest.serialization.Model):
    """The response from the List Storage Accounts operation.

    All attributes are populated by the server; values supplied by the
    client are ignored when this model is sent in a request.

    :ivar value: Gets the list of storage accounts and their properties.
    :vartype value: list[~azure.mgmt.storage.v2018_02_01.models.StorageAccount]
    """

    # The account list is read-only: only the service may set it.
    _validation = {
        'value': {'readonly': True},
    }

    _attribute_map = {
        'value': {'key': 'value', 'type': '[StorageAccount]'},
    }

    def __init__(self, **kwargs):
        """Create an empty result; the server populates ``value``."""
        super(StorageAccountListResult, self).__init__(**kwargs)
        self.value = None
class StorageAccountRegenerateKeyParameters(msrest.serialization.Model):
    """The parameters used to regenerate the storage account key.

    All required parameters must be populated in order to send to Azure.

    :param key_name: Required. The name of storage keys that want to be regenerated, possible
     values are key1, key2.
    :type key_name: str
    """

    # key_name must always be supplied by the caller.
    _validation = {
        'key_name': {'required': True},
    }

    _attribute_map = {
        'key_name': {'key': 'keyName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Build the request body; ``key_name`` is required and raises
        ``KeyError`` when absent."""
        super(StorageAccountRegenerateKeyParameters, self).__init__(**kwargs)
        self.key_name = kwargs['key_name']
class StorageAccountUpdateParameters(msrest.serialization.Model):
"""The parameters that can be provided when updating the storage account properties.
:param sku: Gets or sets the SKU name. Note that the SKU name cannot be updated to Standard_ZRS
or Premium_LRS, nor can accounts of those sku names be updated to any other value.
:type sku: ~azure.mgmt.storage.v2018_02_01.models.Sku
:param tags: A set of tags. Gets or sets a list of key value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across resource groups). A
maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in
length than 128 characters and a value no greater in length than 256 characters.
:type tags: dict[str, str]
:param identity: The identity of the resource.
:type identity: ~azure.mgmt.storage.v2018_02_01.models.Identity
:param kind: Optional. Indicates the type of storage account. Currently only StorageV2 value
supported by server. Possible values include: "Storage", "StorageV2", "BlobStorage".
:type kind: str or ~azure.mgmt.storage.v2018_02_01.models.Kind
:param custom_domain: Custom domain assigned to the storage account by the user. Name is the
CNAME source. Only one custom domain is supported per storage account at this time. To clear
the existing custom domain, use an empty string for the custom domain name property.
:type custom_domain: ~azure.mgmt.storage.v2018_02_01.models.CustomDomain
:param encryption: Provides the encryption settings on the account. The default setting is
unencrypted.
:type encryption: ~azure.mgmt.storage.v2018_02_01.models.Encryption
:param access_tier: Required for storage accounts where kind = BlobStorage. The access tier
used for billing. Possible values include: "Hot", "Cool".
:type access_tier: str or ~azure.mgmt.storage.v2018_02_01.models.AccessTier
:param enable_https_traffic_only: Allows https traffic only to storage service if sets to true.
:type enable_https_traffic_only: bool
:param network_rule_set: Network rule set.
:type network_rule_set: ~azure.mgmt.storage.v2018_02_01.models.NetworkRuleSet
"""
_attribute_map = {
'sku': {'key': 'sku', 'type': 'Sku'},
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'Identity'},
'kind': {'key': 'kind', 'type': 'str'},
'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
'encryption': {'key': 'properties.encryption', 'type': 'Encryption'},
'access_tier': {'key': 'properties.accessTier', 'type': 'str'},
'enable_https_traffic_only': {'key': 'properties.supportsHttpsTrafficOnly', 'type': 'bool'},
'network_rule_set': {'key': 'properties.networkAcls', 'type': 'NetworkRuleSet'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountUpdateParameters, self).__init__(**kwargs)
self.sku | |
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import datetime
import decimal
import json
import random
import unittest
import uuid
import edgedb
from edgedb import _taskgroup as tg
from edgedb import _testbase as tb
class TestAsyncFetch(tb.AsyncQueryTestCase):
    # Share one database/connection state across all test methods instead
    # of isolating each method -- presumably the base class would otherwise
    # wrap each test in its own sandbox (TODO confirm in tb).
    ISOLATED_METHODS = False
SETUP = '''
CREATE TYPE test::Tmp {
CREATE REQUIRED PROPERTY tmp -> std::str;
};
CREATE SCALAR TYPE MyEnum EXTENDING enum<"A", "B">;
'''
TEARDOWN = '''
DROP TYPE test::Tmp;
'''
async def test_async_parse_error_recover_01(self):
for _ in range(2):
with self.assertRaises(edgedb.EdgeQLSyntaxError):
await self.con.query('select syntax error')
with self.assertRaises(edgedb.EdgeQLSyntaxError):
await self.con.query('select syntax error')
with self.assertRaisesRegex(edgedb.EdgeQLSyntaxError,
'Unexpected end of line'):
await self.con.query('select (')
with self.assertRaisesRegex(edgedb.EdgeQLSyntaxError,
'Unexpected end of line'):
await self.con.query_json('select (')
for _ in range(10):
self.assertEqual(
await self.con.query('select 1;'),
edgedb.Set((1,)))
self.assertFalse(self.con.is_closed())
async def test_async_parse_error_recover_02(self):
for _ in range(2):
with self.assertRaises(edgedb.EdgeQLSyntaxError):
await self.con.execute('select syntax error')
with self.assertRaises(edgedb.EdgeQLSyntaxError):
await self.con.execute('select syntax error')
for _ in range(10):
await self.con.execute('select 1; select 2;'),
async def test_async_exec_error_recover_01(self):
for _ in range(2):
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.query('select 1 / 0;')
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.query('select 1 / 0;')
for _ in range(10):
self.assertEqual(
await self.con.query('select 1;'),
edgedb.Set((1,)))
async def test_async_exec_error_recover_02(self):
for _ in range(2):
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.execute('select 1 / 0;')
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.execute('select 1 / 0;')
for _ in range(10):
await self.con.execute('select 1;')
async def test_async_exec_error_recover_03(self):
query = 'select 10 // <int64>$0;'
for i in [1, 2, 0, 3, 1, 0, 1]:
if i:
self.assertEqual(
await self.con.query(query, i),
edgedb.Set([10 // i]))
else:
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.query(query, i)
async def test_async_exec_error_recover_04(self):
for i in [1, 2, 0, 3, 1, 0, 1]:
if i:
await self.con.execute(f'select 10 // {i};')
else:
with self.assertRaises(edgedb.DivisionByZeroError):
await self.con.query(f'select 10 // {i};')
async def test_async_exec_error_recover_05(self):
with self.assertRaisesRegex(edgedb.QueryError,
'cannot accept parameters'):
await self.con.execute(f'select <int64>$0')
self.assertEqual(
await self.con.query('SELECT "HELLO"'),
["HELLO"])
    async def test_async_fetch_single_command_01(self):
        """DDL statements run through query()/query_json() return an empty
        result set; query_one_json() must refuse them."""
        # CREATE then DROP the same type repeatedly: each DDL command
        # yields an empty set from query().
        r = await self.con.query('''
            CREATE TYPE test::server_fetch_single_command_01 {
                CREATE REQUIRED PROPERTY server_fetch_single_command_01 ->
                    std::str;
            };
        ''')
        self.assertEqual(r, [])

        r = await self.con.query('''
            DROP TYPE test::server_fetch_single_command_01;
        ''')
        self.assertEqual(r, [])

        r = await self.con.query('''
            CREATE TYPE test::server_fetch_single_command_01 {
                CREATE REQUIRED PROPERTY server_fetch_single_command_01 ->
                    std::str;
            };
        ''')
        self.assertEqual(r, [])

        # Same DDL via the JSON codec: the empty set serializes as '[]'.
        r = await self.con.query_json('''
            DROP TYPE test::server_fetch_single_command_01;
        ''')
        self.assertEqual(r, '[]')

        r = await self.con.query_json('''
            CREATE TYPE test::server_fetch_single_command_01 {
                CREATE REQUIRED PROPERTY server_fetch_single_command_01 ->
                    std::str;
            };
        ''')
        self.assertEqual(r, '[]')

        # query_one_json() requires exactly one returned object, so DDL
        # (which returns nothing) is rejected at the interface level.
        with self.assertRaisesRegex(
                edgedb.InterfaceError,
                r'query cannot be executed with query_one_json\('):
            await self.con.query_one_json('''
                DROP TYPE test::server_fetch_single_command_01;
            ''')

        # Clean up the type created above so later tests see a fresh schema.
        r = await self.con.query_json('''
            DROP TYPE test::server_fetch_single_command_01;
        ''')
        self.assertEqual(r, '[]')
    async def test_async_fetch_single_command_02(self):
        """Session-configuration commands (SET/RESET) return an empty
        result through both the binary and the JSON codecs."""
        r = await self.con.query('''
            SET MODULE default;
        ''')
        self.assertEqual(r, [])

        r = await self.con.query('''
            RESET ALIAS *;
        ''')
        self.assertEqual(r, [])

        r = await self.con.query('''
            SET ALIAS bar AS MODULE std;
        ''')
        self.assertEqual(r, [])

        r = await self.con.query('''
            SET MODULE default;
        ''')
        self.assertEqual(r, [])

        r = await self.con.query('''
            SET ALIAS bar AS MODULE std;
        ''')
        self.assertEqual(r, [])

        # The same commands via the JSON codec serialize the empty set
        # as the string '[]'.
        r = await self.con.query_json('''
            SET MODULE default;
        ''')
        self.assertEqual(r, '[]')

        r = await self.con.query_json('''
            SET ALIAS foo AS MODULE default;
        ''')
        self.assertEqual(r, '[]')
    async def test_async_fetch_single_command_03(self):
        """Transaction-control commands return empty results; the
        query_one* APIs must refuse them.

        NOTE: the order of ``qs`` matters -- each savepoint/rollback is
        only valid given the preceding commands.
        """
        qs = [
            'START TRANSACTION',
            'DECLARE SAVEPOINT t0',
            'ROLLBACK TO SAVEPOINT t0',
            'RELEASE SAVEPOINT t0',
            'ROLLBACK',
            'START TRANSACTION',
            'COMMIT',
        ]

        for _ in range(3):
            # Full transaction dance through the binary codec...
            for q in qs:
                r = await self.con.query(q)
                self.assertEqual(r, [])

            # ...and again through the JSON codec.
            for q in qs:
                r = await self.con.query_json(q)
                self.assertEqual(r, '[]')

        # query_one()/query_one_json() require a returned value, so
        # transaction commands are rejected.
        with self.assertRaisesRegex(
                edgedb.InterfaceError,
                r'cannot be executed with query_one\(\).*'
                r'not return'):
            await self.con.query_one('START TRANSACTION')

        with self.assertRaisesRegex(
                edgedb.InterfaceError,
                r'cannot be executed with query_one_json\(\).*'
                r'not return'):
            await self.con.query_one_json('START TRANSACTION')
    async def test_async_fetch_single_command_04(self):
        """Every single-query API must reject multi-statement scripts
        with a ProtocolError (scripts belong to execute())."""
        with self.assertRaisesRegex(edgedb.ProtocolError,
                                    'expected one statement'):
            await self.con.query('''
                SELECT 1;
                SET MODULE blah;
            ''')

        with self.assertRaisesRegex(edgedb.ProtocolError,
                                    'expected one statement'):
            await self.con.query_one('''
                SELECT 1;
                SET MODULE blah;
            ''')

        with self.assertRaisesRegex(edgedb.ProtocolError,
                                    'expected one statement'):
            await self.con.query_json('''
                SELECT 1;
                SET MODULE blah;
            ''')
    async def test_async_basic_datatypes_01(self):
        """Round-trip basic EdgeQL values (tuples, arrays, named tuples)
        and check the query_one() cardinality errors.

        The outer loop repeats the sequence to exercise the client's
        prepared-statement cache on later iterations.
        """
        for _ in range(10):
            self.assertEqual(
                await self.con.query_one(
                    'select ()'),
                ())

            self.assertEqual(
                await self.con.query(
                    'select (1,)'),
                edgedb.Set([(1,)]))

            # Empty array fetched inside an explicit transaction.
            async with self.con.transaction(isolation='repeatable_read'):
                self.assertEqual(
                    await self.con.query_one(
                        'select <array<int64>>[]'),
                    [])

            self.assertEqual(
                await self.con.query(
                    'select ["a", "b"]'),
                edgedb.Set([["a", "b"]]))

            # Named tuples decode into edgedb.NamedTuple objects.
            self.assertEqual(
                await self.con.query('''
                    SELECT {(a := 1 + 1 + 40, world := ("hello", 32)),
                            (a:=1, world := ("yo", 10))};
                '''),
                edgedb.Set([
                    edgedb.NamedTuple(a=42, world=("hello", 32)),
                    edgedb.NamedTuple(a=1, world=("yo", 10)),
                ]))

            # query_one() must reject multisets...
            with self.assertRaisesRegex(
                    edgedb.InterfaceError,
                    r'query_one\(\) as it returns a multiset'):
                await self.con.query_one('SELECT {1, 2}')

            # ...and raise NoDataError on an empty set.
            with self.assertRaisesRegex(edgedb.NoDataError, r'\bquery_one\('):
                await self.con.query_one('SELECT <int64>{}')
async def test_async_basic_datatypes_02(self):
self.assertEqual(
await self.con.query(
r'''select [b"\x00a", b"b", b'', b'\na']'''),
edgedb.Set([[b"\x00a", b"b", b'', b'\na']]))
self.assertEqual(
await self.con.query(
r'select <bytes>$0', b'he\x00llo'),
edgedb.Set([b'he\x00llo']))
    async def test_async_basic_datatypes_03(self):
        """Round-trip the same basic values through the JSON codec and
        verify cardinality behavior of query_one_json()."""
        for _ in range(10):  # test opportunistic execute
            # Empty tuple / one-tuple / empty array serialize as nested
            # JSON arrays wrapped in the result-set array.
            self.assertEqual(
                await self.con.query_json(
                    'select ()'),
                '[[]]')

            self.assertEqual(
                await self.con.query_json(
                    'select (1,)'),
                '[[1]]')

            self.assertEqual(
                await self.con.query_json(
                    'select <array<int64>>[]'),
                '[[]]')

            self.assertEqual(
                json.loads(
                    await self.con.query_json(
                        'select ["a", "b"]')),
                [["a", "b"]])

            # query_one_json() unwraps the outer result-set array.
            self.assertEqual(
                json.loads(
                    await self.con.query_one_json(
                        'select ["a", "b"]')),
                ["a", "b"])

            # Named tuples become JSON objects; plain tuples JSON arrays.
            self.assertEqual(
                json.loads(
                    await self.con.query_json('''
                        SELECT {(a := 1 + 1 + 40, world := ("hello", 32)),
                                (a:=1, world := ("yo", 10))};
                    ''')),
                [
                    {"a": 42, "world": ["hello", 32]},
                    {"a": 1, "world": ["yo", 10]}
                ])

            # Multisets are fine for query_json()...
            self.assertEqual(
                json.loads(
                    await self.con.query_json('SELECT {1, 2}')),
                [1, 2])

            self.assertEqual(
                json.loads(await self.con.query_json('SELECT <int64>{}')),
                [])

            # ...but an empty set still raises for query_one_json().
            with self.assertRaises(edgedb.NoDataError):
                await self.con.query_one_json('SELECT <int64>{}')
    async def test_async_basic_datatypes_04(self):
        """Arrays of named tuples (including an empty, explicitly typed
        array) decode into nested edgedb.Array/NamedTuple objects."""
        val = await self.con.query_one(
            '''
                SELECT schema::ObjectType {
                    foo := {
                        [(a := 1, b := 2), (a := 3, b := 4)],
                        [(a := 5, b := 6)],
                        <array <tuple<a: int64, b: int64>>>[],
                    }
                } LIMIT 1
            '''
        )

        # The computed link decodes as a set of arrays of named tuples;
        # the explicitly-cast empty array survives as an empty Array.
        self.assertEqual(
            val.foo,
            edgedb.Set([
                edgedb.Array([
                    edgedb.NamedTuple(a=1, b=2),
                    edgedb.NamedTuple(a=3, b=4),
                ]),
                edgedb.Array([
                    edgedb.NamedTuple(a=5, b=6),
                ]),
                edgedb.Array([]),
            ]),
        )
async def test_async_args_01(self):
self.assertEqual(
await self.con.query(
'select (<array<str>>$foo)[0] ++ (<array<str>>$bar)[0];',
foo=['aaa'], bar=['bbb']),
edgedb.Set(('aaabbb',)))
async def test_async_args_02(self):
self.assertEqual(
await self.con.query(
'select (<array<str>>$0)[0] ++ (<array<str>>$1)[0];',
['aaa'], ['bbb']),
edgedb.Set(('aaabbb',)))
    async def test_async_args_03(self):
        """Malformed parameter sets are rejected at compile time."""
        # Positional parameters must start at $0 with no gaps...
        with self.assertRaisesRegex(edgedb.QueryError, r'missing \$0'):
            await self.con.query('select <int64>$1;')

        with self.assertRaisesRegex(edgedb.QueryError, r'missing \$1'):
            await self.con.query('select <int64>$0 + <int64>$2;')

        # ...and positional and named parameters cannot be mixed.
        with self.assertRaisesRegex(edgedb.QueryError,
                                    'combine positional and named parameters'):
            await self.con.query('select <int64>$0 + <int64>$bar;')
    async def test_async_args_04(self):
        """Datetime-family arguments: each EdgeDB temporal type accepts
        exactly the matching Python object (aware vs. naive) and rejects
        the rest with InvalidArgumentError."""
        aware_datetime = datetime.datetime.now(datetime.timezone.utc)
        naive_datetime = datetime.datetime.now()

        date = datetime.date.today()
        naive_time = datetime.time(hour=11)
        aware_time = datetime.time(hour=11, tzinfo=datetime.timezone.utc)

        # Matching pairs round-trip unchanged.
        self.assertEqual(
            await self.con.query_one(
                'select <datetime>$0;',
                aware_datetime),
            aware_datetime)

        self.assertEqual(
            await self.con.query_one(
                'select <cal::local_datetime>$0;',
                naive_datetime),
            naive_datetime)

        self.assertEqual(
            await self.con.query_one(
                'select <cal::local_date>$0;',
                date),
            date)

        self.assertEqual(
            await self.con.query_one(
                'select <cal::local_time>$0;',
                naive_time),
            naive_time)

        # Mismatched awareness is rejected...
        with self.assertRaisesRegex(edgedb.InvalidArgumentError,
                                    r'a timezone-aware.*expected'):
            await self.con.query_one(
                'select <datetime>$0;',
                naive_datetime)

        with self.assertRaisesRegex(edgedb.InvalidArgumentError,
                                    r'a naive time object.*expected'):
            await self.con.query_one(
                'select <cal::local_time>$0;',
                aware_time)

        with self.assertRaisesRegex(edgedb.InvalidArgumentError,
                                    r'a naive datetime object.*expected'):
            await self.con.query_one(
                'select <cal::local_datetime>$0;',
                aware_datetime)

        # ...and so is the wrong type entirely (date where datetime is
        # expected).
        with self.assertRaisesRegex(edgedb.InvalidArgumentError,
                                    r'datetime.datetime object was expected'):
            await self.con.query_one(
                'select <cal::local_datetime>$0;',
                date)

        with self.assertRaisesRegex(edgedb.InvalidArgumentError,
                                    r'datetime.datetime object was expected'):
            await self.con.query_one(
                'select <datetime>$0;',
                date)
    async def test_async_mismatched_args_01(self):
        """Only unknown kwargs supplied: the error lists both the missed
        required name and the extras.  Set ordering is unstable, hence
        the [bc] character-class alternations in the expected pattern."""
        with self.assertRaisesRegex(
                edgedb.QueryArgumentError,
                r"expected {'a'} keyword arguments, got {'[bc]', '[bc]'}, "
                r"missed {'a'}, extra {'[bc]', '[bc]'}"):
            await self.con.query("""SELECT <int64>$a;""", b=1, c=2)
    async def test_async_mismatched_args_02(self):
        """Partial overlap: one expected kwarg given, one missed, two
        extras -- all three groups must be reported."""
        with self.assertRaisesRegex(
                edgedb.QueryArgumentError,
                r"expected {'[ab]', '[ab]'} keyword arguments, "
                r"got {'[acd]', '[acd]', '[acd]'}, "
                r"missed {'b'}, extra {'[cd]', '[cd]'}"):
            await self.con.query("""
                SELECT <int64>$a + <int64>$b;
            """, a=1, c=2, d=3)
    async def test_async_mismatched_args_03(self):
        """Single wrong kwarg: reported as both missed and extra."""
        with self.assertRaisesRegex(
                edgedb.QueryArgumentError,
                "expected {'a'} keyword arguments, got {'b'}, "
                "missed {'a'}, extra {'b'}"):
            await self.con.query("""SELECT <int64>$a;""", b=1)
    async def test_async_mismatched_args_04(self):
        """Missing kwarg only: no 'extra' clause appears in the message."""
        with self.assertRaisesRegex(
                edgedb.QueryArgumentError,
                r"expected {'[ab]', '[ab]'} keyword arguments, "
                r"got {'a'}, "
                r"missed {'b'}"):
            await self.con.query("""SELECT <int64>$a + <int64>$b;""", a=1)
    async def test_async_mismatched_args_05(self):
        """Extra kwarg only: no 'missed' clause appears in the message."""
        with self.assertRaisesRegex(
                edgedb.QueryArgumentError,
                r"expected {'a'} keyword arguments, "
                r"got {'[ab]', '[ab]'}, "
                r"extra {'b'}"):
            await self.con.query("""SELECT <int64>$a;""", a=1, b=2)
async def test_async_args_uuid_pack(self):
obj = await self.con.query_one(
'select schema::Object {id, name} limit 1')
# Test that the custom UUID that our driver uses can be
# passed back as a parameter.
ot = await self.con.query_one(
'select schema::Object {name} filter .id=<uuid>$id',
id=obj.id)
self.assertEqual(obj, ot)
# Test that a string UUID is acceptable.
ot = await self.con.query_one(
'select schema::Object {name} filter .id=<uuid>$id',
id=str(obj.id))
self.assertEqual(obj, ot)
# Test that a standard uuid.UUID is acceptable.
ot = await self.con.query_one(
'select schema::Object {name} | |
= [os.path.splitext(fname)[0] for fname in possibly_encrypted]
if corp_fname in spl1:
ix = spl1.index(corp_fname)
db = opened_db[ix]
print("\n >>>> {} <<<<".format(db.fname()))
for k,v in db.get_all_attr().items():
print " {} = '{}';".format(k,v)
elif corp_fname in spl2:
if not encryption_key:
logger.error("Current DBFile is possibly encrypted/damaged. Please use '--encryption_key'-Option to decrypt it. ")
else:
h = DBHandler(mode="blind")
h.connect(os.path.join(main_folders["corp"],corp_fname)+".db",encryption_key=encryption_key)
try:
if h.typ() == "corpus":
print("\n >>>> {} <<<<".format(h.fname()))
for k,v in h.get_all_attr().items():
print " {} = '{}';".format(k,v)
else:
logger.error("'{}'-DB is not an CorpusDB or given encryption key ('{}') is wrong. ".format(corp_fname,encryption_key))
return False
except:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or the encryption key was wrong'{}' ".format(corp_fname,encryption_key))
return False
else:
logger.error("Given fname ('{}') wasn't validated. It means, that possibly it is not a CorpusDB or it is just encrypted!".format(corp_fname))
return False
else:
logger.error("Given fname ('{}') wasn't found!".format(corp_fname))
return False
elif command1 == "basic_stats":
if not corp_fname:
logger.error("'--corp_fname' is not given. (you can also give tag 'all' instead of the corp_fname)")
return False
files = get_corp_fname(main_folders)
validated,possibly_encrypted,wrong,opened_db = validate_corp(main_folders,files)
if corp_fname == "all":
for db in opened_db:
print("\n >>>> {} <<<<".format(db.fname()))
print " doc_num = '{}';".format(db.get_attr("doc_num"))
print " sent_num = '{}';".format(db.get_attr("sent_num"))
print " token_num = '{}';".format(db.get_attr("token_num"))
print "\n\nNotice! with 'all'-Argument could be checked just not-encrypted DBs. If you want to check encrypted DB use additional to corp_fname also '--encryption_key'"
else:
if corp_fname in files:
if corp_fname in validated:
ix = validated.index(corp_fname)
db = opened_db[ix]
print("\n >>>> {} <<<<".format(db.fname()))
print " doc_num = '{}';".format(db.get_attr("doc_num"))
print " sent_num = '{}';".format(db.get_attr("sent_num"))
print " token_num = '{}';".format(db.get_attr("token_num"))
elif corp_fname in possibly_encrypted:
if not encryption_key:
logger.error("Current DBFile is possibly encrypted/damaged. Please use '--encryption_key'-Option to decrypt it. ")
else:
h = DBHandler(mode="blind")
h.connect(os.path.join(main_folders["corp"],corp_fname),encryption_key=encryption_key)
try:
if h.typ() == "corpus":
print("\n >>>> {} <<<<".format(h.fname()))
print " doc_num = '{}';".format(h.get_attr("doc_num"))
print " sent_num = '{}';".format(h.get_attr("sent_num"))
print " token_num = '{}';".format(h.get_attr("token_num"))
else:
logger.error("'{}'-DB is not an CorpusDB or given encryption key ('{}') is wrong. ".format(corp_fname,encryption_key))
return False
except:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or the encryption key was wrong'{}' ".format(corp_fname,encryption_key))
return False
elif corp_fname in [os.path.splitext(fname)[0] for fname in files]:
spl1 = [os.path.splitext(fname)[0] for fname in validated]
spl2 = [os.path.splitext(fname)[0] for fname in possibly_encrypted]
if corp_fname in spl1:
ix = spl1.index(corp_fname)
db = opened_db[ix]
print("\n >>>> {} <<<<".format(db.fname()))
print " doc_num = '{}';".format(db.get_attr("doc_num"))
print " sent_num = '{}';".format(db.get_attr("sent_num"))
print " token_num = '{}';".format(db.get_attr("token_num"))
elif corp_fname in spl2:
if not encryption_key:
logger.error("Current DBFile is possibly encrypted/damaged. Please use '--encryption_key'-Option to decrypt it. ")
else:
h = DBHandler(mode="blind")
h.connect(os.path.join(main_folders["corp"],corp_fname)+".db",encryption_key=encryption_key)
try:
if h.typ() == "corpus":
print("\n >>>> {} <<<<".format(h.fname()))
print " doc_num = '{}';".format(h.get_attr("doc_num"))
print " sent_num = '{}';".format(h.get_attr("sent_num"))
print " token_num = '{}';".format(h.get_attr("token_num"))
else:
logger.error("'{}'-DB is not an CorpusDB or given encryption key ('{}') is wrong. ".format(corp_fname,encryption_key))
return False
except:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or the encryption key was wrong'{}' ".format(corp_fname,encryption_key))
return False
else:
logger.error("Given fname ('{}') wasn't validated. It means, that possibly it is not a CorpusDB or it is just encrypted!".format(corp_fname))
return False
else:
logger.error("Given fname ('{}') wasn't found!".format(corp_fname))
return False
elif command1 == "update_attr":
if not corp_fname or not attr_name or not value:
logger.error("Command is incomplete: '--corp_fname' or '--attr_name' or '--value' is not given.")
else:
files = get_corp_fname(main_folders)
if corp_fname in files:
pass
elif corp_fname in [os.path.splitext(fname)[0] for fname in files]:
corp_fname = corp_fname+".db"
else:
logger.error("Given fname ('{}') wasn't found and can not be removed.".format(corp_fname))
return False
try:
db = DBHandler(mode="error")
db.connect(os.path.join(main_folders["corp"],corp_fname),encryption_key=encryption_key)
if db._db:
if db.typ() == "corpus":
if attr_name not in db.get_all_attr():
logger.error("Given Attribute ('{}') is not exist in this DataBase.".format(attr_name))
return False
db.update_attr(attr_name,value)
db._commit()
updated_attr = db.get_attr(attr_name)
#p((updated_attr, value))
if str(value) != str(updated_attr):
logger.error("Update of the given Attribute ('{}') failed.".format(attr_name))
return
else:
logger.info("Given Attribute ('{}') in the '{}'-DB was updated to '{}'.".format(attr_name,corp_fname, value))
return True
else:
logger.error("'{}'-DB is not an CorpusDB or given encryption key ('{}') is wrong. ".format(corp_fname,encryption_key))
return False
else:
#p(type(strtobool(encryption_key)), "strtobool(encryption_key)")
if strtobool(encryption_key):
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or the encryption key was wrong'{}' ".format(corp_fname,encryption_key))
else:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or encrypted or locked. Please use '--encryption_key' option to set an encryption_key ".format(corp_fname,encryption_key))
return False
except:
if strtobool(encryption_key):
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or the encryption key was wrong'{}' ".format(corp_fname,encryption_key))
else:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or encrypted or locked. Please use '--encryption_key' option to set an encryption_key ".format(corp_fname,encryption_key))
return False
elif command1 == "export":
if not corp_fname or not type_to_export:
logger.error("Command is incomplete: '--corp_fname' or '--type_to_export' is not given.")
return False
else:
files = get_corp_fname(main_folders)
export_dir = export_dir if export_dir else main_folders["export"]
export_name = export_name if export_name else "Export{}".format(int(time.time()))
if corp_fname in files:
pass
elif corp_fname in [os.path.splitext(fname)[0] for fname in files]:
corp_fname = corp_fname+".db"
else:
logger.error("Given fname ('{}') wasn't found and can not be removed.".format(corp_fname))
return False
corp = Corpus(mode="error")
corp.open(os.path.join(main_folders["corp"],corp_fname),encryption_key=encryption_key)
def intern_getter():
if status_bar:
status_bars_manager = _get_status_bars_manager()
status_bar_start = _get_new_status_bar(None, status_bars_manager.term.center("Exporter") , "", counter_format=status_bars_manager.term.bold_white_on_green("{fill}{desc}{fill}"),status_bars_manager=status_bars_manager)
status_bar_start.refresh()
#if status_bar:
status_bar_current = _get_new_status_bar(num, "Exporting:", "row",status_bars_manager=status_bars_manager)
for item in corp.docs(output="dict"):
if status_bar:
status_bar_current.update(incr=1)
yield item
if status_bar:
status_bar_total_summary = _get_new_status_bar(None, status_bars_manager.term.center("Exported: Rows: '{}'; ".format(num) ), "", counter_format=status_bars_manager.term.bold_white_on_green('{fill}{desc}{fill}\n'),status_bars_manager=status_bars_manager)
status_bar_total_summary.refresh()
status_bars_manager.stop()
if corp.corpdb._db:
num = corp.corpdb.rownum("documents")
exporter = Exporter(intern_getter(),rewrite=False,silent_ignore=False , mode=mode)
if type_to_export not in Exporter.supported_file_formats:
logger.error("Given Export Type ('{}') is not supported.".format(type_to_export))
return False
if type_to_export == "csv":
#p(cols,"cols")
cols = corp.corpdb.col("documents")
exporter.tocsv(export_dir, export_name, cols, rows_limit_in_file=rows_limit_in_file)
elif type_to_export == "xml":
exporter.toxml(export_dir, export_name, rows_limit_in_file=rows_limit_in_file)
elif type_to_export == "json":
exporter.tojson(export_dir, export_name, rows_limit_in_file=rows_limit_in_file)
else:
logger.error("Given Export Type ('{}') is not supported.".format(type_to_export))
return False
else:
#p(type(strtobool(encryption_key)), "strtobool(encryption_key)")
if strtobool(encryption_key):
logger.error("'{}'-DB wasn't opened. Possibly this DB is locked or damaged or given encryption key ('{}') is wrong. ".format(corp_fname,encryption_key))
else:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or encrypted or locked. Please use '--encryption_key' option to set an encryption_key ".format(corp_fname,encryption_key))
return False
elif command1 == "used_tools":
term = Terminal()
for tool_name, data in CorpusData.info.items():
print "\n\n\n\n"
print term.bold_white_on_magenta(" >>>>> {} <<<<< ".format(tool_name))
if tool_name == "tagger":
for tagger_name, infos in data.items():
print term.bold_white_on_cyan(" TaggerName: {} ".format(tagger_name))
print "\t\t"+json.dumps(CorpusData.info[tool_name][tagger_name], sort_keys=True, indent=5).replace("\n", "\n\t")
print "\n\n"
else:
print "\t"+json.dumps(CorpusData.info[tool_name], sort_keys=True, indent=5).replace("\n", "\n\t")
elif command1 == "cols":
if not corp_fname :
logger.error("Command is incomplete: '--corp_fname' is not given.")
else:
files = get_corp_fname(main_folders)
if corp_fname in files:
pass
elif corp_fname in [os.path.splitext(fname)[0] for fname in files]:
corp_fname = corp_fname+".db"
else:
logger.error("Given fname ('{}') wasn't found and can not be removed.".format(corp_fname))
return False
try:
db = DBHandler(mode="error")
db.connect(os.path.join(main_folders["corp"],corp_fname),encryption_key=encryption_key)
if db._db:
if db.typ() == "corpus":
print " Columns in the DocumentTable for {} :".format(db.fname())
i = 0
temp = []
for col in db.col("documents"):
i += 1
if i < 4:
temp.append(col)
else:
print " {}".format(temp)
temp = []
i = 0
if temp:
print " {}".format(temp)
else:
logger.error("'{}'-DB is not an CorpusDB or given encryption key ('{}') is wrong. ".format(corp_fname,encryption_key))
return False
else:
#p(type(strtobool(encryption_key)), "strtobool(encryption_key)")
if strtobool(encryption_key):
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or the encryption key was wrong'{}' ".format(corp_fname,encryption_key))
else:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or encrypted or locked. Please use '--encryption_key' option to set an encryption_key ".format(corp_fname,encryption_key))
return False
except:
if strtobool(encryption_key):
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or the encryption key was wrong'{}' ".format(corp_fname,encryption_key))
else:
logger.error("'{}'-DB wasn't opened. Possibly this DB is damaged or encrypted or locked. Please use '--encryption_key' option to set an encryption_key ".format(corp_fname,encryption_key))
return False
elif command1 == "doc":
if not corp_fname or not doc_id:
logger.error("Command is incomplete: '--corp_fname' , '--doc_id' is not given.")
return False
else:
files = get_corp_fname(main_folders)
if corp_fname in files:
pass
elif corp_fname in [os.path.splitext(fname)[0] for fname in files]:
corp_fname = corp_fname+".db"
else:
logger.error("Given fname ('{}') wasn't found and can not be removed.".format(corp_fname))
return False
try:
corp = Corpus(mode="error")
corp.open(os.path.join(main_folders["corp"],corp_fname),encryption_key=encryption_key)
if corp.corpdb:
#p(corp._id_field_name, "corp._id_field_name")
getted_docs = list(corp.docs( where="{}='{}'".format(corp._id_field_name, doc_id), output="dict"))
#p(getted_docs, "getted_docs")
print "\n >>> {} <<< :".format(corp.corpdb.fname())
print " (Matched DocItems for doc_id: | |
tvm_out)
def test_where():
    """Test the ONNX Where operator, including broadcasting of the
    condition and of either data input.

    Fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``bool`` is the supported equivalent dtype spelling.
    """
    condition = np.array([[1, 0], [1, 1]], dtype=bool)
    x = np.array([[1, 2], [3, 4]], dtype=np.int64)
    y = np.array([[9, 8], [7, 6]], dtype=np.int64)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.INT64, outdata)

    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[9, 8], [7, 6]], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)

    # Scalar x broadcast against 1-element y, and vice versa.
    x = np.array(1, dtype=np.float32)
    y = np.array([2], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)

    x = np.array([2], dtype=np.float32)
    y = np.array(1, dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)

    # Scalar condition broadcast over 2-d inputs.
    condition = np.array(1, dtype=bool)
    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[5, 6], [7, 8]], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)

    # Column-vector y broadcast against 2-d x.
    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[1], [7]], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)
def verify_or(indata, dtype):
    """Build a one-node ONNX graph for logical Or and compare TVM's
    output against numpy.logical_or on every enabled target/context.

    :param indata: two-element sequence of input arrays.
    :param dtype: dtype both inputs are cast to before comparison.
    """
    x = indata[0].astype(dtype)
    y = indata[1].astype(dtype)
    # Reference result computed with NumPy.
    outdata = np.logical_or(x, y)

    node = helper.make_node('Or', inputs=['in1', 'in2'], outputs=['out'], )

    graph = helper.make_graph([node],
                              'or_test',
                              inputs=[helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
                                      helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape))],
                              outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))])

    model = helper.make_model(graph, producer_name='or_test')

    # Compile and run through TVM on each available backend.
    for target, ctx in ctx_list():
        tvm_out = get_tvm_output(model, [x, y], target, ctx, outdata.shape)
        tvm.testing.assert_allclose(outdata, tvm_out)
def test_or():
    """Run the Or verifier over several shape combinations, including
    broadcasting of lower-rank right-hand operands."""
    shape_pairs = [
        ((3, 4), (3, 4)),              # 2d
        ((3, 4, 5), (3, 4, 5)),        # 3d
        ((3, 4, 5, 6), (3, 4, 5, 6)),  # 4d
        ((3, 4, 5), (5,)),             # 3d vs 1d
        ((3, 4, 5), (4, 5)),           # 3d vs 2d
    ]
    for lshape, rshape in shape_pairs:
        lhs = (np.random.randn(*lshape) > 0)
        rhs = (np.random.randn(*rshape) > 0)
        verify_or(indata=[lhs, rhs], dtype=bool)
def test_batch_norm():
    """Check the ONNX BatchNormalization operator against onnxruntime
    for a range of NCHW input shapes."""
    def verify_batch_norm(in_shape):
        channels = in_shape[1]
        # Input x plus the four per-channel parameter tensors.
        value_infos = [helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape))]
        for pname in ("scale", "B", "mean", "var"):
            value_infos.append(
                helper.make_tensor_value_info(pname, TensorProto.FLOAT, [channels]))
        batchnorm = onnx.helper.make_node('BatchNormalization',
                                          inputs=["x", "scale", "B", "mean", "var"],
                                          outputs=['Y'])
        graph = helper.make_graph(
            [batchnorm],
            "batchnorm_test",
            inputs=value_infos,
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(in_shape))])
        model = helper.make_model(graph, producer_name='batchnorm_test')
        for target, ctx in ctx_list():
            x = np.random.uniform(size=in_shape).astype('float32')
            scale = np.random.uniform(size=channels).astype('float32')
            b = np.random.uniform(size=channels).astype('float32')
            mean = np.random.uniform(size=channels).astype('float32')
            var = np.random.uniform(size=channels).astype('float32')
            onnx_out = get_onnxruntime_output(model, [x, scale, b, mean, var], 'float32')[0]
            tvm_out = get_tvm_output(model, [x, scale, b, mean, var], target, ctx, in_shape, 'float32')
            tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-5, atol=1e-5)

    for shape in ([1, 3, 224, 224],
                  [1, 3, 24, 24],
                  [16, 3, 24, 24],
                  [16, 16, 24, 24],
                  [16, 16, 10, 10]):
        verify_batch_norm(shape)
def test_batch_norm_dynamic_subgraph():
    """BatchNormalization whose output shape drives a dynamic Reshape.

    The Shape of the batchnorm result Y is used as the target shape for
    reshaping a second input, so the Reshape target is only known at
    runtime (exercises TVM's dynamic-shape handling).
    """
    def verify_batch_norm_dynamic_subgraph(in_shape, o_shape):
        # Graph: BatchNormalization(x) -> Y; Shape(Y) -> shape;
        # Reshape(in, shape) -> out.  o_shape must have the same number of
        # elements as in_shape for the reshape to be valid.
        batchnorm = onnx.helper.make_node('BatchNormalization',
                                          inputs=["x", "scale", "B", "mean", "var"],
                                          outputs=['Y'])
        shape_node = helper.make_node("Shape", ['Y'], ['shape'])
        reshape_node = helper.make_node("Reshape", ["in", "shape"], ["out"])
        graph = helper.make_graph([batchnorm, shape_node, reshape_node],
                                  "batchnorm_test",
                                  inputs=[helper.make_tensor_value_info("x",
                                                                        TensorProto.FLOAT, list(in_shape)),
                                          helper.make_tensor_value_info("in",
                                                                        TensorProto.FLOAT, list(o_shape)),
                                          helper.make_tensor_value_info("scale",
                                                                        TensorProto.FLOAT, [in_shape[1]]),
                                          helper.make_tensor_value_info("B",
                                                                        TensorProto.FLOAT, [in_shape[1]]),
                                          helper.make_tensor_value_info("mean",
                                                                        TensorProto.FLOAT, [in_shape[1]]),
                                          helper.make_tensor_value_info("var",
                                                                        TensorProto.FLOAT, [in_shape[1]]),
                                          ],
                                  outputs=[helper.make_tensor_value_info("out",
                                                                         TensorProto.FLOAT, list(in_shape))])
        model = helper.make_model(graph, producer_name='batchnorm_test')
        # Compare TVM against onnxruntime on random inputs.
        for target, ctx in ctx_list():
            x = np.random.uniform(size=in_shape).astype('float32')
            inp = np.random.uniform(size=o_shape).astype('float32')
            scale = np.random.uniform(size=in_shape[1]).astype('float32')
            b = np.random.uniform(size=in_shape[1]).astype('float32')
            mean = np.random.uniform(size=in_shape[1]).astype('float32')
            var = np.random.uniform(size=in_shape[1]).astype('float32')
            onnx_out = get_onnxruntime_output(model, [x, inp, scale, b, mean, var], 'float32')[0]
            tvm_out = get_tvm_output(model, [x, inp, scale, b, mean, var], target, ctx, in_shape, 'float32')
            tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-5, atol=1e-5)
    # 160*160 == 16*16*10*10, so the reshape is element-count preserving.
    verify_batch_norm_dynamic_subgraph([16, 16, 10, 10], [160, 160])
def verify_conv(x_shape, w_shape, y_shape, padding, kernel_shape, strides, dilations, auto_pad="NOTSET", unset_pad=False):
    """Build a single Conv node and compare TVM's output with onnxruntime.

    Padding is encoded one of three ways:
      * unset_pad       -- no padding attribute at all (ONNX defaults apply)
      * padding is None -- use the 'auto_pad' attribute
      * otherwise       -- explicit 'pads' attribute
    """
    conv_attrs = dict(kernel_shape=kernel_shape,
                      strides=strides,
                      dilations=dilations)
    # groups is left at its default of 1
    if unset_pad:
        pass  # neither pads nor auto_pad is set
    elif padding is None:
        conv_attrs['auto_pad'] = auto_pad
    else:
        conv_attrs['pads'] = padding
    node = helper.make_node('Conv',
                            inputs=['x', 'W'],
                            outputs=['y'],
                            **conv_attrs)
    graph_inputs = [
        helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
        helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
    ]
    graph_outputs = [
        helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape)),
    ]
    graph = helper.make_graph([node], 'conv_test',
                              inputs=graph_inputs,
                              outputs=graph_outputs)
    model = helper.make_model(graph, producer_name='conv_test')
    for target, ctx in ctx_list():
        x = np.random.uniform(size=x_shape).astype('float32')
        W = np.random.uniform(size=w_shape).astype('float32')
        tvm_out = get_tvm_output(model, [x, W], target, ctx, y_shape)
        onnx_out = get_onnxruntime_output(model, [x, W], 'float32')[0]
        tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_conv():
    """Conv in 1-D/2-D/3-D with explicit pads, SAME_UPPER auto-padding,
    unset padding, non-uniform stride, and dilation."""
    def repeat(N, D):
        # A D-dimensional tuple filled with N.
        return tuple([N for _ in range(D)])
    for D in [1, 2, 3]:
        # Convolution with padding
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(5, D),
                    2 * repeat(1, D),
                    repeat(3, D),
                    repeat(1, D),
                    repeat(1, D))
        # Convolution without padding
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(3, D),
                    2 * repeat(0, D),
                    repeat(3, D),
                    repeat(1, D),
                    repeat(1, D))
        # Convolution with autopadding
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(5, D),
                    None,
                    repeat(3, D),
                    repeat(1, D),
                    repeat(1, D),
                    auto_pad="SAME_UPPER")
        # Convolution with unset padding
        # FIX: `True` was previously passed positionally and bound to
        # `auto_pad` instead of `unset_pad`, so this case silently
        # re-tested explicit zero padding. Pass it by keyword so the pad
        # attribute really is unset.
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(3, D),
                    2 * repeat(0, D),
                    repeat(3, D),
                    repeat(1, D),
                    repeat(1, D),
                    unset_pad=True)
        # Convolution with non uniform stride
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(3, D),
                    None,
                    repeat(3, D),
                    repeat(2, D),
                    repeat(1, D),
                    auto_pad="SAME_UPPER")
        # Convolution with dilation
        verify_conv((1, 1) + repeat(5, D),
                    (1, 1) + repeat(3, D),
                    (1, 1) + repeat(5, D),
                    2 * repeat(2, D),
                    repeat(3, D),
                    repeat(1, D),
                    repeat(2, D))
def verify_convtranspose(x_shape, w_shape, y_shape, p):
    """Build a single ConvTranspose node (3x3 kernel, strides [3, 2],
    pads *p*) and check TVM against onnxruntime."""
    node = onnx.helper.make_node("ConvTranspose",
                                 inputs=["x", "W"],
                                 outputs=['y'],
                                 strides=[3, 2],
                                 group=1,
                                 kernel_shape=[3, 3],
                                 pads=p)
    graph_inputs = [
        helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
        helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
    ]
    graph_outputs = [
        helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape)),
    ]
    graph = helper.make_graph([node], 'verify_convtranspose_test',
                              inputs=graph_inputs,
                              outputs=graph_outputs)
    model = helper.make_model(graph, producer_name='convtranspose_trest')
    for target, ctx in ctx_list():
        data = np.random.uniform(size=x_shape).astype('float32')
        weights = np.random.uniform(size=w_shape).astype('float32')
        tvm_out = get_tvm_output(model, [data, weights], target, ctx, y_shape)
        onnx_out = get_onnxruntime_output(model, [data, weights], 'float32')[0]
        tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_convtranspose():
    """ConvTranspose with explicit padding.

    A (1, 1, 3, 3) input transposed-convolved with a (1, 2, 3, 3) weight
    tensor under pads [1, 2, 1, 2] must yield a (1, 2, 7, 3) output.
    """
    verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2])
def test_unsqueeze_constant():
    """Regression test: importing a torch-exported model whose flatten
    step produces Unsqueeze nodes with constant inputs must not fail."""
    from torch.nn import Linear, Sequential, Module
    class Flatten(Module):
        # Collapse all non-batch dimensions; exported as view/reshape ops.
        def forward(self, input):
            return input.view(input.size(0), -1)
    import tempfile
    with tempfile.NamedTemporaryFile() as fp:
        file_name = fp.name
        input_size = (1, 16, 32, 32)
        dummy_input = torch.randn(*input_size)
        layer = Sequential(Flatten(), Linear(16 * 32 * 32, 64))
        torch.onnx.export(layer, dummy_input, file_name, export_params=True)
        onnx_model = onnx.load(file_name)
        # Only checks that the conversion succeeds; no numeric comparison.
        relay.frontend.from_onnx(onnx_model, {'0': input_size})
def verify_pooling(x_shape, kernel_shape, strides, pads, out_shape, mode, auto_pad="NOTSET"):
    """Build a MaxPool/AveragePool node and check TVM against onnxruntime.

    When *pads* is None the 'auto_pad' attribute is used instead of an
    explicit 'pads' attribute.
    """
    x_np = np.random.uniform(size=x_shape).astype('float32')
    node_types = {'max': "MaxPool", 'average': "AveragePool"}
    if mode not in node_types:
        raise ValueError("Pool method {} is not supported.".format(mode))
    pool_node = helper.make_node(
        node_types[mode], inputs=["x"], outputs=["y"], kernel_shape=kernel_shape, strides=strides)
    if pads is None:
        pool_node.attribute.append(helper.make_attribute('auto_pad', auto_pad))
    else:
        pool_node.attribute.append(helper.make_attribute('pads', pads))
    if mode == 'max':
        # MaxPool also takes storage_order; pin the default (row-major).
        pool_node.attribute.append(helper.make_attribute('storage_order', 0))
    graph = helper.make_graph([pool_node],
                              "pooling_test",
                              inputs=[helper.make_tensor_value_info("x",
                                                                    TensorProto.FLOAT, list(x_shape))],
                              outputs=[helper.make_tensor_value_info("y",
                                                                     TensorProto.FLOAT, list(out_shape))])
    model = helper.make_model(graph, producer_name='pooling_test')
    for target, ctx in ctx_list():
        onnx_out = get_onnxruntime_output(model, x_np, 'float32')
        tvm_out = get_tvm_output(
            model, [x_np], target, ctx, out_shape)
        tvm.testing.assert_allclose(onnx_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_pooling():
    """Max and average pooling in 1-D/2-D/3-D, with and without strides,
    explicit pads, and SAME_UPPER auto-padding."""
    cases = [
        # (x_shape, kernel_shape, strides, pads, out_shape, auto_pad)
        ([1, 1, 32], [3], [1], [1, 1], [1, 1, 32], "NOTSET"),                        # Pool1D
        ([1, 1, 32, 32], [3, 3], [1, 1], [1, 1, 1, 1], [1, 1, 32, 32], "NOTSET"),    # Pool2D
        ([1, 1, 32], [3], [2], [1, 1], [1, 1, 16], "NOTSET"),                        # Pool1D, stride
        ([1, 1, 32, 32], [3, 3], [2, 2], [1, 1, 1, 1], [1, 1, 16, 16], "NOTSET"),    # Pool2D, stride
        ([1, 1, 32], [3], [2], None, [1, 1, 16], 'SAME_UPPER'),                      # Pool1D, stride + autopad
        ([1, 1, 32, 32], [3, 3], [2, 2], None, [1, 1, 16, 16], 'SAME_UPPER'),        # Pool2D, stride + autopad
        ([1, 1, 32, 32, 32], [3, 3, 3], [2, 2, 2],
         [1, 1, 1, 1, 1, 1], [1, 1, 16, 16, 16], "NOTSET"),                          # Pool3D, stride
        ([1, 1, 32, 32, 32], [3, 3, 3], [2, 2, 2],
         None, [1, 1, 16, 16, 16], 'SAME_UPPER'),                                    # Pool3D, stride + autopad
    ]
    for mode in ['max', 'average']:
        for x_shape, kernel_shape, strides, pads, out_shape, auto_pad in cases:
            verify_pooling(x_shape=x_shape,
                           kernel_shape=kernel_shape,
                           strides=strides,
                           pads=pads,
                           out_shape=out_shape,
                           mode=mode,
                           auto_pad=auto_pad)
def verify_mod(x_shape, y_shape, fmod, dtype='float32'):
x_np = np.random.uniform(size=x_shape).astype(dtype)
y_np = np.random.uniform(size=y_shape).astype(dtype)
y_np = np.where(y_np==0, 1, y_np) #remove 0's to avoid division by zero error
if fmod:
np_out = np.fmod(x_np, y_np)
else:
np_out = np.mod(x_np, y_np)
out_shape = np_out.shape
| |
# -*- coding: utf-8 -*-
################################################
###### Copyright (c) 2016, <NAME>
###
import numpy as np
import itertools
import time
from .categoryaction import CatObject
class MultQ(object):
    """An element of the multiplicative quantale.

    The carrier is the real interval [0, 1]; the monoid operation is the
    ordinary product and the supremum is the maximum.
    """

    def __init__(self, x):
        """Initialize an element of the multiplicative quantale.

        Parameters
        ----------
        x: a float value between 0 and 1

        Raises an exception if the value is not in the interval [0,1].
        """
        if x < 0 or x > 1:
            raise Exception("Real number should be comprised between 0 and 1")
        self.x = x

    def _require_same_class(self, rhs):
        # Every binary operation only accepts operands of the same quantale.
        if not isinstance(rhs, self.__class__):
            raise Exception("RHS is not a valid MultQ")

    @staticmethod
    def Unit():
        """Return the unit (1.0) of the monoid operation."""
        return MultQ(1.0)

    @staticmethod
    def Zero():
        """Return the zero value (0.0) of the quantale."""
        return MultQ(0.0)

    def __mul__(self, rhs):
        """Monoid composition: the ordinary product of the two numbers."""
        self._require_same_class(rhs)
        return self.__class__(self.x * rhs.x)

    def __add__(self, rhs):
        """Supremum: the maximum of the two numbers."""
        self._require_same_class(rhs)
        return self.__class__(max(self.x, rhs.x))

    def __eq__(self, rhs):
        """Equality of the underlying values."""
        self._require_same_class(rhs)
        return self.x == rhs.x

    def __lt__(self, rhs):
        """Strict order on the underlying values."""
        self._require_same_class(rhs)
        return self.x < rhs.x

    def __le__(self, rhs):
        """Non-strict order on the underlying values."""
        self._require_same_class(rhs)
        return self.x <= rhs.x

    def __str__(self):
        """Verbose description: the underlying value."""
        return str(self.x)

    def __repr__(self):
        return "MultQ({})".format(self.x)
class IntvQ(object):
    """An element of the interval quantale.

    The carrier is the real interval [0, 1]; the monoid operation is the
    minimum and the supremum is the maximum.
    """

    def __init__(self, x):
        """Initialize an element of the interval quantale.

        Parameters
        ----------
        x: a float value between 0 and 1

        Raises an exception if the value is not in the interval [0,1].
        """
        if x < 0 or x > 1:
            raise Exception("Real number should be comprised between 0 and 1")
        self.x = x

    def _require_same_class(self, rhs):
        # Every binary operation only accepts operands of the same quantale.
        if not isinstance(rhs, self.__class__):
            raise Exception("RHS is not a valid IntvQ")

    @staticmethod
    def Unit():
        """Return the unit (1.0) of the monoid operation."""
        return IntvQ(1.0)

    @staticmethod
    def Zero():
        """Return the zero value (0.0) of the quantale."""
        return IntvQ(0.0)

    def __mul__(self, rhs):
        """Monoid composition: the minimum of the two numbers."""
        self._require_same_class(rhs)
        return self.__class__(min(self.x, rhs.x))

    def __add__(self, rhs):
        """Supremum: the maximum of the two numbers."""
        self._require_same_class(rhs)
        return self.__class__(max(self.x, rhs.x))

    def __eq__(self, rhs):
        """Equality of the underlying values."""
        self._require_same_class(rhs)
        return self.x == rhs.x

    def __lt__(self, rhs):
        """Strict order on the underlying values."""
        self._require_same_class(rhs)
        return self.x < rhs.x

    def __le__(self, rhs):
        """Non-strict order on the underlying values."""
        self._require_same_class(rhs)
        return self.x <= rhs.x

    def __str__(self):
        """Verbose description: the underlying value."""
        return str(self.x)

    def __repr__(self):
        return "IntvQ({})".format(self.x)
class Lin3Q(IntvQ):
    """The three-element linear order quantale {0, 1/2, 1}.

    A sub-quantale of the interval quantale restricted to the values
    0, 1/2, and 1.
    """

    def __init__(self, x):
        """Initialize an element of the three-element linear quantale.

        Parameters
        ----------
        x: a float value being either 0, 1/2, or 1.

        Raises an exception for any other value.
        """
        if x not in (0, 0.5, 1):
            raise Exception("The possibles values are 0, 1/2, and 1")
        super().__init__(x)

    @staticmethod
    def Unit():
        """Return the unit (1.0) of the monoid operation."""
        return Lin3Q(1.0)

    @staticmethod
    def Zero():
        """Return the zero value (0.0) of the quantale."""
        return Lin3Q(0.0)

    def __str__(self):
        """Verbose description: the underlying value."""
        return str(self.x)

    def __repr__(self):
        return "Lin3Q({})".format(self.x)
########################################################
class QMorphism(object):
def __init__(self,name,source,target,qtype=None,mapping=None):
"""Initializes a quantaloid morphism between two sets.
Parameters
----------
name: a string representing the name of the morphism
source: an instance of CatObject representing the domain of the morphism
target: an instance of CatObject representing the codomain of
the morphism
qtype: class of quantale for the morphism
mapping: optional argument representing the mapping of elements
between the domain and the codomain. The mapping can be
given as a NumPy array matrix or as a dictionary.
Returns
-------
None
Raises an exception if
- the source is not an instance of a CatObject
- the target is not an instance of a CatObject
- the type (class) of quantale is not specified
"""
if not isinstance(source,CatObject):
raise Exception("Source is not a valid CatObject class\n")
if not isinstance(target,CatObject):
raise Exception("Target is not a valid CatObject class\n")
if qtype is None:
raise Exception("Type of quantale should be specified")
self.name = name
self.source = source
self.target = target
self.qtype = qtype
if mapping is not None:
if isinstance(mapping,np.ndarray)==False:
self.set_mapping(mapping)
else:
self.set_mapping_matrix(mapping)
def set_name(self,name):
"""Sets the name of the morphism
Parameters
----------
name: a string representing the new name of the morphism
Returns
-------
None
"""
if not len(name):
raise Exception("The specified morphism name is empty")
self.name = name
def set_to_identity(self):
"""Sets the morphism to be an identity morphism. The domain and codomain
must be identical.
Parameters
----------
None
Returns
-------
None
"""
if not (self.source==self.target):
raise Exception("Source and target should be identical")
card_source = self.source.get_cardinality()
M = np.empty((card_source,card_source),dtype=self.qtype)
for i in range(card_source):
for j in range(card_source):
if | |
<filename>dim-testsuite/tests/dns_test.py
from dim import db
from dim.dns import get_ip_from_ptr_name
from dim.rrtype import validate_strings
from dim.errors import InvalidParameterError, AlreadyExistsError, InvalidZoneError, DimError
from tests.util import RPCTest, raises
def test_validate_strings():
    """Escaped quote, backslash, and octal byte must be accepted in TXT strings."""
    # NOTE(review): the call is repeated verbatim — presumably to catch state
    # leaking between validations; confirm intent before deduplicating.
    validate_strings(None, 'strings', [r'''\"\\\223'''])
    validate_strings(None, 'strings', [r'''\"\\\223'''])
def rrs(coll, fields=('record', 'zone', 'type', 'value')):
    """Normalize a collection of resource records into a set of tuples.

    Dict records are projected onto *fields*, dropping SOA records
    whenever 'type' is among the projected fields; any other collection
    is simply turned into a set. A falsy collection yields an empty set.
    """
    if not coll:
        return set()
    if not isinstance(coll[0], dict):
        return set(coll)
    keep_soa = 'type' not in fields
    return {tuple(rr[f] for f in fields)
            for rr in coll
            if keep_soa or rr['type'] != 'SOA'}
def print_messages(result):
    """Print the text part of each (level, text) message pair, one per line."""
    texts = [entry[1] for entry in result['messages']]
    print('\n'.join(texts))
def test_get_ip_from_ptr_name():
    """PTR owner names map back to addresses; RFC 2317 '/len' labels are
    accepted; malformed names raise ValueError."""
    assert get_ip_from_ptr_name('1.2.3.4.in-addr.arpa.') == '4.3.2.1'
    assert get_ip_from_ptr_name('1.2/32.2.3.4.in-addr.arpa.') == '4.3.2.1'
    assert get_ip_from_ptr_name('1.2/32.2.3.4.in-addr.arpa.') == '4.3.2.1'
    # NOTE(review): the expected IPv6 value below does not correspond to the
    # nibble labels in the ip6.arpa name — it looks like it was mangled by a
    # data scrubber; verify against the actual dim implementation.
    assert get_ip_from_ptr_name('2.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.ip6.arpa.') == \
        'fc00:e968:6179::de52:7100'
    with raises(ValueError):
        get_ip_from_ptr_name('abc')
    with raises(ValueError):
        get_ip_from_ptr_name('1.3.4.in-addr.arpa.')
class ZoneTest(RPCTest):
    """Zone-level RPC API tests: creation, renaming, views, attributes,
    profiles, listing/counting, reverse zones, subzones, DNSSEC
    attributes, and favorites. Each test drives the stateful RPC proxy
    `self.r`, so statement order matters."""
    def test_create_zone(self):
        """Invalid names, bad SOA attrs, and duplicate creation are rejected."""
        with raises(InvalidParameterError):
            self.r.zone_create('a 0.com')
        with raises(InvalidParameterError):
            self.r.zone_create('test.com', soa_attributes={'a': 1})
        self.r.zone_create('test.com')
        with raises(AlreadyExistsError):
            self.r.zone_create('test.com')
        with raises(InvalidParameterError):
            self.r.zone_create('test.com.')
        with raises(InvalidParameterError):
            self.r.zone_create('test-')
    def test_zone_rename(self):
        """Renaming a profile zone carries its records; profile flag must match."""
        self.r.zone_create('internal', profile=True)
        self.r.rr_create(name='internal.', type='NS', nsdname='external.')
        self.r.rr_create(name='a.internal.', type='CNAME', cname='c')
        self.r.zone_rename('internal', 'public', profile=True)
        assert self.r.zone_list(profile=True) == [{'name': 'public'}]
        assert rrs(self.r.rr_list(zone='public', profile=True)) == rrs([
            ('@', 'public', 'NS', 'external.'),
            ('a', 'public', 'CNAME', 'c')])
        with raises(InvalidParameterError):
            self.r.zone_rename('public', 'private', profile=False)
    def test_add_view_1(self):
        """A new view is listed next to the implicit 'default' view."""
        self.r.zone_create('test.com')
        self.r.zone_create_view('test.com', 'view')
        assert self.r.zone_list_views('test.com') == [{'name': 'default'}, {'name': 'view'}]
    def test_rename_view(self):
        """Views can be renamed in place."""
        self.r.zone_create('test.com')
        self.r.zone_create_view('test.com', 'view')
        self.r.zone_rename_view('test.com', 'view', 'test')
        assert self.r.zone_list_views('test.com') == [{'name': 'default'}, {'name': 'test'}]
    def test_add_view_2(self):
        """Profile zones cannot have views."""
        self.r.zone_create('profile', profile=True)
        with raises(DimError):
            self.r.zone_create_view('profile', 'test')
    def test_attrs(self):
        """Zone and SOA attributes can be set, read, updated, and deleted."""
        self.r.zone_create('test.com', attributes={'a': 'b'}, soa_attributes={'primary': 'c.'})
        assert self.r.zone_get_attrs('test.com')['a'] == 'b'
        self.r.zone_set_attrs('test.com', {'a': '1'})
        assert self.r.zone_get_attrs('test.com')['a'] == '1'
        self.r.zone_delete_attrs('test.com', ['a'])
        assert 'a' not in self.r.zone_get_attrs('test.com')
        assert self.r.zone_get_soa_attrs('test.com')['primary'] == 'c.'
        self.r.zone_set_soa_attrs('test.com', {'primary': 'd.'})
        assert self.r.zone_get_soa_attrs('test.com')['primary'] == 'd.'
    def test_profiles(self):
        """SOA attrs inherit from the profile, with per-zone overrides;
        deletion must use the correct profile flag."""
        self.r.zone_create('internal', profile=True, soa_attributes=dict(mail='a.b.com.', refresh='1337', expire=1))
        self.r.zone_create('test.com', from_profile='internal', soa_attributes=dict(refresh='47'))
        assert self.r.zone_get_soa_attrs('test.com')['refresh'] == 47
        assert self.r.zone_get_soa_attrs('test.com')['mail'] == 'a.b.com.'
        with raises(InvalidZoneError):
            self.r.zone_delete('internal', profile=False)
        with raises(InvalidZoneError):
            self.r.zone_delete('test.com', profile=True)
        self.r.zone_delete('internal', profile=True)
        self.r.zone_delete('test.com')
    def test_profile_rrs(self):
        """Records defined on a profile are copied into zones created from it."""
        self.r.zone_create('profile', profile=True)
        self.r.rr_create(name='@', zone='profile', type='NS', nsdname='whatever.com.', profile=True)
        self.r.rr_create(name='a', zone='profile', type='TXT', strings='"something"', profile=True)
        self.r.zone_create('test.com', from_profile='profile')
        assert rrs(self.r.rr_list('*test.com.')) == rrs(
            [('a', 'test.com', 'TXT', '"something"'),
             ('@', 'test.com', 'NS', 'whatever.com.')])
    def test_list_zone(self):
        """rr_list returns the SOA first plus the created records (ttl-aware)."""
        self.r.zone_create('some.domain', soa_attributes=dict(primary='ns01.company.com.', mail='dnsadmin.company.com.'))
        self.r.rr_create(name='some.domain.', type='MX', preference=10, exchange='mail.other.domain.', ttl=1200)
        self.r.rr_create(name='www.some.domain.', type='A', ip='192.168.78.2')
        records = self.r.rr_list(zone='some.domain')
        assert records[0]['type'] == 'SOA' and records[0]['value'].startswith('ns01.company.com. dnsadmin.company.com')
        assert rrs([('@', 'some.domain', 1200, 'MX', '10 mail.other.domain.'),
                    ('www', 'some.domain', None, 'A', '192.168.78.2')])\
            <= rrs(records, fields=('record', 'zone', 'ttl', 'type', 'value'))
    def test_zone_list_underscore(self):
        """Wildcard patterns match literal underscores in zone names."""
        self.r.zone_create('nounderscore.com')
        self.r.zone_create('with_underscore.com')
        assert self.r.zone_list() == [
            {'name': 'nounderscore.com'},
            {'name': 'with_underscore.com'}]
        assert self.r.zone_list('*_*') == [{'name': 'with_underscore.com'}]
    def test_zone_list(self):
        """zone_list reports view and zone-group counts, honors the profile
        flag, supports limit/offset paging, and zone_count agrees."""
        self.r.zone_create('profile.domain', profile=True)
        self.r.zone_create('master.domain')
        self.r.zone_create('no-no.domain')
        self.r.zone_create('multipleviews.domain')
        self.r.zone_create_view('multipleviews.domain', 'secondview')
        self.r.zone_create('second.domain')
        self.r.zone_group_create('zg')
        self.r.zone_group_create('zg2')
        self.r.zone_group_create('zg3')
        self.r.zone_group_add_zone('zg', 'master.domain')
        self.r.zone_group_add_zone('zg2', 'master.domain')
        self.r.zone_group_add_zone('zg', 'second.domain')
        self.r.zone_group_add_zone('zg', 'multipleviews.domain', 'default')
        self.r.zone_group_add_zone('zg2', 'multipleviews.domain', 'secondview')
        self.r.zone_group_add_zone('zg3', 'multipleviews.domain', 'default')
        assert rrs(self.r.zone_list('*domain', profile=False, fields=True),
                   fields=('name', 'views', 'zone_groups')) == rrs(
            [('second.domain', 1, 1),
             ('master.domain', 1, 2),
             ('multipleviews.domain', 2, 3),
             ('no-no.domain', 1, 0)
             ])
        assert rrs(self.r.zone_list('*domain', profile=True, fields=True),
                   fields=('name',)) == rrs([('profile.domain',)])
        assert rrs(self.r.zone_list('*domain', profile=False, fields=True),
                   fields=('name', 'views')) == rrs(
            [('second.domain', 1),
             ('master.domain', 1),
             ('no-no.domain', 1),
             ('multipleviews.domain', 2)
             ])
        assert self.r.zone_list(profile=True) == [{'name': 'profile.domain'}]
        assert set([x['name'] for x in self.r.zone_list(profile=False)]) == set(
            ['master.domain',
             'no-no.domain',
             'multipleviews.domain',
             'second.domain'
             ])
        assert set([x['name'] for x in self.r.zone_list(profile=False, limit=2, offset=1)]) == set(
            ['multipleviews.domain',
             'no-no.domain'
             ])
        assert self.r.zone_count(profile=False) == 4
    def test_zone_list_alias(self):
        """The alias filter is accepted and applied by zone_list/zone_count."""
        assert len(self.r.zone_list(alias=1)) == 0
        assert self.r.zone_count(alias='a') == 0
        self.r.zone_create('a.de')
        assert [x['name'] for x in self.r.zone_list(profile=False, alias=True)] == ['a.de']
    def test_revzone_profiles(self):
        """Reverse zones created via pool subnets inherit SOA attrs from the
        container's reverse_dns_profile."""
        self.r.zone_create('revzone-profile', profile=True, soa_attributes={'primary': 'revzone.'})
        self.r.ipblock_create('172.16.31.10/8', status='Container', attributes={'reverse_dns_profile': 'revzone-profile'})
        self.r.ippool_create('pool')
        self.r.ippool_add_subnet('pool', '172.16.31.10/23')
        assert self.r.zone_get_soa_attrs('1.0.12.in-addr.arpa')['primary'] == 'revzone.'
    def test_revzone_ipv6(self):
        """Adding an IPv6 subnet creates the corresponding ip6.arpa zone."""
        self.r.ipblock_create('2001:db8::/32', status='Container')
        self.r.ippool_create('pool')
        self.r.ippool_add_subnet('pool', '20fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b/126')
        assert len(self.r.zone_list('a.0.0.0.0.0.1.0.8.b.d.0.1.0.0.2.ip6.arpa')) == 1
    def test_subzone(self):
        """Creating a subzone moves the records at/below its apex out of
        the parent zone."""
        self.r.zone_create('server.lan')
        self.r.rr_create(name='srv-monitoring.company.com.', type='TXT', strings=['test'])
        self.r.rr_create(name='monitoring.company.com.', type='TXT', strings=['test2'])
        self.r.zone_create('monitoring.company.com')
        assert rrs(self.r.rr_list(zone='company.com', type='TXT')) == rrs([
            ('srv-monitoring', 'company.com', 'TXT', '"test"')])
        assert rrs(self.r.rr_list(zone='monitoring.company.com', type='TXT')) == rrs([
            ('@', 'monitoring.company.com', 'TXT', '"test2"')])
    def test_dnssec_attrs(self):
        """DNSSEC attributes accept numeric values and reject malformed ones."""
        self.r.zone_create('test.com')
        self.r.zone_set_attrs('test.com', {'default_algorithm': '8'})
        self.r.zone_set_attrs('test.com', {'default_ksk_bits': 2048})
        self.r.zone_set_attrs('test.com', {'default_zsk_bits': 1024})
        with raises(InvalidParameterError):
            self.r.zone_set_attrs('test.com', {'default_algorithm': 'rsasha1'})
        with raises(InvalidParameterError):
            self.r.zone_set_attrs('test.com', {'default_ksk_bits': 'a'})
        with raises(InvalidParameterError):
            self.r.zone_set_attrs('test.com', {'default_zsk_bits': 'a'})
    def test_favorites(self):
        """Favorite status can be added, queried, listed, and removed."""
        # Test for a zone with a single view
        self.r.zone_create('a.de')
        assert self.r.zone_list2(favorite_only=True)['count'] == 0
        assert not self.r.zone_favorite('a.de')
        self.r.zone_favorite_add('a.de')
        assert self.r.zone_favorite('a.de')
        print(self.r.zone_list2(favorite_only=True))
        assert self.r.zone_list2(favorite_only=True)['data'][0]['name'] == 'a.de'
        self.r.zone_favorite_remove('a.de')
        assert not self.r.zone_favorite('a.de')
class RR(RPCTest):
    def test_create_twice(self):
        """Re-creating the same A record (with overwrite_ptr) is accepted;
        zone cleanup also removes the reverse records."""
        self.r.ipblock_create('172.16.31.10/8', status='Container')
        self.r.ippool_create('test')
        self.r.ippool_add_subnet('test', '172.16.31.10/24')
        self.r.ip_mark('172.16.58.3')
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')
        self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3', overwrite_ptr=True)
        self.r.zone_delete('test.com', cleanup=True)
        assert rrs(self.r.rr_list(pattern='*0.0.12.in-addr.arpa.')) == rrs([])
    def test_rr_create_invalid_profile(self):
        """Creating a profile record in a nonexistent profile zone fails."""
        with raises(InvalidZoneError):
            self.r.rr_create(profile=True, type='NS', nsdname='a.', zone='inexistent', name='@')
    def test_create_invalid_record_name(self):
        """The record FQDN must actually lie inside the given zone."""
        self.r.zone_create('a.de')
        self.r.rr_create(name='a.de.', type='TXT', strings=['text'], zone='a.de')
        with raises(InvalidParameterError):
            self.r.rr_create(name='suba.de.', type='TXT', strings=['text'], zone='a.de')
    def test_rr_delete_1(self):
        """A CNAME can be deleted by specifying its full parameters."""
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='cname', cname='b.test.com.')
        assert len(rrs(self.r.rr_list())) == 1
        self.r.rr_delete(name='a.test.com.', type='cname', cname='b.test.com.')
        assert len(rrs(self.r.rr_list())) == 0
    def test_rr_delete_2(self):
        """Deleting an A record with free_ips=True also removes the PTR and
        returns the address to Available."""
        self.r.ipblock_create('172.16.31.10/8', status='Container')
        self.r.ippool_create('test')
        self.r.ippool_add_subnet('test', '172.16.31.10/24')
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')
        assert len(rrs(self.r.rr_list())) == 2
        self.r.rr_delete(name='a.test.com.', type='a', ip='172.16.58.3', free_ips=True)
        assert len(rrs(self.r.rr_list())) == 0
        assert self.r.ipblock_get_attrs('172.16.58.3')['status'] == 'Available'
    def test_rr_delete_3(self):
        """Deleting the A record leaves the AAAA record for the same name
        (and its reverse PTR) untouched."""
        self.r.ipblock_create('12::/32', status='Container')
        self.r.zone_create('test.com')
        self.r.ippool_create('test')
        self.r.ippool_add_subnet('test', '12::/64')
        self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')
        self.r.rr_create(name='a.test.com.', type='aaaa', ip='fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
        self.r.rr_delete(name='a.test.com.', type='a', ip='172.16.58.3')
        assert rrs(self.r.rr_list('a.test.com.')) == rrs([
            ('a', 'test.com', 'AAAA', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),
            ('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0', '0.0.0.0.0.0.0.0.0.0.0.0.2.1.0.0.ip6.arpa', 'PTR', 'a.test.com.')])
    def test_rr_delete_4(self):
        # After b overwrote the shared PTR, deleting a must not disturb
        # b's A record or the PTR pointing at b.
        self.r.ipblock_create('172.16.31.10/8', status='Container')
        self.r.ippool_create('test')
        self.r.ippool_add_subnet('test', '172.16.31.10/24')
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')
        self.r.rr_create(name='b.test.com.', type='a', ip='172.16.58.3', overwrite_ptr=True)
        self.r.rr_delete(name='a.test.com.', type='a', ip='172.16.58.3')
        assert not self.r.rr_list('a.test.com.')
        assert rrs(self.r.rr_list('b.test.com.')) == rrs([
            ('b', 'test.com', 'A', '172.16.58.3'),
            ('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])
    def test_rr_delete_5(self):
        # trigger recursive delete via rr_delete(ptr)
        # references='delete' cascades from the PTR to the A record and on to
        # the CNAME that referenced it, emptying the zones completely.
        self.r.ipblock_create('172.16.31.10/8', status='Container')
        self.r.ippool_create('test')
        self.r.ippool_add_subnet('test', '172.16.31.10/24')
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')
        self.r.rr_create(name='b.test.com.', type='cname', cname='a')
        self.r.rr_delete(ip='172.16.58.3', type='ptr', ptrdname='a.test.com.', references='delete')
        assert rrs(self.r.rr_list()) == set()
    def test_rr_delete_6(self):
        # delete only one forward reference; expect ptr unchanged
        # The A record exists in two views; deleting it from 'default' only
        # must keep the record (in 'other') and the PTR alive.
        self.r.ipblock_create('172.16.31.10/8', status='Container')
        self.r.ippool_create('test')
        self.r.ippool_add_subnet('test', '172.16.31.10/24')
        self.r.zone_create('test.com')
        self.r.zone_create_view('test.com', 'other')
        self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3', views=['default', 'other'])
        self.r.rr_delete(name='a.test.com.', type='a', ip='172.16.58.3', views=['default'])
        assert rrs(self.r.rr_list()) == rrs([
            ('a', 'test.com', 'A', '172.16.58.3'),
            ('1', '0.0.12.in-addr.arpa', 'PTR', 'a.test.com.')])
    def test_rr_delete_by_id(self):
        # rr_delete(ids=...) requires a list; a bare id is rejected.
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='a', ip='172.16.58.3')
        rr_id = self.r.rr_get_references(name='a.test.com.', type='A')['root']
        with raises(InvalidParameterError):
            self.r.rr_delete(ids=rr_id)
        # NOTE(review): extra keywords alongside ids= appear to be tolerated,
        # and repeated deletes of the same id do not raise -- confirm that
        # this is the intended API contract.
        self.r.rr_delete(ids=[rr_id], zone='a.de')
        self.r.rr_delete(ids=[rr_id], unknown='a')
        self.r.rr_delete(ids=[rr_id])
    def test_ptr_overwrite(self):
        # A PTR create on an address that already has one is a no-op unless
        # overwrite_ptr=True; overwrite_a additionally creates a second PTR
        # for the other address of the target name.
        self.r.ipblock_create('172.16.31.10/8', status='Container')
        self.r.ippool_create('test')
        self.r.ippool_add_subnet('test', '172.16.31.10/24')
        self.r.ip_mark('172.16.58.3')
        self.r.ip_mark('192.168.3.11')
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='A', ip='172.16.58.3')
        # Without overwrite_ptr the existing PTR to a.test.com. survives.
        self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='b.test.com.')
        assert rrs(self.r.rr_list(pattern='*')) == rrs(
            [('a', 'test.com', 'A', '172.16.58.3'),
             ('b', 'test.com', 'A', '172.16.58.3'),
             ('1', '0.0.12.in-addr.arpa', 'PTR', 'a.test.com.')])
        # With overwrite_ptr the PTR is repointed at b.test.com.
        self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='b.test.com.', overwrite_ptr=True)
        assert rrs(self.r.rr_list(pattern='*')) == rrs(
            [('a', 'test.com', 'A', '172.16.58.3'),
             ('b', 'test.com', 'A', '172.16.58.3'),
             ('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])
        self.r.rr_create(name='b.test.com.', type='A', ip='192.168.3.11')
        self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='b.test.com.', overwrite_ptr=True, overwrite_a=True)
        assert rrs(self.r.rr_list(pattern='*')) == rrs(
            [('a', 'test.com', 'A', '172.16.58.3'),
             ('b', 'test.com', 'A', '172.16.58.3'),
             ('1', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.'),
             ('2', '0.0.12.in-addr.arpa', 'PTR', 'b.test.com.')])
    def test_create_a(self):
        # Basic A/AAAA creation on marked addresses; no reverse zones exist,
        # so only the forward records appear in the listing.
        self.r.ip_mark('172.16.58.3')
        self.r.ip_mark('192.168.3.11')
        self.r.ip_mark('fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
        self.r.ip_mark('fdf8:f53e:61e4::18')
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='A', ip='172.16.58.3', ttl=1)
        self.r.rr_create(name='b.test.com.', type='A', ip='192.168.3.11')
        self.r.rr_create(name='c.test.com.', type='AAAA', ip='fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b')
        self.r.rr_create(name='d.test.com.', type='AAAA', ip='fdf8:f53e:61e4::18')
        assert rrs(self.r.rr_list('*test.com.')) == rrs(
            [('a', 'test.com', 'A', '172.16.58.3'),
             ('b', 'test.com', 'A', '192.168.3.11'),
             ('c', 'test.com', 'AAAA', 'fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b'),
             ('d', 'test.com', 'AAAA', 'fdf8:f53e:61e4::18')])
    def test_create_a2(self):
        # ND-57
        # Address family must match the record type: A takes IPv4 only,
        # AAAA takes IPv6 only -- both for creation and attribute lookup.
        self.r.zone_create('test.com')
        with raises(InvalidParameterError):
            self.r.rr_create(name='test.com.', type='A', ip='::1')
        with raises(InvalidParameterError):
            self.r.rr_create(name='test.com.', type='AAAA', ip='127.0.0.1')
        with raises(InvalidParameterError):
            self.r.rr_get_attrs(name='test.com', type='A', ip='::1')
        with raises(InvalidParameterError):
            self.r.rr_get_attrs(name='test.com', type='AAAA', ip='0.0.0.1')
        self.r.rr_create(name='test.com.', type='AAAA', ip='::1')
        assert rrs(self.r.rr_list('*test.com.')) == rrs(
            [('@', 'test.com', 'AAAA', '::1')])
        self.r.rr_get_attrs(name='test.com.', type='AAAA', ip='::1')
    def test_create_cname(self):
        # CNAME constraints: names must be absolute, a CNAME cannot coexist
        # with other records at the same name, and other records (here MX)
        # must not point at a CNAME.
        self.r.zone_create('test.com')
        with raises(InvalidParameterError):
            self.r.rr_create(name='a.test.com', type='CNAME', cname='c.test.com')
        self.r.rr_create(name='a.test.com.', type='CNAME', cname='c.test.com.')
        self.r.rr_create(name='b.test.com.', type='MX', preference=10, exchange='test.com.')
        with raises(InvalidParameterError):
            self.r.rr_create(name='b.test.com', type='CNAME', cname='c.test.com')
        with raises(InvalidParameterError):
            self.r.rr_create(name='d.test.com.', type='MX', preference=10, exchange='a.test.com.')
    def test_create_cname_2(self):
        # ND-100
        # A PTR may name a CNAME target only when no linked forward record
        # is requested; create_linked=True must be rejected.
        self.r.zone_create('test.com')
        self.r.rr_create(name='cname.test.com.', type='CNAME', cname='test.com.')
        self.r.ipblock_create('172.16.31.10/8', status='Container')
        self.r.ippool_create('test')
        self.r.ippool_add_subnet('test', '172.16.31.10/24')
        self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='cname.test.com.', create_linked=False)
        with raises(InvalidParameterError):
            self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='cname.test.com.', create_linked=True)
    def test_create_srv(self):
        # An SRV target must not be a CNAME, and conversely a name used as
        # an SRV target cannot later become a CNAME.
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='CNAME', cname='c.test.com.')
        with raises(InvalidParameterError):
            self.r.rr_create(name='_a._b.test.com.', type='SRV', priority=10, weight=1, port=1, target='a.test.com.')
        self.r.rr_create(name='_a._b.test.com.', type='SRV', priority=10, weight=1, port=1, target='c.test.com.')
        with raises(InvalidParameterError):
            self.r.rr_create(name='c.test.com.', type='CNAME', cname='a.test.com.')
def test_email(self):
self.r.zone_create('test.com')
self.r.zone_set_soa_attrs('test.com', {'mail': 'first\.last.test.com.'})
assert " first\.last.test.com. " in self.r.zone_dump('test.com')
    def test_create_revzone(self):
        # create_revzone=True lets a PTR be created even though no reverse
        # zone exists yet (it is created on the fly); no forward record.
        self.r.rr_create(ip='172.16.58.3', type='PTR', ptrdname='test.com.', create_linked=False, create_revzone=True)
def test_create_rr_rp(self):
self.r.zone_create('test.com')
self.r.rr_create(name='a.test.com.', type='RP', mbox='john\.doe.example.com.', txtdname='test.com.')
    def test_create_rr_cert(self):
        # CERT certificate data must be valid base64; 'a c' (embedded space)
        # is rejected.
        self.r.zone_create('test.com')
        self.r.rr_create(name='a.test.com.', type='CERT', certificate_type=1, key_tag=2, algorithm=3, certificate='abc')
        with raises(DimError):
            self.r.rr_create(name='a.test.com.', type='CERT', certificate_type=1, key_tag=2, algorithm=3, certificate='a c')
def test_create_rr_tlsa(self):
default = dict(name='a.test.com.',
type='TLSA',
certificate_usage=1, selector=2, matching_type=1, certificate='abcd')
def rr_create(**kwargs):
d = default.copy()
d.update(kwargs)
return self.r.rr_create(**d)
self.r.zone_create('test.com')
assert set(rr_create(certificate_usage=4, selector=2, matching_type=3)['messages']) == set([
(20, 'Creating RR a TLSA 4 2 3 abcd in zone test.com'),
(30, 'certificate_usage value 4 is unassigned'),
(30, 'selector value 2 is unassigned'),
(30, 'matching_type value 3 is unassigned'),
])
rr_create(certificate_usage='PKIX-TA', selector='PRIVSEL', matching_type='SHA2-512')
for k, v in (('certificate', '1 2'),
('certificate', 'afcs'),
('selector', -1),
('matching_type', 256),
('certificate_usage', 'bad')):
with raises(DimError):
rr_create(k=v)
def test_rr_list_value_as_object(self):
self.r.zone_create('test.com')
rrs = [dict(type='TXT', strings='"a" "b"'),
dict(type='mx', preference=5, exchange='test.com.'),
dict(type='HINFO', os='os', cpu='cpu'),
dict(type='a', ip='1.2.3.4'),
dict(type='srv', priority=10, weight=1, port=1, target='a.test.com.'),
dict(type='naptr', order=1, preference=2, flags='f', service=r'223', regexp=r'r', replacement='a.de.'),
dict(type='cert', certificate_type=1, algorithm=2, key_tag=3, certificate='cert'),
dict(type='rp', mbox='gigi.a.de.', txtdname='test.com.')
]
for param in rrs:
name = '_a._b.test.com.'
self.r.rr_create(name=name, **param)
del param['type']
assert self.r.rr_list(name, value_as_object=True)[0]['value'] == param
self.r.rr_delete(name=name)
    def test_root_zone_list(self):
        # Records directly under the root zone '.' list with their bare label.
        self.r.zone_create('.')
        self.r.rr_create(name='a.', type='TXT', strings=[''])
        assert self.r.rr_list('a.')[0]['record'] == 'a'
def test_rr_attrs(self):
self.r.zone_create('a.de')
rrs = [dict(name='hinfo.a.de.', type='HINFO', os='os\\"', cpu='\\\\'),
dict(name='mx.a.de.', type='MX', preference=10, exchange='a.de.')]
for rr in rrs:
self.r.rr_create(**rr)
self.r.rr_set_ttl(ttl=300, **rr)
self.r.rr_set_comment(comment='com', **rr)
attrs = self.r.rr_get_attrs(**rr)
assert attrs['comment'] == 'com'
assert attrs['ttl'] == 300
with raises(InvalidParameterError):
self.r.rr_set_attrs(**rrs[0])
for dryrun in [False, True]:
comment = '%s' % | |
"""
Copyright thautwarm (c) 2019
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of thautwarm nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from dataclasses import dataclass
from typing import *
@dataclass
class Term:
    """A terminal grammar symbol (literal token text)."""
    v: str
@dataclass
class NonTerm:
    """A non-terminal grammar symbol (rule name)."""
    v: str
@dataclass
class Or:
    """The alternative separator '|' inside a production body."""
    pass
@dataclass
class Seq:
    """A sequence of grammar symbols, optionally tagged with a label."""
    xs: 'List[Union[NonTerm, Term, Or]]'
    label: str
@dataclass
class More:
    """A continuation row: an additional alternative of the previous rule."""
    impl: Seq
@dataclass
class Prod:
    """A full production: description, rule name, type annotation and body."""
    descr: str
    name: str
    type: str
    impl : Seq
@dataclass
class Decl:
    """A rule declaration (name and type only, no production body)."""
    descr: str
    name: str
    type: str
# LaTeX macro names used in the emitted table; each must be defined by the
# consuming .tex document.
DEF = r'\bnfdef'
TYPEOF = r'\bnfistypeof'
TYPE = r'\bnftype'
OR = r'\bnfalt'
NT = r'\bnfnonterm'
# Surrounding spaces keep the macro separated from neighbouring output.
SPACE = r' \bnfspace '
TERM = r'\bnfterm'
DESCR = r'\bnfdescr'
LABEL = r'\bnflabel'
# '\\\\' renders as the LaTeX line break '\\'.
MKLINE = '\\\\'
def macro_apply(macro, *args):
    """Render a LaTeX macro application: ``macro{ a }{ b } ...``.

    Each argument becomes one brace group padded with single spaces.
    """
    return macro + ''.join(f'{{ {arg} }}' for arg in args)
# Punctuation terminals of the grammar notation itself.
xor = Term("|")
eq = Term("=")
define = Term("::=")
ann = Term("::")
def unwrap(x):
    """Strip the first and last character (e.g. surrounding quotes)."""
    return x[1:-1]
def to_latex(xs):
    """Render an iterable of (lineno, node) pairs into LaTeX table rows.

    Consecutive entries are separated by one ``MKLINE`` line break per line
    of distance between their original source line numbers.

    Returns the accumulated LaTeX string; '' for an empty input (the
    original raised StopIteration on an empty iterable).
    """
    from io import StringIO
    out = StringIO()
    emit = out.write  # avoid shadowing the builtin 'print'
    it = iter(xs)
    try:
        lastlineno, node = next(it)
    except StopIteration:
        return ''
    to_backnaur(emit, node)
    for lineno, node in it:
        emit('\n')
        # One LaTeX line break per skipped source line keeps vertical spacing.
        emit((lineno - lastlineno) * MKLINE)
        to_backnaur(emit, node)
        lastlineno = lineno
    return out.getvalue()
def to_backnaur(print, x):
    """Emit one grammar node as LaTeX table cells via the writer `print`.

    `print` is a write-callable (it shadows the builtin on purpose; see
    to_latex).  The column layout is:
    descr & name & typeof & type & def/alt & body & label.
    """
    if isinstance(x, Decl):
        # Declaration row: name and optional type, no production body.
        if x.descr:
            print(macro_apply(DESCR, x.descr))
            print(macro_apply(SPACE))
        print(' & ')
        to_backnaur(print, NonTerm(x.name))
        if x.type:
            print(' & ')
            print(macro_apply(TYPEOF))
            print(' & ')
            print(macro_apply(TYPE, x.type))
            print(' & & &')
        else:
            # Pad the remaining columns when there is no type annotation.
            print(' & & & & & ')
        return
    elif isinstance(x, Prod):
        # Full production row: like Decl, but followed by '::=' and the body.
        if x.descr:
            print(macro_apply(DESCR, x.descr))
            print(macro_apply(SPACE))
        print(' & ')
        to_backnaur(print, NonTerm(x.name))
        if x.type:
            print(' & ')
            print(macro_apply(TYPEOF))
            print(' & ')
            print(macro_apply(TYPE, x.type))
            print(' & ')
        else:
            print(' & & & ')
        print(macro_apply(DEF))
        print(' & ')
        to_backnaur(print, x.impl)
        return
    if isinstance(x, Term):
        return print(macro_apply(TERM, x.v))
    if isinstance(x, NonTerm):
        return print(macro_apply(NT, x.v))
    if isinstance(x, Or):
        # No return: an Or fails the More/Seq checks below and falls through.
        print(macro_apply(OR))
    if isinstance(x, More):
        # Continuation row: empty leading columns, then '|' and the body.
        # desc
        print(' & ')
        # name
        print(' & ')
        # typeof
        print(' & ')
        # type
        print(' & ')
        print(macro_apply(OR))
        print(' & ')
        return to_backnaur(print, x.impl)
    if isinstance(x, Seq):
        if not x.xs:
            print(' & ')
            return
        # Symbols separated by SPACE, then the label column.
        xs = iter(x.xs)
        to_backnaur(print, next(xs))
        for e in xs:
            print(macro_apply(SPACE))
            to_backnaur(print, e)
        print(macro_apply(SPACE))
        print(' & ')
        if x.label:
            print(macro_apply(LABEL, x.label))
        return
def get_label(x):
    """Drop the 3-character prefix from a label token's text."""
    _PREFIX_LEN = 3
    return x[_PREFIX_LEN:]
from typing import Generic, TypeVar
T = TypeVar('T')
class Tokens():
    # Token stream with a mutable cursor: the parser advances 'offset' and
    # restores it when backtracking.
    __slots__ = ['array', 'offset']
    def __init__(self, array):
        self.array = array
        self.offset = 0
class State():
    # Empty user-state object threaded through the parser functions.
    # NOTE(review): carries no fields here -- presumably part of the
    # generated parser's fixed calling convention; confirm.
    def __init__(self):
        pass
class AST(Generic[T]):
    # Tagged AST node: a tag string plus arbitrary contents.
    __slots__ = ['tag', 'contents']
    def __init__(self, tag: str, contents: T):
        self.tag = tag
        self.contents = contents
class Nil():
    # Singleton empty linked list; Nil.nil holds the unique instance and a
    # second instantiation attempt raises.
    nil = None
    __slots__ = []
    def __init__(self):
        if (Nil.nil is None):
            Nil.nil = self
            return
        raise ValueError('Nil cannot get instantiated twice.')
    def __len__(self):
        return 0
    def __getitem__(self, n):
        raise IndexError('Out of bounds')
    @property
    def head(self):
        raise IndexError('Out of bounds')
    @property
    def tail(self):
        raise IndexError('Out of bounds')
    def __repr__(self):
        return '[]'
# The single shared empty-list terminator used by Cons.
_nil = Nil()
class Cons():
    """Singly linked list cell; chains terminate at the module's _nil."""
    __slots__ = ['head', 'tail']

    def __init__(self, _head, _tail):
        self.head = _head
        self.tail = _tail

    def __len__(self):
        # Count cells until the shared _nil terminator.
        count = 0
        node = self
        while node is not _nil:
            count += 1
            node = node.tail
        return count

    def __iter__(self):
        node = self
        while node is not _nil:
            yield node.head
            node = node.tail

    def __getitem__(self, n):
        node = self
        while n != 0:
            node = node.tail
            n -= 1
        return node.head

    def __repr__(self):
        return repr(list(self))
try:
    # Optional dependency: register nicer debug printers when the
    # third-party 'prettyprinter' package is installed.
    def mk_pretty():
        from prettyprinter import register_pretty, pretty_call, pprint
        @register_pretty(Tokens)
        def pretty_tokens(value, ctx):
            return pretty_call(ctx, Tokens, offset=value.offset, array=value.array)
        @register_pretty(AST)
        def pretty_ast(value, ctx):
            return pretty_call(ctx, AST, tag=value.tag, contents=value.contents)
    mk_pretty()
    # Only the registrations are needed; drop the helper afterwards.
    del mk_pretty
except ImportError:
    pass
# Drop the typing helpers from the module namespace and expose the runtime
# aliases that the generated parser code below refers to.
del T, Generic, TypeVar
builtin_cons = Cons
builtin_nil = _nil
builtin_mk_ast = AST
def mk_parser():
    # NOTE(review): empty stub -- the surrounding rbnf_named_* functions look
    # machine-generated; confirm this placeholder is intentionally unused.
    pass
def rbnf_named_lr_step_prod(rbnf_tmp_0, builtin_state, builtin_tokens):
    """One left-recursion step for rule 'prod' (machine-generated; do not
    hand-edit).

    Tries to extend the accumulated result list `rbnf_tmp_0` by one more
    item.  Returns (True, items) on success / accepted no-op, or
    (False, cons-list of (offset, message)) on failure.
    """
    lcl_0 = builtin_tokens.offset
    rbnf_named__off_0 = lcl_0
    # Peek one token ahead without consuming it.
    try:
        builtin_tokens.array[(builtin_tokens.offset + 0)]
        _rbnf_peek_tmp = True
    except IndexError:
        _rbnf_peek_tmp = False
    lcl_0 = _rbnf_peek_tmp
    if lcl_0:
        # Dispatch on the lookahead token's integer id.
        lcl_2 = builtin_tokens.array[(builtin_tokens.offset + 0)]
        lcl_2 = lcl_2.idint
        if (lcl_2 == 8):
            # Consume the separator token, then parse a following singleprod
            # depending on the next lookahead.
            _rbnf_old_offset = builtin_tokens.offset
            _rbnf_cur_token = builtin_tokens.array[_rbnf_old_offset]
            builtin_tokens.offset = (_rbnf_old_offset + 1)
            lcl_3 = _rbnf_cur_token
            rbnf_tmp_1 = lcl_3
            lcl_3 = builtin_tokens.offset
            rbnf_named__off_1 = lcl_3
            try:
                builtin_tokens.array[(builtin_tokens.offset + 0)]
                _rbnf_peek_tmp = True
            except IndexError:
                _rbnf_peek_tmp = False
            lcl_3 = _rbnf_peek_tmp
            if lcl_3:
                lcl_5 = builtin_tokens.array[(builtin_tokens.offset + 0)]
                lcl_5 = lcl_5.idint
                if (lcl_5 == 3):
                    lcl_6 = rbnf_named_parse_singleprod(builtin_state, builtin_tokens)
                    rbnf_named__check_2 = lcl_6
                    lcl_6 = rbnf_named__check_2[0]
                    lcl_6 = (lcl_6 == False)
                    if lcl_6:
                        lcl_6 = rbnf_named__check_2
                    else:
                        lcl_7 = rbnf_named__check_2[1]
                        rbnf_tmp_2 = lcl_7
                        lcl_7 = rbnf_tmp_0.append
                        lcl_7 = lcl_7(rbnf_tmp_2)
                        rbnf_tmp_1_ = rbnf_tmp_0
                        lcl_8 = (True, rbnf_tmp_1_)
                        lcl_6 = lcl_8
                    lcl_4 = lcl_6
                elif (lcl_5 == 2):
                    lcl_6 = rbnf_named_parse_singleprod(builtin_state, builtin_tokens)
                    rbnf_named__check_2 = lcl_6
                    lcl_6 = rbnf_named__check_2[0]
                    lcl_6 = (lcl_6 == False)
                    if lcl_6:
                        lcl_6 = rbnf_named__check_2
                    else:
                        lcl_8 = rbnf_named__check_2[1]
                        rbnf_tmp_2 = lcl_8
                        lcl_8 = rbnf_tmp_0.append
                        lcl_8 = lcl_8(rbnf_tmp_2)
                        rbnf_tmp_1_ = rbnf_tmp_0
                        lcl_9 = (True, rbnf_tmp_1_)
                        lcl_6 = lcl_9
                    lcl_4 = lcl_6
                elif (lcl_5 == 1):
                    lcl_6 = rbnf_named_parse_singleprod(builtin_state, builtin_tokens)
                    rbnf_named__check_2 = lcl_6
                    lcl_6 = rbnf_named__check_2[0]
                    lcl_6 = (lcl_6 == False)
                    if lcl_6:
                        lcl_6 = rbnf_named__check_2
                    else:
                        lcl_9 = rbnf_named__check_2[1]
                        rbnf_tmp_2 = lcl_9
                        lcl_9 = rbnf_tmp_0.append
                        lcl_9 = lcl_9(rbnf_tmp_2)
                        rbnf_tmp_1_ = rbnf_tmp_0
                        lcl_10 = (True, rbnf_tmp_1_)
                        lcl_6 = lcl_10
                    lcl_4 = lcl_6
                elif (lcl_5 == 0):
                    lcl_10 = rbnf_named_parse_singleprod(builtin_state, builtin_tokens)
                    rbnf_named__check_2 = lcl_10
                    lcl_10 = rbnf_named__check_2[0]
                    lcl_10 = (lcl_10 == False)
                    if lcl_10:
                        lcl_10 = rbnf_named__check_2
                    else:
                        lcl_6 = rbnf_named__check_2[1]
                        rbnf_tmp_2 = lcl_6
                        lcl_6 = rbnf_tmp_0.append
                        lcl_6 = lcl_6(rbnf_tmp_2)
                        rbnf_tmp_1_ = rbnf_tmp_0
                        lcl_11 = (True, rbnf_tmp_1_)
                        lcl_10 = lcl_11
                    lcl_4 = lcl_10
                elif (lcl_5 == 8):
                    # Tokens 8/9/other: accept without consuming more input.
                    rbnf_tmp_1_ = rbnf_tmp_0
                    lcl_10 = (True, rbnf_tmp_1_)
                    lcl_4 = lcl_10
                elif (lcl_5 == 9):
                    rbnf_tmp_1_ = rbnf_tmp_0
                    lcl_10 = (True, rbnf_tmp_1_)
                    lcl_4 = lcl_10
                else:
                    rbnf_tmp_1_ = rbnf_tmp_0
                    lcl_10 = (True, rbnf_tmp_1_)
                    lcl_4 = lcl_10
                lcl_3 = lcl_4
            else:
                lcl_10 = (rbnf_named__off_1, 'prod got EOF')
                lcl_10 = builtin_cons(lcl_10, builtin_nil)
                lcl_10 = (False, lcl_10)
                lcl_3 = lcl_10
            lcl_1 = lcl_3
        elif (lcl_2 == 9):
            # Token 9: consume it and accept the accumulated items as-is.
            _rbnf_old_offset = builtin_tokens.offset
            _rbnf_cur_token = builtin_tokens.array[_rbnf_old_offset]
            builtin_tokens.offset = (_rbnf_old_offset + 1)
            lcl_10 = _rbnf_cur_token
            rbnf_tmp_1 = lcl_10
            rbnf_tmp_1_ = rbnf_tmp_0
            lcl_10 = (True, rbnf_tmp_1_)
            lcl_1 = lcl_10
        else:
            lcl_10 = (rbnf_named__off_0, 'prod lookahead failed')
            lcl_10 = builtin_cons(lcl_10, builtin_nil)
            lcl_10 = (False, lcl_10)
            lcl_1 = lcl_10
        lcl_0 = lcl_1
    else:
        lcl_1 = (rbnf_named__off_0, 'prod got EOF')
        lcl_1 = builtin_cons(lcl_1, builtin_nil)
        lcl_1 = (False, lcl_1)
        lcl_0 = lcl_1
    return lcl_0
def rbnf_named_lr_loop_prod(rbnf_tmp_0, builtin_state, builtin_tokens):
    """Left-recursion driver for 'prod' (machine-generated; do not hand-edit).

    Repeats rbnf_named_lr_step_prod until a step fails, then returns the
    last successful accumulation -- or, if the failing step consumed no
    input, the accumulated result counts as a success.
    """
    rbnf_named_lr_prod_reduce = rbnf_tmp_0
    lcl_0 = builtin_tokens.offset
    rbnf_named__off_0 = lcl_0
    lcl_0 = rbnf_named_lr_step_prod(rbnf_named_lr_prod_reduce, builtin_state, builtin_tokens)
    rbnf_named_lr_prod_try = lcl_0
    lcl_0 = rbnf_named_lr_prod_try[0]
    lcl_0 = (lcl_0 is not False)
    while lcl_0:
        # Remember the offset of the last successful step for the check below.
        lcl_1 = builtin_tokens.offset
        rbnf_named__off_0 = lcl_1
        lcl_1 = rbnf_named_lr_prod_try[1]
        rbnf_named_lr_prod_reduce = lcl_1
        lcl_1 = rbnf_named_lr_step_prod(rbnf_named_lr_prod_reduce, builtin_state, builtin_tokens)
        rbnf_named_lr_prod_try = lcl_1
        lcl_1 = rbnf_named_lr_prod_try[0]
        lcl_1 = (lcl_1 is not False)
        lcl_0 = lcl_1
    lcl_0 = builtin_tokens.offset
    lcl_0 = (lcl_0 == rbnf_named__off_0)
    if lcl_0:
        # The failing step consumed nothing: the loop result stands.
        lcl_1 = (True, rbnf_named_lr_prod_reduce)
        lcl_0 = lcl_1
    else:
        # The failing step consumed input: propagate its error.
        lcl_0 = rbnf_named_lr_prod_try
    return lcl_0
def rbnf_named_lr_step_rbnfmacro_0(rbnf_tmp_0, builtin_state, builtin_tokens):
    """One left-recursion step for rbnfmacro_0 (machine-generated).

    Parses a single 'atom' and appends it to `rbnf_tmp_0`; returns
    (True, items) on success, or the failed parse result unchanged.
    """
    lcl_0 = rbnf_named_parse_atom(builtin_state, builtin_tokens)
    rbnf_named__check_1 = lcl_0
    lcl_0 = rbnf_named__check_1[0]
    lcl_0 = (lcl_0 == False)
    if lcl_0:
        lcl_0 = rbnf_named__check_1
    else:
        lcl_1 = rbnf_named__check_1[1]
        rbnf_tmp_1 = lcl_1
        lcl_1 = rbnf_tmp_0.append
        lcl_1 = lcl_1(rbnf_tmp_1)
        rbnf_tmp_1_ = rbnf_tmp_0
        lcl_2 = (True, rbnf_tmp_1_)
        lcl_0 = lcl_2
    return lcl_0
def rbnf_named_lr_loop_rbnfmacro_0(rbnf_tmp_0, builtin_state, builtin_tokens):
rbnf_named_lr_rbnfmacro_0_reduce = rbnf_tmp_0
lcl_0 = builtin_tokens.offset
rbnf_named__off_0 = lcl_0
lcl_0 = rbnf_named_lr_step_rbnfmacro_0(rbnf_named_lr_rbnfmacro_0_reduce, builtin_state, | |
# From Paddle: python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import paddle.fluid as fluid
import numpy as np
import unittest
def stable_softmax(x):
    """Numerically stable softmax: shift by the max, clip the exponent."""
    shifted = np.clip(x - np.max(x), -64., None)
    exp_shifted = np.exp(shifted)
    return exp_shifted / np.sum(exp_shifted)
def log_softmax(x, axis=-1):
    """log(softmax(x)) computed along `axis` (last axis by default)."""
    return np.log(np.apply_along_axis(stable_softmax, axis, x))
def cross_entropy_loss_1d(input,
                          label,
                          weight=None,
                          reduction='mean',
                          ignore_index=-100):
    """NumPy reference implementation of 1-D cross entropy loss.

    Args:
        input: (N, C) array of unnormalized logits.
        label: (N,) integer class indices.
        weight: optional (C,) per-class rescaling weights.
        reduction: 'mean', 'sum' or 'none'.
        ignore_index: target value whose loss contribution is zeroed.

    Returns:
        For 'none', the (N,) per-sample losses; otherwise a
        (loss, total_weight) pair where total_weight is a one-element
        float64 array.

    Raises:
        ValueError: for an unknown `reduction` (the original silently
        returned None).
    """
    if reduction not in ('mean', 'sum', 'none'):
        raise ValueError(
            "reduction must be 'mean', 'sum' or 'none', got %r" % (reduction,))
    log_softmax_out = log_softmax(input)
    N = log_softmax_out.shape[0]
    out = np.zeros_like(label).astype(np.float64)
    total_weight = 0
    for i in range(N):
        cur_target = label[i]
        if cur_target == ignore_index:
            out[i] = 0
            continue
        cur_weight = weight[cur_target] if weight is not None else 1
        total_weight += cur_weight
        out[i] = -log_softmax_out[i][cur_target] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        return out.sum() / total_weight, np.array(
            [total_weight]).astype('float64')
    # reduction == 'none'
    return out
def cross_entropy_loss_2d(input,
                          label,
                          weight=None,
                          reduction='mean',
                          ignore_index=-100):
    """NumPy reference implementation of 2-D cross entropy loss.

    Args:
        input: (N, H, W, C) array of unnormalized logits (channels last).
        label: (N, H, W) integer class indices.
        weight: optional (C,) per-class rescaling weights.
        reduction: 'mean', 'sum' or 'none'.
        ignore_index: target value whose loss contribution is zeroed.

    Returns:
        For 'none', the (N, H, W) per-pixel losses; otherwise a
        (loss, total_weight) pair where total_weight is a one-element
        float64 array.

    Raises:
        ValueError: for an unknown `reduction` (the original silently
        returned None).
    """
    if reduction not in ('mean', 'sum', 'none'):
        raise ValueError(
            "reduction must be 'mean', 'sum' or 'none', got %r" % (reduction,))
    log_softmax_out = log_softmax(input)
    input_shape = log_softmax_out.shape
    N = input_shape[0]
    H = input_shape[1]
    W = input_shape[2]
    out = np.zeros_like(label).astype(np.float64)
    total_weight = 0
    for i in range(N):
        for h in range(H):
            for w in range(W):
                cur_target = label[i][h][w]
                if cur_target == ignore_index:
                    out[i][h][w] = 0
                    continue
                cur_weight = weight[cur_target] if weight is not None else 1
                total_weight += cur_weight
                out[i][h][w] = -log_softmax_out[i][h][w][
                    cur_target] * cur_weight
    if reduction == 'sum':
        return np.sum(out), np.array([total_weight]).astype('float64')
    elif reduction == 'mean':
        return out.sum() / total_weight, np.array(
            [total_weight]).astype('float64')
    # reduction == 'none'
    return out
class CrossEntropyLoss(unittest.TestCase):
def test_cross_entropy_loss_1d_with_mean_ignore(self):
input_np = np.random.random([2, 4]).astype(np.float64)
label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[2, 4], dtype='float64')
label = fluid.data(name='label', shape=[2], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(ignore_index=0)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
expected = cross_entropy_loss_1d(input_np, label_np)[0]
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
axis=1, ignore_index=0)
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(input_np, label_np, ignore_index=0)[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_with_weight_mean_ignore(self):
input_np = np.random.random([2, 4]).astype(np.float64)
label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
weight_np = np.random.random([4]).astype(np.float64) #shape:C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[2, 4], dtype='float64')
label = fluid.data(name='label', shape=[2], dtype='int64')
weight = fluid.data(
name='weight', shape=[4],
dtype='float64') #weight for each class
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, ignore_index=0)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np)[0]
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np),
axis=1,
ignore_index=0)
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np, ignore_index=0)[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_with_weight_mean(self):
input_np = np.random.random([2, 4]).astype(np.float64)
label_np = np.random.randint(0, 4, size=(2)).astype(np.int64)
weight_np = np.random.random([4]).astype(np.float64) #shape:C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[2, 4], dtype='float64')
label = fluid.data(name='label', shape=[2], dtype='int64')
weight = fluid.data(
name='weight', shape=[4],
dtype='float64') #weight for each class
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight)
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={
'input': input_np,
'label': label_np,
"weight": weight_np
},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np)[0]
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=fluid.dygraph.to_variable(weight_np), axis=1)
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(
input_np, label_np, weight=weight_np)[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
    def test_cross_entropy_loss_1d_with_weight_sum(self):
        # Weighted reduction='sum': static vs dygraph vs the NumPy reference.
        input_np = np.random.random([100, 200]).astype(np.float64)  #N,C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)  #N,1
        weight_np = np.random.random([200]).astype(np.float64)  #C
        paddle.enable_static()
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[100, 200], dtype='float64')
            label = fluid.data(name='label', shape=[100], dtype='int64')
            weight = fluid.data(name='weight', shape=[200], dtype='float64')
            cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                weight=weight, reduction='sum')
            ret = cross_entropy_loss(input, label)
            exe = fluid.Executor(place)
            static_ret = exe.run(prog,
                                 feed={
                                     'input': input_np,
                                     'label': label_np,
                                     "weight": weight_np
                                 },
                                 fetch_list=[ret])
            self.assertIsNotNone(static_ret)
        with fluid.dygraph.guard():
            cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                weight=fluid.dygraph.to_variable(weight_np), reduction='sum')
            dy_ret = cross_entropy_loss(
                fluid.dygraph.to_variable(input_np),
                fluid.dygraph.to_variable(label_np))
            dy_ret_value = dy_ret.numpy()
            self.assertIsNotNone(dy_ret_value)
        expected = cross_entropy_loss_1d(
            input_np, label_np, weight=weight_np, reduction='sum')[0]
        self.assertTrue(np.allclose(static_ret, dy_ret_value))
        self.assertTrue(np.allclose(static_ret, expected))
        self.assertTrue(np.allclose(dy_ret_value, expected))
    def test_cross_entropy_loss_1d_with_weight_none(self):
        # Weighted reduction='none': per-sample losses are compared, so both
        # results are squeezed before the allclose checks.
        input_np = np.random.random([100, 200]).astype(np.float64)  #N,C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)  #N,1
        weight_np = np.random.random([200]).astype(np.float64)  #C
        paddle.enable_static()
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[100, 200], dtype='float64')
            label = fluid.data(name='label', shape=[100], dtype='int64')
            weight = fluid.data(name='weight', shape=[200], dtype='float64')
            cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                weight=weight, reduction='none')
            ret = cross_entropy_loss(input, label)
            exe = fluid.Executor(place)
            static_ret = exe.run(prog,
                                 feed={
                                     'input': input_np,
                                     'label': label_np,
                                     "weight": weight_np
                                 },
                                 fetch_list=[ret])
            static_ret = np.squeeze(static_ret)
            self.assertIsNotNone(static_ret)
        with fluid.dygraph.guard():
            cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
                weight=fluid.dygraph.to_variable(weight_np), reduction='none')
            dy_ret = cross_entropy_loss(
                fluid.dygraph.to_variable(input_np),
                fluid.dygraph.to_variable(label_np))
            dy_ret_value = dy_ret.numpy()
            dy_ret_value = np.squeeze(dy_ret_value)
            self.assertIsNotNone(dy_ret_value)
        expected = cross_entropy_loss_1d(
            input_np, label_np, weight=weight_np, reduction='none')
        self.assertTrue(np.allclose(static_ret, dy_ret_value))
        self.assertTrue(np.allclose(static_ret, expected))
        self.assertTrue(np.allclose(dy_ret_value, expected))
    def test_cross_entropy_loss_1d_with_weight_none_func(self):
        # Same as the previous test, but through the functional API
        # paddle.nn.functional.cross_entropy instead of the loss layer.
        input_np = np.random.random([100, 200]).astype(np.float64)  #N,C
        label_np = np.random.randint(0, 100, size=(100)).astype(np.int64)  #N
        weight_np = np.random.random([200]).astype(np.float64)  #C
        paddle.enable_static()
        prog = fluid.Program()
        startup_prog = fluid.Program()
        place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        with fluid.program_guard(prog, startup_prog):
            input = fluid.data(name='input', shape=[100, 200], dtype='float64')
            label = fluid.data(name='label', shape=[100], dtype='int64')
            weight = fluid.data(name='weight', shape=[200], dtype='float64')
            ret = paddle.nn.functional.cross_entropy(
                input, label, weight=weight, reduction='none')
            exe = fluid.Executor(place)
            static_ret = exe.run(prog,
                                 feed={
                                     'input': input_np,
                                     'label': label_np,
                                     "weight": weight_np
                                 },
                                 fetch_list=[ret])
            static_ret = np.squeeze(static_ret)
            self.assertIsNotNone(static_ret)
        with fluid.dygraph.guard():
            dy_ret = paddle.nn.functional.cross_entropy(
                fluid.dygraph.to_variable(input_np),
                fluid.dygraph.to_variable(label_np),
                weight=fluid.dygraph.to_variable(weight_np),
                reduction='none')
            dy_ret_value = dy_ret.numpy()
            dy_ret_value = np.squeeze(dy_ret_value)
            self.assertIsNotNone(dy_ret_value)
        expected = cross_entropy_loss_1d(
            input_np, label_np, weight=weight_np, reduction='none')
        self.assertTrue(np.allclose(static_ret, dy_ret_value))
        self.assertTrue(np.allclose(static_ret, expected))
        self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_mean(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1
weight_np = np.random.random([200]).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
weight = fluid.data(name='weight', shape=[100], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss()
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={'input': input_np,
'label': label_np},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss()
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(input_np, label_np)[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_sum(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='sum')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={'input': input_np,
'label': label_np},
fetch_list=[ret])
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='sum')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(input_np, label_np, reduction='sum')[0]
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_1d_none(self):
input_np = np.random.random([100, 200]).astype(np.float64) #N,C
label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) #N,1
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(name='input', shape=[100, 200], dtype='float64')
label = fluid.data(name='label', shape=[100], dtype='int64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='none')
ret = cross_entropy_loss(input, label)
exe = fluid.Executor(place)
static_ret = exe.run(prog,
feed={'input': input_np,
'label': label_np},
fetch_list=[ret])
static_ret = np.squeeze(static_ret)
self.assertIsNotNone(static_ret)
with fluid.dygraph.guard():
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
reduction='none')
dy_ret = cross_entropy_loss(
fluid.dygraph.to_variable(input_np),
fluid.dygraph.to_variable(label_np))
dy_ret_value = dy_ret.numpy()
dy_ret_value = np.squeeze(dy_ret_value)
self.assertIsNotNone(dy_ret_value)
expected = cross_entropy_loss_1d(input_np, label_np, reduction='none')
self.assertTrue(np.allclose(static_ret, dy_ret_value))
self.assertTrue(np.allclose(static_ret, expected))
self.assertTrue(np.allclose(dy_ret_value, expected))
def test_cross_entropy_loss_2d_with_weight_none(self):
input_np = np.random.random(size=(2, 2, 2, 3)).astype(np.float64) #NHWC
label_np = np.random.randint(
0, 3, size=(2, 2, 2)).astype(np.int64) #NHW1
weight_np = np.random.random(size=(3, )).astype(np.float64) #C
paddle.enable_static()
prog = fluid.Program()
startup_prog = fluid.Program()
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
with fluid.program_guard(prog, startup_prog):
input = fluid.data(
name='input', shape=[2, 2, 2, 3], dtype='float64')
label = fluid.data(name='label', shape=[2, 2, 2], dtype='int64')
weight = fluid.data(name='weight', shape=[3], dtype='float64')
cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(
weight=weight, reduction='none')
ret = cross_entropy_loss(input, label)
exe = | |
codomain = FloatDomain()
class BindRoundTo(BindPolyFunction):
call(('round', 2))
signature = RoundToSig
hint = """round(x, n) -> x rounded to a given precision"""
class CorrelateDecimalRoundTo(CorrelateFunction):
match(RoundToSig, (IntegerDomain, IntegerDomain),
(DecimalDomain, IntegerDomain))
signature = RoundToSig
domains = [DecimalDomain(), IntegerDomain()]
codomain = DecimalDomain()
class BindTrunc(BindPolyFunction):
call('trunc')
signature = TruncSig
class CorrelateDecimalTrunc(CorrelateFunction):
match(TruncSig, IntegerDomain,
DecimalDomain)
signature = TruncSig
domains = [DecimalDomain()]
codomain = DecimalDomain()
class CorrelateFloatTrunc(CorrelateFunction):
match(TruncSig, FloatDomain)
signature = TruncSig
domains = [FloatDomain()]
codomain = FloatDomain()
class BindTruncTo(BindPolyFunction):
call(('trunc', 2))
signature = TruncToSig
class CorrelateDecimalTruncTo(CorrelateFunction):
match(TruncToSig, (IntegerDomain, IntegerDomain),
(DecimalDomain, IntegerDomain))
signature = TruncToSig
domains = [DecimalDomain(), IntegerDomain()]
codomain = DecimalDomain()
class BindSquareRoot(BindPolyFunction):
call('sqrt')
signature = SquareRootSig
class CorrelateSquareRoot(CorrelateFunction):
match(SquareRootSig, IntegerDomain,
DecimalDomain,
FloatDomain)
signature = SquareRootSig
domains = [FloatDomain()]
codomain = FloatDomain()
class BindLength(BindPolyFunction):
call('length')
signature = LengthSig
hint = """length(s) -> length of s"""
class CorrelateTextLength(CorrelateFunction):
match(LengthSig, TextDomain,
UntypedDomain)
signature = LengthSig
domains = [TextDomain()]
codomain = IntegerDomain()
class BindContainsBase(BindPolyFunction):
    """Common binder for the substring operators `~` and `!~`.

    Subclasses only set `polarity`; `correlate` instantiates the
    signature with that polarity and dispatches to `Correlate`.
    """
    signature = ContainsSig
    # +1 for `~` (contains), -1 for `!~` (does not contain); set by subclass.
    polarity = None

    def correlate(self, **arguments):
        binding = FormulaBinding(self.state.scope,
                                 self.signature(self.polarity),
                                 self.codomain, self.syntax, **arguments)
        return Correlate.__invoke__(binding, self.state)
class BindContains(BindContainsBase):
    call('~')
    polarity = +1
    hint = """(s ~ sub) -> TRUE if s contains sub"""


class BindNotContains(BindContainsBase):
    call('!~')
    polarity = -1
    hint = """(s !~ sub) -> TRUE if s does not contain sub"""


class CorrelateTextContains(CorrelateFunction):
    # Either operand may be untyped; both are coerced to text.
    match(ContainsSig, (TextDomain, TextDomain),
                       (TextDomain, UntypedDomain),
                       (UntypedDomain, TextDomain),
                       (UntypedDomain, UntypedDomain))
    signature = ContainsSig
    domains = [TextDomain(), TextDomain()]
    codomain = BooleanDomain()
class BindHeadTailBase(BindPolyFunction):
    """Shared binder for `head(s[, N])` and `tail(s[, N])`.

    Casts the optional length argument to integer before building the
    formula binding.

    NOTE(review): `BindHead`/`BindTail` below derive from
    `BindPolyFunction` directly, so this `correlate` override appears to
    be bypassed — they were presumably meant to derive from this class;
    verify.
    """
    signature = None

    def correlate(self, op, length):
        if length is not None:
            length = ImplicitCastBinding(length, coerce(IntegerDomain()),
                                         length.syntax)
        binding = FormulaBinding(self.state.scope,
                                 self.signature(), UntypedDomain(),
                                 self.syntax, op=op, length=length)
        return Correlate.__invoke__(binding, self.state)
class BindHead(BindHeadTailBase):
    """Binder for `head(s[, N])`.

    Fix: derive from `BindHeadTailBase` (was `BindPolyFunction`) so its
    `correlate` runs and the optional N argument is cast to integer;
    `BindHeadTailBase` exists solely to provide that behavior.
    """
    call('head')
    signature = HeadSig
    hint = """head(s[, N=1]) -> the first N elements of s"""
class BindTail(BindHeadTailBase):
    """Binder for `tail(s[, N])`.

    Fix: derive from `BindHeadTailBase` (was `BindPolyFunction`) so its
    `correlate` runs and the optional N argument is cast to integer;
    `BindHeadTailBase` exists solely to provide that behavior.
    """
    call('tail')
    signature = TailSig
    hint = """tail(s[, N=1]) -> the last N elements of s"""
# NOTE(review): unlike most Correlate* classes in this file, these two set no
# `signature` attribute — confirm the base class supplies it.

class CorrelateHead(CorrelateFunction):
    match(HeadSig, UntypedDomain,
                   TextDomain)
    domains = [TextDomain()]
    codomain = TextDomain()


class CorrelateTail(CorrelateFunction):
    match(TailSig, UntypedDomain,
                   TextDomain)
    domains = [TextDomain()]
    codomain = TextDomain()
class BindSlice(BindPolyFunction):
    call('slice')
    signature = SliceSig
    hint = """slice(s, i, j) -> slice of s from i-th to j-th elements"""

    def correlate(self, op, left, right):
        # Both bounds are optional; cast the ones that are present to
        # integer before building the formula binding.
        if left is not None:
            left = ImplicitCastBinding(left, coerce(IntegerDomain()),
                                       left.syntax)
        if right is not None:
            right = ImplicitCastBinding(right, coerce(IntegerDomain()),
                                        right.syntax)
        binding = FormulaBinding(self.state.scope,
                                 self.signature(), UntypedDomain(),
                                 self.syntax, op=op, left=left, right=right)
        return Correlate.__invoke__(binding, self.state)
class CorrelateSlice(CorrelateFunction):
    # Operates on (coerced) text.
    match(SliceSig, UntypedDomain,
                    TextDomain)
    domains = [TextDomain()]
    codomain = TextDomain()
class BindAt(BindPolyFunction):
    call('at')
    signature = AtSig
    hint = """at(s, i[, len=1]) -> i-th to (i+len)-th elements of s"""

    def correlate(self, op, index, length):
        # The index is mandatory; the length is optional.  Both are cast
        # to integer before building the formula binding.
        index = ImplicitCastBinding(index, coerce(IntegerDomain()),
                                    index.syntax)
        if length is not None:
            length = ImplicitCastBinding(length, coerce(IntegerDomain()),
                                         length.syntax)
        binding = FormulaBinding(self.state.scope,
                                 self.signature(), UntypedDomain(),
                                 self.syntax, op=op, index=index,
                                 length=length)
        return Correlate.__invoke__(binding, self.state)
class CorrelateAt(CorrelateFunction):
    # Operates on (coerced) text.
    match(AtSig, UntypedDomain,
                 TextDomain)
    domains = [TextDomain()]
    codomain = TextDomain()
class BindReplace(BindPolyFunction):
    """Binder for `replace(s, o, n)`.

    Fix: corrected the typo "occurences" -> "occurrences" in the
    user-facing hint text.
    """
    call('replace')
    signature = ReplaceSig
    hint = """replace(s, o, n) -> s with occurrences of o replaced by n"""
class CorrelateReplace(CorrelateFunction):
    # All three arguments are coerced to text.
    match(ReplaceSig, UntypedDomain,
                      TextDomain)
    domains = [TextDomain(), TextDomain(), TextDomain()]
    codomain = TextDomain()


class BindUpper(BindPolyFunction):
    call('upper')
    signature = UpperSig
    hint = """upper(s) -> s converted to uppercase"""


class CorrelateUpper(CorrelateFunction):
    match(UpperSig, UntypedDomain,
                    TextDomain)
    domains = [TextDomain()]
    codomain = TextDomain()


class BindLower(BindPolyFunction):
    call('lower')
    signature = LowerSig
    hint = """lower(s) -> s converted to lowercase"""


class CorrelateLower(CorrelateFunction):
    match(LowerSig, UntypedDomain,
                    TextDomain)
    domains = [TextDomain()]
    codomain = TextDomain()
class BindTrimBase(BindPolyFunction):
    """Shared binder for trim()/ltrim()/rtrim(); subclasses choose sides."""
    signature = TrimSig
    is_left = False
    is_right = False

    def correlate(self, op):
        # Instantiate the signature with this binder's side flags and hand
        # the resulting formula over to the Correlate dispatcher.
        sig = self.signature(is_left=self.is_left, is_right=self.is_right)
        formula = FormulaBinding(self.state.scope, sig, UntypedDomain(),
                                 self.syntax, op=op)
        return Correlate.__invoke__(formula, self.state)
# Concrete trim variants: each picks which side(s) to strip.

class BindTrim(BindTrimBase):
    call('trim')
    is_left = True
    is_right = True
    hint = """trim(s) -> s with leading and trailing whitespaces removed"""


class BindLTrim(BindTrimBase):
    call('ltrim')
    is_left = True
    hint = """ltrim(s) -> s with leading whitespaces removed"""


class BindRTrim(BindTrimBase):
    call('rtrim')
    is_right = True
    hint = """rtrim(s) -> s with trailing whitespaces removed"""
class BindToday(BindMonoFunction):
    # Nullary function producing the current date.
    call('today')
    signature = TodaySig
    codomain = DateDomain()
    hint = """today() -> the current date"""
class BindNow(BindMonoFunction):
    """Binder for the nullary `now()` function."""
    call('now')
    signature = NowSig
    codomain = DateTimeDomain()
    # Consistency fix: every other user-callable binder in this file carries
    # a `hint`; `now` was missing one.
    hint = """now() -> the current date and time"""
# --- date component extractors ----------------------------------------------

class BindExtractYear(BindPolyFunction):
    call('year')
    signature = ExtractYearSig
    hint = """year(date) -> the year of a given date"""


class BindExtractMonth(BindPolyFunction):
    call('month')
    signature = ExtractMonthSig
    hint = """month(date) -> the month of a given date"""


class BindExtractDay(BindPolyFunction):
    call('day')
    signature = ExtractDaySig
    hint = """day(date) -> the day of a given date"""
class BindExtractHour(BindPolyFunction):
    call('hour')
    signature = ExtractHourSig
    # Consistency fix: sibling extractors (year/month/day) document
    # themselves via `hint`; hour was missing one.
    hint = """hour(time) -> the hour of a given time"""
class BindExtractMinute(BindPolyFunction):
    call('minute')
    signature = ExtractMinuteSig
    # Consistency fix: sibling extractors (year/month/day) document
    # themselves via `hint`; minute was missing one.
    hint = """minute(time) -> the minute of a given time"""
class BindExtractSecond(BindPolyFunction):
    call('second')
    signature = ExtractSecondSig
    # Consistency fix: sibling extractors (year/month/day) document
    # themselves via `hint`; second was missing one.
    hint = """second(time) -> the second of a given time"""
# --- date/time component extraction correlates ------------------------------
# One correlate per (extractor, input domain) pair; year/month/day accept
# dates and datetimes, hour/minute/second accept times and datetimes.
# The second component yields FloatDomain; all others yield IntegerDomain.

class CorrelateExtractYearFromDate(CorrelateFunction):
    match(ExtractYearSig, DateDomain)
    domains = [DateDomain()]
    codomain = IntegerDomain()


class CorrelateExtractYearFromDateTime(CorrelateFunction):
    match(ExtractYearSig, DateTimeDomain)
    domains = [DateTimeDomain()]
    codomain = IntegerDomain()


class CorrelateExtractMonthFromDate(CorrelateFunction):
    match(ExtractMonthSig, DateDomain)
    domains = [DateDomain()]
    codomain = IntegerDomain()


class CorrelateExtractMonthFromDateTime(CorrelateFunction):
    match(ExtractMonthSig, DateTimeDomain)
    domains = [DateTimeDomain()]
    codomain = IntegerDomain()


class CorrelateExtractDayFromDate(CorrelateFunction):
    match(ExtractDaySig, DateDomain)
    domains = [DateDomain()]
    codomain = IntegerDomain()


class CorrelateExtractDayFromDateTime(CorrelateFunction):
    match(ExtractDaySig, DateTimeDomain)
    domains = [DateTimeDomain()]
    codomain = IntegerDomain()


class CorrelateExtractHourFromTime(CorrelateFunction):
    match(ExtractHourSig, TimeDomain)
    domains = [TimeDomain()]
    codomain = IntegerDomain()


class CorrelateExtractHourFromDateTime(CorrelateFunction):
    match(ExtractHourSig, DateTimeDomain)
    domains = [DateTimeDomain()]
    codomain = IntegerDomain()


class CorrelateExtractMinuteFromTime(CorrelateFunction):
    match(ExtractMinuteSig, TimeDomain)
    domains = [TimeDomain()]
    codomain = IntegerDomain()


class CorrelateExtractMinuteFromDateTime(CorrelateFunction):
    match(ExtractMinuteSig, DateTimeDomain)
    domains = [DateTimeDomain()]
    codomain = IntegerDomain()


class CorrelateExtractSecondFromTime(CorrelateFunction):
    match(ExtractSecondSig, TimeDomain)
    domains = [TimeDomain()]
    codomain = FloatDomain()


class CorrelateExtractSecondFromDateTime(CorrelateFunction):
    match(ExtractSecondSig, DateTimeDomain)
    domains = [DateTimeDomain()]
    codomain = FloatDomain()


class CorrelateTrim(CorrelateFunction):
    # Correlate for the trim family; operates on (coerced) text.
    match(TrimSig, UntypedDomain,
                   TextDomain)
    domains = [TextDomain()]
    codomain = TextDomain()
# NULL-handling functions.  Unlike the poly binders above, these assign
# signature *instances* (the signatures are parameterized at class level).

class BindIsNull(BindHomoFunction):
    call('is_null')
    signature = IsNullSig(+1)
    codomain = BooleanDomain()
    hint = """is_null(x) -> TRUE if x is NULL"""


class BindNullIf(BindHomoFunction):
    call('null_if')
    signature = NullIfSig()
    hint = """null_if(x, y) -> NULL if x is equal to y; x otherwise"""


class BindIfNull(BindHomoFunction):
    call('if_null')
    signature = IfNullSig()
    hint = """if_null(x, y) -> y if x is NULL; x otherwise"""
class BindIf(BindFunction):
    call('if')
    signature = IfSig
    hint = """if(p, c[, ...][, a=NULL]) -> c if p; a otherwise"""

    def match(self):
        """Split the raw argument list into predicate/consequent pairs
        plus an optional trailing alternative."""
        operands = list(reversed(self.syntax.arguments))
        if len(operands) < 2:
            # NOTE(review): under Python 3, `str.encode` would make this
            # message render as b'...'; fine on Python 2 — confirm the
            # target version.
            raise Error("Function '%s' expects 2 or more arguments;"
                        " got %s" % (self.name.encode('utf-8'),
                                     len(operands)))
        predicates = []
        consequents = []
        alternative = None
        # Operands are reversed, so pop() walks them left to right; a lone
        # final operand becomes the alternative.
        while operands:
            if len(operands) == 1:
                alternative = operands.pop()
            else:
                predicates.append(operands.pop())
                consequents.append(operands.pop())
        return {
            'predicates': predicates,
            'consequents': consequents,
            'alternative': alternative,
        }

    def correlate(self, predicates, consequents, alternative):
        """Coerce predicates to boolean and all branches to one common
        domain, then build the IfSig formula binding."""
        predicates = [ImplicitCastBinding(predicate, coerce(BooleanDomain()),
                                          predicate.syntax)
                      for predicate in predicates]
        # Find a domain shared by all consequents (and the alternative).
        domains = [consequent.domain for consequent in consequents]
        if alternative is not None:
            domains.append(alternative.domain)
        domain = coerce(*domains)
        if domain is None:
            if len(domains) > 1:
                raise Error("Cannot coerce values of types (%s)"
                            " to a common type"
                            % (", ".join(str(domain)
                                         for domain in domains)))
            else:
                # A single branch that fails to coerce is not scalar.
                with translate_guard(consequents[0]
                                     if consequents else alternative):
                    raise Error("Expected a scalar value")
        consequents = [ImplicitCastBinding(consequent, domain,
                                           consequent.syntax)
                       for consequent in consequents]
        if alternative is not None:
            alternative = ImplicitCastBinding(alternative, domain,
                                              alternative.syntax)
        return FormulaBinding(self.state.scope,
                              self.signature(), domain, self.syntax,
                              predicates=predicates,
                              consequents=consequents,
                              alternative=alternative)
class BindSwitch(BindFunction):
    call('switch')
    signature = SwitchSig
    hint = """switch(x, v, c, [...][, a=NULL]) -> c if x = v; a otherwise"""

    def match(self):
        """Split the raw arguments into the switch variable,
        variant/consequent pairs and an optional trailing alternative."""
        operands = list(reversed(self.syntax.arguments))
        if len(operands) < 3:
            # NOTE(review): `str.encode` renders as b'...' on Python 3 —
            # confirm the target version.
            raise Error("Function '%s' expects 3 or more arguments;"
                        " got %s" % (self.name.encode('utf-8'),
                                     len(operands)))
        variable = None
        variants = []
        consequents = []
        alternative = None
        # First operand is the switch variable; the rest pair up, with a
        # lone final operand becoming the alternative.
        variable = operands.pop()
        while operands:
            if len(operands) == 1:
                alternative = operands.pop()
            else:
                variants.append(operands.pop())
                consequents.append(operands.pop())
        return {
            'variable': variable,
            'variants': variants,
            'consequents': consequents,
            'alternative': alternative,
        }

    def correlate(self, variable, variants, consequents, alternative):
        """Coerce the variable and variants to one common domain and all
        branches to another, then build the SwitchSig binding."""
        # Domain shared by the switch variable and all variants.
        domains = [variable.domain] + [variant.domain for variant in variants]
        domain = coerce(*domains)
        if domain is None:
            raise Error("Cannot coerce values of types (%s)"
                        " to a common type"
                        % (", ".join(str(domain)
                                     for domain in domains)))
        variable = ImplicitCastBinding(variable, domain, variable.syntax)
        variants = [ImplicitCastBinding(variant, domain, variant.syntax)
                    for variant in variants]
        # Domain shared by all consequents (and the alternative, if any).
        domains = [consequent.domain for consequent in consequents]
        if alternative is not None:
            domains.append(alternative.domain)
        domain = coerce(*domains)
        if domain is None:
            if len(domains) > 1:
                raise Error("Cannot coerce values of types (%s)"
                            " to a common type"
                            % (", ".join(str(domain)
                                         for domain in domains)))
            else:
                # A single branch that fails to coerce is not scalar.
                with translate_guard(consequents[0] if consequents
                                     else alternative):
                    raise Error("Expected a scalar value")
        consequents = [ImplicitCastBinding(consequent, domain,
                                           consequent.syntax)
                       for consequent in consequents]
        if alternative is not None:
            alternative = ImplicitCastBinding(alternative, domain,
                                              alternative.syntax)
        return FormulaBinding(self.state.scope,
                              self.signature(), domain, self.syntax,
                              variable=variable,
                              variants=variants,
                              consequents=consequents,
                              alternative=alternative)
class BindExistsBase(BindFunction):
    """Shared binder for quantifier functions (e.g. `exists`).

    `polarity` selects the quantifier direction; set by subclass.
    """
    signature = ExistsSig
    polarity = None

    def correlate(self, op):
        # If the operand expands to a (single) nested selection, use the
        # original operand as the plural base of the quantifier.
        recipes = expand(op, with_syntax=True)
        plural_base = None
        if recipes is not None:
            if len(recipes) != 1:
                with translate_guard(op):
                    # NOTE(review): `str.encode` renders as b'...' on
                    # Python 3 — confirm the target version.
                    raise Error("Function '%s' expects 1 argument; got %s"
                                % (self.name.encode('utf-8'), len(recipes)))
            plural_base = op
            syntax, recipe = recipes[0]
            op = self.state.use(recipe, syntax)
        op = ImplicitCastBinding(op, coerce(BooleanDomain()), op.syntax)
        return FormulaBinding(self.state.scope,
                              QuantifySig(self.polarity), op.domain,
                              self.syntax, plural_base=plural_base, op=op)
class BindExists(BindExistsBase):
call('exists')
polarity = +1
hint = """base.exists(p) -> TRUE if there exists p such that | |
<filename>bindings/python/capstone/arm64_const.py
# For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [arm64_const.py]
# ARM64 shift type
ARM64_SFT_INVALID = 0
ARM64_SFT_LSL = 1
ARM64_SFT_MSL = 2
ARM64_SFT_LSR = 3
ARM64_SFT_ASR = 4
ARM64_SFT_ROR = 5
# ARM64 extender type
ARM64_EXT_INVALID = 0
ARM64_EXT_UXTB = 1
ARM64_EXT_UXTH = 2
ARM64_EXT_UXTW = 3
ARM64_EXT_UXTX = 4
ARM64_EXT_SXTB = 5
ARM64_EXT_SXTH = 6
ARM64_EXT_SXTW = 7
ARM64_EXT_SXTX = 8
# ARM64 condition code
ARM64_CC_INVALID = 0
ARM64_CC_EQ = 1
ARM64_CC_NE = 2
ARM64_CC_HS = 3
ARM64_CC_LO = 4
ARM64_CC_MI = 5
ARM64_CC_PL = 6
ARM64_CC_VS = 7
ARM64_CC_VC = 8
ARM64_CC_HI = 9
ARM64_CC_LS = 10
ARM64_CC_GE = 11
ARM64_CC_LT = 12
ARM64_CC_GT = 13
ARM64_CC_LE = 14
ARM64_CC_AL = 15
ARM64_CC_NV = 16
# Operand type for instruction's operands
ARM64_OP_INVALID = 0
ARM64_OP_REG = 1
ARM64_OP_CIMM = 2
ARM64_OP_IMM = 3
ARM64_OP_FP = 4
ARM64_OP_MEM = 5
# ARM64 registers
ARM64_REG_INVALID = 0
ARM64_REG_NZCV = 1
ARM64_REG_WSP = 2
ARM64_REG_WZR = 3
ARM64_REG_SP = 4
ARM64_REG_XZR = 5
ARM64_REG_B0 = 6
ARM64_REG_B1 = 7
ARM64_REG_B2 = 8
ARM64_REG_B3 = 9
ARM64_REG_B4 = 10
ARM64_REG_B5 = 11
ARM64_REG_B6 = 12
ARM64_REG_B7 = 13
ARM64_REG_B8 = 14
ARM64_REG_B9 = 15
ARM64_REG_B10 = 16
ARM64_REG_B11 = 17
ARM64_REG_B12 = 18
ARM64_REG_B13 = 19
ARM64_REG_B14 = 20
ARM64_REG_B15 = 21
ARM64_REG_B16 = 22
ARM64_REG_B17 = 23
ARM64_REG_B18 = 24
ARM64_REG_B19 = 25
ARM64_REG_B20 = 26
ARM64_REG_B21 = 27
ARM64_REG_B22 = 28
ARM64_REG_B23 = 29
ARM64_REG_B24 = 30
ARM64_REG_B25 = 31
ARM64_REG_B26 = 32
ARM64_REG_B27 = 33
ARM64_REG_B28 = 34
ARM64_REG_B29 = 35
ARM64_REG_B30 = 36
ARM64_REG_B31 = 37
ARM64_REG_D0 = 38
ARM64_REG_D1 = 39
ARM64_REG_D2 = 40
ARM64_REG_D3 = 41
ARM64_REG_D4 = 42
ARM64_REG_D5 = 43
ARM64_REG_D6 = 44
ARM64_REG_D7 = 45
ARM64_REG_D8 = 46
ARM64_REG_D9 = 47
ARM64_REG_D10 = 48
ARM64_REG_D11 = 49
ARM64_REG_D12 = 50
ARM64_REG_D13 = 51
ARM64_REG_D14 = 52
ARM64_REG_D15 = 53
ARM64_REG_D16 = 54
ARM64_REG_D17 = 55
ARM64_REG_D18 = 56
ARM64_REG_D19 = 57
ARM64_REG_D20 = 58
ARM64_REG_D21 = 59
ARM64_REG_D22 = 60
ARM64_REG_D23 = 61
ARM64_REG_D24 = 62
ARM64_REG_D25 = 63
ARM64_REG_D26 = 64
ARM64_REG_D27 = 65
ARM64_REG_D28 = 66
ARM64_REG_D29 = 67
ARM64_REG_D30 = 68
ARM64_REG_D31 = 69
ARM64_REG_H0 = 70
ARM64_REG_H1 = 71
ARM64_REG_H2 = 72
ARM64_REG_H3 = 73
ARM64_REG_H4 = 74
ARM64_REG_H5 = 75
ARM64_REG_H6 = 76
ARM64_REG_H7 = 77
ARM64_REG_H8 = 78
ARM64_REG_H9 = 79
ARM64_REG_H10 = 80
ARM64_REG_H11 = 81
ARM64_REG_H12 = 82
ARM64_REG_H13 = 83
ARM64_REG_H14 = 84
ARM64_REG_H15 = 85
ARM64_REG_H16 = 86
ARM64_REG_H17 = 87
ARM64_REG_H18 = 88
ARM64_REG_H19 = 89
ARM64_REG_H20 = 90
ARM64_REG_H21 = 91
ARM64_REG_H22 = 92
ARM64_REG_H23 = 93
ARM64_REG_H24 = 94
ARM64_REG_H25 = 95
ARM64_REG_H26 = 96
ARM64_REG_H27 = 97
ARM64_REG_H28 = 98
ARM64_REG_H29 = 99
ARM64_REG_H30 = 100
ARM64_REG_H31 = 101
ARM64_REG_Q0 = 102
ARM64_REG_Q1 = 103
ARM64_REG_Q2 = 104
ARM64_REG_Q3 = 105
ARM64_REG_Q4 = 106
ARM64_REG_Q5 = 107
ARM64_REG_Q6 = 108
ARM64_REG_Q7 = 109
ARM64_REG_Q8 = 110
ARM64_REG_Q9 = 111
ARM64_REG_Q10 = 112
ARM64_REG_Q11 = 113
ARM64_REG_Q12 = 114
ARM64_REG_Q13 = 115
ARM64_REG_Q14 = 116
ARM64_REG_Q15 = 117
ARM64_REG_Q16 = 118
ARM64_REG_Q17 = 119
ARM64_REG_Q18 = 120
ARM64_REG_Q19 = 121
ARM64_REG_Q20 = 122
ARM64_REG_Q21 = 123
ARM64_REG_Q22 = 124
ARM64_REG_Q23 = 125
ARM64_REG_Q24 = 126
ARM64_REG_Q25 = 127
ARM64_REG_Q26 = 128
ARM64_REG_Q27 = 129
ARM64_REG_Q28 = 130
ARM64_REG_Q29 = 131
ARM64_REG_Q30 = 132
ARM64_REG_Q31 = 133
ARM64_REG_S0 = 134
ARM64_REG_S1 = 135
ARM64_REG_S2 = 136
ARM64_REG_S3 = 137
ARM64_REG_S4 = 138
ARM64_REG_S5 = 139
ARM64_REG_S6 = 140
ARM64_REG_S7 = 141
ARM64_REG_S8 = 142
ARM64_REG_S9 = 143
ARM64_REG_S10 = 144
ARM64_REG_S11 = 145
ARM64_REG_S12 = 146
ARM64_REG_S13 = 147
ARM64_REG_S14 = 148
ARM64_REG_S15 = 149
ARM64_REG_S16 = 150
ARM64_REG_S17 = 151
ARM64_REG_S18 = 152
ARM64_REG_S19 = 153
ARM64_REG_S20 = 154
ARM64_REG_S21 = 155
ARM64_REG_S22 = 156
ARM64_REG_S23 = 157
ARM64_REG_S24 = 158
ARM64_REG_S25 = 159
ARM64_REG_S26 = 160
ARM64_REG_S27 = 161
ARM64_REG_S28 = 162
ARM64_REG_S29 = 163
ARM64_REG_S30 = 164
ARM64_REG_S31 = 165
ARM64_REG_W0 = 166
ARM64_REG_W1 = 167
ARM64_REG_W2 = 168
ARM64_REG_W3 = 169
ARM64_REG_W4 = 170
ARM64_REG_W5 = 171
ARM64_REG_W6 = 172
ARM64_REG_W7 = 173
ARM64_REG_W8 = 174
ARM64_REG_W9 = 175
ARM64_REG_W10 = 176
ARM64_REG_W11 = 177
ARM64_REG_W12 = 178
ARM64_REG_W13 = 179
ARM64_REG_W14 = 180
ARM64_REG_W15 = 181
ARM64_REG_W16 = 182
ARM64_REG_W17 = 183
ARM64_REG_W18 = 184
ARM64_REG_W19 = 185
ARM64_REG_W20 = 186
ARM64_REG_W21 = 187
ARM64_REG_W22 = 188
ARM64_REG_W23 = 189
ARM64_REG_W24 = 190
ARM64_REG_W25 = 191
ARM64_REG_W26 = 192
ARM64_REG_W27 = 193
ARM64_REG_W28 = 194
ARM64_REG_W29 = 195
ARM64_REG_W30 = 196
ARM64_REG_X0 = 197
ARM64_REG_X1 = 198
ARM64_REG_X2 = 199
ARM64_REG_X3 = 200
ARM64_REG_X4 = 201
ARM64_REG_X5 = 202
ARM64_REG_X6 = 203
ARM64_REG_X7 = 204
ARM64_REG_X8 = 205
ARM64_REG_X9 = 206
ARM64_REG_X10 = 207
ARM64_REG_X11 = 208
ARM64_REG_X12 = 209
ARM64_REG_X13 = 210
ARM64_REG_X14 = 211
ARM64_REG_X15 = 212
ARM64_REG_X16 = 213
ARM64_REG_X17 = 214
ARM64_REG_X18 = 215
ARM64_REG_X19 = 216
ARM64_REG_X20 = 217
ARM64_REG_X21 = 218
ARM64_REG_X22 = 219
ARM64_REG_X23 = 220
ARM64_REG_X24 = 221
ARM64_REG_X25 = 222
ARM64_REG_X26 = 223
ARM64_REG_X27 = 224
ARM64_REG_X28 = 225
ARM64_REG_X29 = 226
ARM64_REG_X30 = 227
ARM64_REG_MAX = 228
# alias registers
ARM64_REG_IP1 = ARM64_REG_X16
ARM64_REG_IP0 = ARM64_REG_X17
ARM64_REG_FP = ARM64_REG_X29
ARM64_REG_LR = ARM64_REG_X30
# ARM64 instruction
ARM64_INS_INVALID = 0
ARM64_INS_ABS = 1
ARM64_INS_ADC = 2
ARM64_INS_ADDHN2 = 3
ARM64_INS_ADDHN = 4
ARM64_INS_ADDP = 5
ARM64_INS_ADDV = 6
ARM64_INS_ADD = 7
ARM64_INS_CMN = 8
ARM64_INS_ADRP = 9
ARM64_INS_ADR = 10
ARM64_INS_AESD = 11
ARM64_INS_AESE = 12
ARM64_INS_AESIMC = 13
ARM64_INS_AESMC = 14
ARM64_INS_AND = 15
ARM64_INS_ASR = 16
ARM64_INS_AT = 17
ARM64_INS_BFI = 18
ARM64_INS_BFM = 19
ARM64_INS_BFXIL = 20
ARM64_INS_BIC = 21
ARM64_INS_BIF = 22
ARM64_INS_BIT = 23
ARM64_INS_BLR = 24
ARM64_INS_BL = 25
ARM64_INS_BRK = 26
ARM64_INS_BR = 27
ARM64_INS_BSL = 28
ARM64_INS_B = 29
ARM64_INS_CBNZ = 30
ARM64_INS_CBZ = 31
ARM64_INS_CCMN = 32
ARM64_INS_CCMP = 33
ARM64_INS_CLREX = 34
ARM64_INS_CLS = 35
ARM64_INS_CLZ = 36
ARM64_INS_CMEQ = 37
ARM64_INS_CMGE = 38
ARM64_INS_CMGT = 39
ARM64_INS_CMHI = 40
ARM64_INS_CMHS = 41
ARM64_INS_CMLE = 42
ARM64_INS_CMLT = 43
ARM64_INS_CMP = 44
ARM64_INS_CMTST = 45
ARM64_INS_CNT = 46
ARM64_INS_CRC32B = 47
ARM64_INS_CRC32CB = 48
ARM64_INS_CRC32CH = 49
ARM64_INS_CRC32CW = 50
ARM64_INS_CRC32CX = 51
ARM64_INS_CRC32H = 52
ARM64_INS_CRC32W = 53
ARM64_INS_CRC32X = 54
ARM64_INS_CSEL = 55
ARM64_INS_CSINC = 56
ARM64_INS_CSINV = 57
ARM64_INS_CSNEG = 58
ARM64_INS_DCPS1 = 59
ARM64_INS_DCPS2 = 60
ARM64_INS_DCPS3 = 61
ARM64_INS_DC = 62
ARM64_INS_DMB = 63
ARM64_INS_DRPS = 64
ARM64_INS_DSB = 65
ARM64_INS_DUP = 66
ARM64_INS_EON = 67
ARM64_INS_EOR = 68
ARM64_INS_ERET = 69
ARM64_INS_EXTR = 70
ARM64_INS_EXT = 71
ARM64_INS_FABD = 72
ARM64_INS_FABS = 73
ARM64_INS_FACGE = 74
ARM64_INS_FACGT = 75
ARM64_INS_FADDP = 76
ARM64_INS_FADD = 77
ARM64_INS_FCCMPE = 78
ARM64_INS_FCCMP = 79
ARM64_INS_FCMEQ = 80
ARM64_INS_FCMGE = 81
ARM64_INS_FCMGT = 82
ARM64_INS_FCMLE = 83
ARM64_INS_FCMLT = 84
ARM64_INS_FCMP = 85
ARM64_INS_FCMPE = 86
ARM64_INS_FCSEL = 87
ARM64_INS_FCVTAS = 88
ARM64_INS_FCVTAU = 89
ARM64_INS_FCVTL = 90
ARM64_INS_FCVTL2 = 91
ARM64_INS_FCVTMS = 92
ARM64_INS_FCVTMU = 93
ARM64_INS_FCVTN = 94
ARM64_INS_FCVTN2 = 95
ARM64_INS_FCVTNS = 96
ARM64_INS_FCVTNU = 97
ARM64_INS_FCVTPS = 98
ARM64_INS_FCVTPU = 99
ARM64_INS_FCVTXN = 100
ARM64_INS_FCVTXN2 = 101
ARM64_INS_FCVTZS = 102
ARM64_INS_FCVTZU = 103
ARM64_INS_FCVT = 104
ARM64_INS_FDIV = 105
ARM64_INS_FMADD = 106
ARM64_INS_FMAXNMP = 107
ARM64_INS_FMAXNMV = 108
ARM64_INS_FMAXNM = 109
ARM64_INS_FMAXP = 110
ARM64_INS_FMAXV = 111
ARM64_INS_FMAX = 112
ARM64_INS_FMINNMP = 113
ARM64_INS_FMINNMV = 114
ARM64_INS_FMINNM = 115
ARM64_INS_FMINP = 116
ARM64_INS_FMINV = 117
ARM64_INS_FMIN = 118
ARM64_INS_FMLA = 119
ARM64_INS_FMLS = 120
ARM64_INS_FMOV = 121
ARM64_INS_FMSUB = 122
ARM64_INS_FMULX = 123
ARM64_INS_FMUL = 124
ARM64_INS_FNEG = 125
ARM64_INS_FNMADD = 126
ARM64_INS_FNMSUB = 127
ARM64_INS_FNMUL = 128
ARM64_INS_FRECPE = 129
ARM64_INS_FRECPS = 130
ARM64_INS_FRECPX = 131
ARM64_INS_FRINTA = 132
ARM64_INS_FRINTI = 133
ARM64_INS_FRINTM = 134
ARM64_INS_FRINTN = 135
ARM64_INS_FRINTP = 136
ARM64_INS_FRINTX = 137
ARM64_INS_FRINTZ = 138
ARM64_INS_FRSQRTE = 139
ARM64_INS_FRSQRTS = 140
ARM64_INS_FSQRT = 141
ARM64_INS_FSUB = 142
ARM64_INS_HINT = 143
ARM64_INS_HLT = 144
ARM64_INS_HVC = 145
ARM64_INS_IC = 146
ARM64_INS_INS = 147
ARM64_INS_ISB = 148
ARM64_INS_LD1 = 149
ARM64_INS_LD1R = 150
ARM64_INS_LD2 = 151
ARM64_INS_LD2R = 152
ARM64_INS_LD3 = 153
ARM64_INS_LD3R = 154
ARM64_INS_LD4 = 155
ARM64_INS_LD4R = 156
ARM64_INS_LDARB = 157
ARM64_INS_LDAR = 158
ARM64_INS_LDARH = 159
ARM64_INS_LDAXP = 160
ARM64_INS_LDAXRB = 161
ARM64_INS_LDAXR = 162
ARM64_INS_LDAXRH = 163
ARM64_INS_LDPSW = 164
ARM64_INS_LDRSB = 165
ARM64_INS_LDURSB = 166
ARM64_INS_LDRSH = 167
ARM64_INS_LDURSH = 168
ARM64_INS_LDRSW = 169
ARM64_INS_LDR = 170
ARM64_INS_LDTRSB = 171
ARM64_INS_LDTRSH = 172
ARM64_INS_LDTRSW = 173
ARM64_INS_LDURSW = 174
ARM64_INS_LDXP = 175
ARM64_INS_LDXRB = 176
ARM64_INS_LDXR = 177
ARM64_INS_LDXRH = 178
ARM64_INS_LDRH = 179
ARM64_INS_LDURH = 180
ARM64_INS_STRH = 181
ARM64_INS_STURH = 182
ARM64_INS_LDTRH = 183
ARM64_INS_STTRH = 184
ARM64_INS_LDUR = 185
ARM64_INS_STR = 186
ARM64_INS_STUR = 187
ARM64_INS_LDTR = 188
ARM64_INS_STTR = 189
ARM64_INS_LDRB = 190
ARM64_INS_LDURB = 191
ARM64_INS_STRB = 192
ARM64_INS_STURB = 193
ARM64_INS_LDTRB = 194
ARM64_INS_STTRB = 195
ARM64_INS_LDP = 196
ARM64_INS_LDNP = 197
ARM64_INS_STNP = 198
ARM64_INS_STP = 199
ARM64_INS_LSL = 200
ARM64_INS_LSR = 201
ARM64_INS_MADD = 202
ARM64_INS_MLA = 203
ARM64_INS_MLS = 204
ARM64_INS_MOVI = 205
ARM64_INS_MOVK = 206
ARM64_INS_MOVN = 207
ARM64_INS_MOVZ = 208
ARM64_INS_MRS = 209
ARM64_INS_MSR = 210
ARM64_INS_MSUB = 211
ARM64_INS_MUL = 212
ARM64_INS_MVNI = 213
ARM64_INS_MVN = 214
ARM64_INS_NEG = 215
ARM64_INS_NOT = 216
ARM64_INS_ORN = 217
ARM64_INS_ORR = 218
ARM64_INS_PMULL2 = 219
ARM64_INS_PMULL = 220
ARM64_INS_PMUL = 221
ARM64_INS_PRFM = 222
ARM64_INS_PRFUM = 223
ARM64_INS_SQRSHRUN2 = 224
ARM64_INS_SQRSHRUN = 225
ARM64_INS_SQSHRUN2 = 226
ARM64_INS_SQSHRUN = 227
ARM64_INS_RADDHN2 = 228
ARM64_INS_RADDHN = 229
ARM64_INS_RBIT = 230
ARM64_INS_RET = 231
ARM64_INS_REV16 = 232
ARM64_INS_REV32 = 233
ARM64_INS_REV64 = 234
ARM64_INS_REV = 235
ARM64_INS_ROR = 236
ARM64_INS_RSHRN2 = 237
ARM64_INS_RSHRN = 238
ARM64_INS_RSUBHN2 = 239
ARM64_INS_RSUBHN = 240
ARM64_INS_SABAL2 = 241
ARM64_INS_SABAL = 242
ARM64_INS_SABA = 243
ARM64_INS_SABDL2 = 244
ARM64_INS_SABDL = 245
ARM64_INS_SABD = 246
ARM64_INS_SADALP = 247
ARM64_INS_SADDL2 = 248
ARM64_INS_SADDLP = 249
ARM64_INS_SADDLV = 250
ARM64_INS_SADDL = 251
ARM64_INS_SADDW2 = 252
ARM64_INS_SADDW = 253
ARM64_INS_SBC = 254
ARM64_INS_SBFIZ = 255
ARM64_INS_SBFM = 256
ARM64_INS_SBFX = 257
ARM64_INS_SCVTF = 258
ARM64_INS_SDIV = 259
ARM64_INS_SHA1C = 260
ARM64_INS_SHA1H = 261
ARM64_INS_SHA1M = 262
ARM64_INS_SHA1P = 263
ARM64_INS_SHA1SU0 = 264
ARM64_INS_SHA1SU1 = 265
ARM64_INS_SHA256H = 266
ARM64_INS_SHA256H2 = 267
ARM64_INS_SHA256SU0 = 268
ARM64_INS_SHA256SU1 = 269
ARM64_INS_SHADD = 270
ARM64_INS_SHLL2 = 271
ARM64_INS_SHLL = 272
ARM64_INS_SHL = 273
ARM64_INS_SHRN2 = 274
ARM64_INS_SHRN = 275
ARM64_INS_SHSUB = 276
ARM64_INS_SLI = 277
ARM64_INS_SMADDL = 278
ARM64_INS_SMAXP = 279
ARM64_INS_SMAXV = 280
ARM64_INS_SMAX = 281
ARM64_INS_SMC = 282
ARM64_INS_SMINP = 283
ARM64_INS_SMINV = 284
ARM64_INS_SMIN = 285
ARM64_INS_SMLAL2 = 286
ARM64_INS_SMLAL = 287
ARM64_INS_SMLSL2 = 288
ARM64_INS_SMLSL = 289
ARM64_INS_SMOV = 290
ARM64_INS_SMSUBL = 291
ARM64_INS_SMULH = 292
ARM64_INS_SMULL2 = 293
ARM64_INS_SMULL = 294
ARM64_INS_SQABS = 295
ARM64_INS_SQADD = 296
ARM64_INS_SQDMLAL2 = 297
ARM64_INS_SQDMLAL = 298
ARM64_INS_SQDMLSL2 = 299
ARM64_INS_SQDMLSL = 300
ARM64_INS_SQDMULH = 301
ARM64_INS_SQDMULL2 = 302
ARM64_INS_SQDMULL = 303
ARM64_INS_SQNEG = 304
ARM64_INS_SQRDMULH = 305
ARM64_INS_SQRSHL = 306
ARM64_INS_SQRSHRN = 307
ARM64_INS_SQRSHRN2 = 308
ARM64_INS_SQSHLU = 309
ARM64_INS_SQSHL = 310
ARM64_INS_SQSHRN = 311
ARM64_INS_SQSHRN2 = 312
ARM64_INS_SQSUB = 313
ARM64_INS_SQXTN = 314
ARM64_INS_SQXTN2 = 315
ARM64_INS_SQXTUN = 316
ARM64_INS_SQXTUN2 = 317
ARM64_INS_SRHADD = 318
ARM64_INS_SRI = 319
ARM64_INS_SRSHL = 320
ARM64_INS_SRSHR = 321
ARM64_INS_SRSRA = 322
ARM64_INS_SSHLL2 = 323
ARM64_INS_SSHLL = 324
ARM64_INS_SSHL = 325
ARM64_INS_SSHR = 326
ARM64_INS_SSRA = 327
ARM64_INS_SSUBL2 = 328
ARM64_INS_SSUBL = 329
ARM64_INS_SSUBW2 = 330
ARM64_INS_SSUBW = 331
ARM64_INS_ST1 = 332
ARM64_INS_ST2 = 333
ARM64_INS_ST3 = 334
ARM64_INS_ST4 = 335
ARM64_INS_STLRB = 336
ARM64_INS_STLR = 337
ARM64_INS_STLRH = 338
ARM64_INS_STLXP = 339
ARM64_INS_STLXRB = 340
ARM64_INS_STLXR = 341
ARM64_INS_STLXRH = 342
ARM64_INS_STXP = 343
ARM64_INS_STXRB = 344
ARM64_INS_STXR = 345
ARM64_INS_STXRH = 346
ARM64_INS_SUBHN2 = 347
ARM64_INS_SUBHN = 348
ARM64_INS_SUB = 349
ARM64_INS_SUQADD = 350
ARM64_INS_SVC = 351
ARM64_INS_SXTB = 352
ARM64_INS_SXTH = 353
ARM64_INS_SXTW | |
import math
from math import log2, exp
import numpy as np
import torch
from torch import nn
from torch.nn.functional import softplus
import torch.nn.functional as F
from torch.autograd import grad
from typing import List, Callable, Union, Any, TypeVar, Tuple
# from torch import tensor as Tensor
Tensor = TypeVar('torch.tensor')
from CALAE.loss.hessian_penalty import hessian_penalty
from CALAE.metrics.perceptual import PerceptualLoss
import lpips
import piq
def zero_centered_gradient_penalty(real_samples, real_prediction):
    """R1-style zero-centered gradient penalty on real data, for E and D.

    Returns the batch mean of the squared L2 norm of
    d(real_prediction)/d(real_samples).
    """
    ones = torch.ones_like(real_prediction, requires_grad=True)
    gradients = grad(outputs=real_prediction, inputs=real_samples,
                     grad_outputs=ones, create_graph=True, retain_graph=True)[0]
    per_sample = gradients.pow(2).reshape(gradients.shape[0], -1).sum(dim=1)
    return per_sample.mean()
def loss_discriminator(E, D, alpha, real_samples, fake_samples, gamma=10, use_bce=False,
                       enable_hessian_real=False, enable_hessian_fake=False,
                       hessian_layers_fake=[-2], hessian_layers_real=[-2]):
    """Discriminator loss over encoder features.

    Real and fake images are embedded by E, then scored by D. With
    `use_bce` the loss is BCE against 1/0 targets; otherwise the
    softplus (logistic non-saturating) GAN loss is used. When gamma > 0
    a zero-centered gradient penalty on the real branch is added.
    """
    real_score = D(E(real_samples, alpha))
    fake_score = D(E(fake_samples, alpha))
    if use_bce:
        total = adv_loss(real_score, 1) + adv_loss(fake_score, 0)
    else:
        # Push real scores up and fake scores down (logistic loss).
        total = (F.softplus(-real_score) + F.softplus(fake_score)).mean()
    if gamma > 0:
        total = total + zero_centered_gradient_penalty(real_samples, real_score).mul(gamma / 2)
    return total
def loss_discriminator_img(D, real_samples, fake_samples, gamma=10, use_bce=False):
    """Discriminator loss applied directly to images (no encoder stage).

    Same loss structure as loss_discriminator: BCE or softplus GAN loss,
    plus an optional zero-centered gradient penalty on the real branch.
    """
    real_score = D(real_samples)
    fake_score = D(fake_samples)
    if use_bce:
        total = adv_loss(real_score, 1) + adv_loss(fake_score, 0)
    else:
        # Push real scores up and fake scores down (logistic loss).
        total = (F.softplus(-real_score) + F.softplus(fake_score)).mean()
    if gamma > 0:
        total = total + zero_centered_gradient_penalty(real_samples, real_score).mul(gamma / 2)
    return total
def loss_generator(E, D, alpha, fake_samples, enable_hessian=True, hessian_layers=[-1,-2], current_layer=[-1], hessian_weight=0.01):
    """Generator adversarial loss, with an optional hessian penalty on E.

    Minimizes softplus(-D(E(fake))) so the generator's samples score as
    real; the per-layer hessian penalty of the layer currently being
    blended in is scaled by alpha.
    """
    score = D(E(fake_samples, alpha))
    total = softplus(-score).mean()
    if enable_hessian:
        for layer in hessian_layers:
            penalty = hessian_penalty(E, z=fake_samples, alpha=alpha, return_norm=layer) * hessian_weight
            if layer in current_layer:
                # Ramp the penalty of the still-fading-in layer.
                penalty = penalty * alpha
            total = total + penalty
    return total
def loss_avg_generator(G, G_avg, F_z, scale, alpha, loss_fn, bbox=None):
    """Consistency loss between G and its running-average copy G_avg.

    Both generators decode the same latent F_z; `loss_fn` measures how
    far the live generator has drifted from the averaged one.
    """
    current = G(F_z, scale, alpha, bbox=bbox)
    averaged = G_avg(F_z, scale, alpha, bbox=bbox)
    return loss_fn(current, averaged)
def loss_generator_consistency(fake, real, loss_fn=None, use_perceptual=False,
                               use_ssim=True, ssim_weight=1, use_ssim_tv=False,
                               use_sobel=True, sobel_weight=1,
                               use_sobel_tv=False, sobel_fn=None):
    """Multi-term consistency loss between generated (`fake`) and target (`real`) images.

    Terms (each optional):
      * `loss_fn` on raw pixels, or on perceptual features when `use_perceptual`;
      * an SSIM term (multiplied into the running loss — see NOTE below);
      * an edge (sobel) term compared either with `sobel_fn` or via SSIM.

    Returns a scalar tensor (or 0 when every term is disabled).
    """
    if loss_fn:
        if use_perceptual:
            scale = fake.shape[2]
            # Feature extractors only exist up to 32 px; larger images reuse 32.
            p_scale = scale if scale < 32 else 32
            p_func = perceptual_loss[p_scale]
            if p_func is None:
                p_func = PerceptualLoss(ilayer=percep_layer_lookup[p_scale])
                # Bug fix: cache under the clamped key used for lookup above.
                # Caching under `scale` never hit for scale > 32, so the
                # extractor was rebuilt on every call.
                perceptual_loss[p_scale] = p_func
            loss = loss_fn(p_func(fake), p_func(real))
        else:
            loss = loss_fn(fake, real)
    else:
        loss = 0
    if use_ssim:
        s_loss = ssim_loss(fake, real) * ssim_weight
        if use_ssim_tv:
            s_loss = s_loss / total_variation(fake)
        # NOTE(review): the SSIM term is multiplied (not added) into the loss,
        # which zeroes everything when loss_fn is disabled — confirm intent.
        loss *= s_loss
    if use_sobel:
        sobel_real = sobel(real)
        sobel_fake = sobel(fake)
        if use_sobel_tv:
            sobel_real = sobel_real / total_variation(fake)
            sobel_fake = sobel_fake / total_variation(fake)
        if sobel_fn:
            sobel_loss = sobel_fn(sobel_real, sobel_fake)
        else:
            sim, cs = ssim(sobel_real, sobel_fake, window_size=11, size_average=True, full=True, val_range=2)
            sim = (1 - sim) / 2
            cs = (1 - cs) / 2
            sobel_loss = (sim + cs) ** cs
        loss += sobel_loss * sobel_weight
    return loss
def loss_autoencoder(F, G, E, scale, alpha, z, loss_fn,
                     labels=None, use_tv=False, tv_weight=0.001,
                     permute_regularize=False, bbox=None):
    """Latent round-trip loss: z -> F -> G -> E should recover F(z)'s first row.

    When labels are given, `loss_fn` receives them as a third argument;
    `permute_regularize` additionally compares a shuffled pairing of the
    batch. `use_tv` adds a total-variation penalty on the decoded image.
    """
    w = F(z, scale, z2=None, p_mix=0)
    decoded = G(w, scale, alpha, bbox=bbox)
    recon = E(decoded, alpha)
    target = w[:, 0, :]
    if labels is None:
        loss = loss_fn(target, recon)
    elif permute_regularize:
        # Compare both the identity pairing and a random batch permutation.
        perm = torch.randperm(recon.shape[0], device=recon.device)
        target_cat = torch.cat([target, torch.index_select(target, 0, perm)], 0)
        recon_cat = torch.cat([recon, torch.index_select(recon, 0, perm)], 0)
        loss = loss_fn(target_cat, recon_cat, labels)
    else:
        loss = loss_fn(target, recon, labels)
    if use_tv:
        loss += total_variation(decoded) * tv_weight
    return loss
################################################################################
#### H E S S I A N #############################################################
###################-------------------------------------------------------------
# GENERATOR
def loss_generator_hessian(G, F, z, scale, alpha,
                           scale_alpha=False,
                           hessian_layers=[3],
                           current_layer=[0],
                           hessian_weight=0.01):
    """Weighted hessian penalty of G evaluated at the mapped latent F(z)."""
    loss = hessian_penalty(G, z=F(z, scale, z2=None, p_mix=0), scale=scale, alpha=alpha, return_norm=hessian_layers)
    # NOTE(review): `current_layer` is a list, so `current_layer in hessian_layers`
    # tests list-in-list membership and is False for the default int entries;
    # compare with loss_generator(), which checks `layer in current_layer`.
    # Confirm whether a per-element intersection was intended.
    if current_layer in hessian_layers or scale_alpha:
        loss = loss * alpha
    return loss * hessian_weight
# ENCODER
def loss_encoder_hessian(E, samples, alpha, scale_alpha=False,
                         hessian_layers=[-1,-2], current_layer=[-1],
                         hessian_weight=0.01):
    """Weighted hessian penalty of E evaluated on `samples`."""
    loss = hessian_penalty(E, z=samples, alpha=alpha, return_norm=hessian_layers)
    # NOTE(review): same list-in-list membership test as loss_generator_hessian;
    # confirm whether an element-wise intersection was intended.
    if current_layer in hessian_layers or scale_alpha:
        loss = loss * alpha
    return loss * hessian_weight
################################################################################
#### F O U R I E R #############################################################
###################-------------------------------------------------------------
def fft_loss(x, y, dim=2, diff_fn=lambda x,y: torch.abs(x-y)):
    """Compare x and y in the Fourier domain using `diff_fn`.

    NOTE(review): `torch.rfft` was deprecated in torch 1.7 and removed in
    1.8; porting to `torch.fft.rfftn` is required for modern torch.
    NOTE(review): `xf[dim]` indexes the *first* tensor dimension (the batch
    axis for typical inputs), not a frequency axis — confirm intent.
    """
    xf = torch.rfft(x, 3)
    yf = torch.rfft(y, 3)
    diff = diff_fn(xf[dim], yf[dim])
    loss = diff.mean()
    return loss
################################################################################
#### S T A N D A R D ###########################################################
#####################-----------------------------------------------------------
# Generally applicable losses?
def msle(x, y):
    """Mean squared logarithmic error (inputs must be strictly positive)."""
    return torch.mean((torch.log(x) - torch.log(y)) ** 2)
def mse(x, y):
    """Mean squared error."""
    return torch.mean((x - y) ** 2)
def mae(x, y):
    """Mean absolute error."""
    return (x - y).abs().mean()
def logcosh(x, y):
    """Log-cosh loss: roughly quadratic near zero, linear in the tails."""
    residual = x - y
    # The tiny epsilon matches the original implementation's offset.
    return torch.log(torch.cosh(residual + 1e-12)).mean()
def xtanh(x, y):
    """x*tanh(x)-style loss on the residual: smooth and symmetric."""
    residual = x - y
    return torch.mean(torch.tanh(residual) * residual)
def xsigmoid(x, y):
    """XSigmoid loss: mean of 2*d/(1 + exp(-d)) - d, with d = x - y.

    Smooth and symmetric; behaves like |d| for large residuals and is
    quadratic near zero.

    Bug fix: the previous implementation effectively computed
    2*d / (1 + exp(-d) - d), folding the trailing "- d" into the
    denominator; the reference formula (which was kept alongside in a
    comment) subtracts it from the quotient instead.
    """
    diff = x - y
    return torch.mean(2 * diff / (1 + torch.exp(-diff)) - diff)
def correlation(x, y):
    """Mean dot product between consecutive rows of the absolute residual."""
    residual = (x - y).abs()
    pairwise = residual[:-1] * residual[1:]
    return pairwise.sum(1).mean()
# Simple BCE Discriminator target
def adv_loss(logits, target):
    """Binary cross-entropy of `logits` against a constant 0/1 target."""
    assert target in [1, 0]
    reference = torch.full_like(logits, fill_value=target)
    return F.binary_cross_entropy_with_logits(logits, reference)
####################################################################################
#### P E R C E P T U A L ###########################################################
#########################-----------------------------------------------------------
## Perceptual Loss
# Map from image resolution to the `ilayer` index handed to PerceptualLoss
# (presumably a feature-extractor layer index — confirm in CALAE.metrics.perceptual).
percep_layer_lookup = {
    4: 4,
    8: 9,
    16: 16,
    32: 23
}
# Lazily-built cache of PerceptualLoss modules, keyed by resolution.
# Populated on first use by percep_loss / loss_generator_consistency.
perceptual_loss = {
    4: None,
    8: None,
    16: None,
    32: None,
}
def percep_loss(x, y, scale):
    """MSE between perceptual embeddings of x and y at the given resolution.

    Resolutions above 32 reuse the 32-pixel feature extractor. The
    extractor is built lazily on first use and cached in `perceptual_loss`.
    """
    p_scale = scale if scale < 32 else 32
    p_func = perceptual_loss[p_scale]
    if p_func is None:
        p_func = PerceptualLoss(ilayer=percep_layer_lookup[p_scale])
        # Bug fix: cache under the clamped lookup key. Caching under `scale`
        # meant the cache was never hit for scale > 32, rebuilding the
        # extractor on every call.
        perceptual_loss[p_scale] = p_func
    return (p_func(x) - p_func(y)).pow(2).mean()
######################################################################################
### FAMOS losses - https://github.com/zalandoresearch/famos/blob/master/utils.py #####
##some image level content loss
def contentLoss(a, b, netR, loss_type):
    """Image-level content distance between a and b, selected by `loss_type`.

    loss_type: 0 blurred channel means, 1 pooled channel means,
    10 netR features of both, 100 netR(a) vs raw b, 101/102 blurred/pooled
    netR(a) vs b, 103 blurred channel means of netR(a) vs b.

    Raises Exception("NYI") for any other loss_type.
    """
    def nr(x):
        # Mean squared magnitude. (Removed an unreachable second
        # `return x.abs().mean()` that followed this return.)
        return (x ** 2).mean()
    if loss_type == 0:
        return nr(avgG(a).mean(1) - avgG(b).mean(1))
    if loss_type == 1:
        return nr(avgP(a).mean(1) - avgP(b).mean(1))
    if loss_type == 10:
        return nr(netR(a) - netR(b))
    if loss_type == 100:
        return nr(netR(a) - b)
    if loss_type == 101:
        return nr(avgG(netR(a)) - avgG(b))
    if loss_type == 102:
        return nr(avgP(netR(a)) - avgP(b))
    if loss_type == 103:
        return nr(avgG(netR(a)).mean(1) - avgG(b).mean(1))
    raise Exception("NYI")
def GaussKernel(sigma, wid=None):
    """Build a (3, 3, k, k) per-channel Gaussian kernel for 3-channel conv2d.

    Output channel i convolves only input channel i with a normalized 2-D
    Gaussian of the given sigma; all cross-channel weights are zero.
    """
    if wid is None:
        wid = 2 * 2 * sigma + 1 + 10

    def gaussian(x, mu, sigma):
        return np.exp(-(float(x) - float(mu)) ** 2 / (2 * sigma ** 2))

    def make_kernel(sigma):
        # Clamp the tap count to [3, 150].
        kernel_size = min(max(3, int(wid)), 150)
        mean = np.floor(0.5 * kernel_size)
        kernel_1d = np.array([gaussian(x, mean, sigma) for x in range(kernel_size)])
        np_kernel = np.outer(kernel_1d, kernel_1d).astype(dtype=np.float32)
        # Normalize to unit mass so blurring preserves brightness.
        return np_kernel / np.sum(np_kernel)

    ker = make_kernel(sigma)
    weights = np.zeros((3, 3, ker.shape[0], ker.shape[0])).astype(dtype=np.float32)
    for channel in range(3):
        weights[channel, channel] = ker
    return weights
# Shared blur resources for avgG, built once at import time.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gsigma=1.##how much to blur - larger blurs more ##+"_sig"+str(gsigma)
gwid=61
kernel = torch.FloatTensor(GaussKernel(gsigma,wid=gwid)).to(device)
def avgP(x):
    """Downsample x with non-overlapping 16x16 average pooling."""
    return nn.functional.avg_pool2d(x, 16)
def avgG(x):
    """Gaussian-blur x with the module-level `kernel`.

    Reflect padding keeps the spatial size and avoids border artifacts.
    """
    half = gwid // 2
    padded = nn.functional.pad(x, (half, half, half, half), 'reflect')
    return nn.functional.conv2d(padded, kernel)
########################################################################################
#### T O T A L - V A R I A T I O N #####################################################
###################################-----------------------------------------------------
def tv_loss(x, y, loss_fn):
    """Distance between the total-variation statistics of x and y."""
    return loss_fn(total_variation(x), total_variation(y))
#absolute difference in X and Y directions
def total_variation(y):
    """Mean absolute horizontal difference plus mean absolute vertical difference."""
    horiz = (y[:, :, :, 1:] - y[:, :, :, :-1]).abs().mean()
    vert = (y[:, :, 1:, :] - y[:, :, :-1, :]).abs().mean()
    return horiz + vert
##2D array of the edges of C channels image
def tvArray(x):
    """Per-pixel edge magnitudes of x, summed over channels.

    Returns a (B, 2, H, W) tensor: channel 0 holds vertical-neighbour
    differences, channel 1 horizontal ones; a zero row/column is appended
    so both maps keep the full spatial size.
    """
    dy = (x[:, :, :-1] - x[:, :, 1:]).abs().sum(1).unsqueeze(1)
    dy = torch.cat([dy, x[:, :1, :1] * 0], 2)
    dx = (x[:, :, :, :-1] - x[:, :, :, 1:]).abs().sum(1).unsqueeze(1)
    dx = torch.cat([dx, x[:, :1, :, :1] * 0], 3)
    return torch.cat([dy, dx], 1)
##########################################################################################
#### G R A M #############################################################################
#############-----------------------------------------------------------------------------
def gram_loss(x, y):
    """Mean absolute difference between exponentiated self-similarity maps of x and y."""
    diff = gramMatrix(x, x).exp() - gramMatrix(y, y).exp()
    return diff.abs().mean()
def gram_matrix(y):
    """Per-sample channel Gram matrix of y, normalized by ch*h*w."""
    b, ch, h, w = y.size()
    flat = y.reshape(b, ch, h * w)
    return flat.bmm(flat.transpose(1, 2)) / (ch * h * w)
##negative gram matrix
def gramMatrix(x,y=None,sq=True,bEnergy=False):
if y is None:
y = x
B, CE, width, height = x.size()
hw = width * height
energy = torch.bmm(x.permute(2, 3, 0, 1).view(hw, B, CE),
y.permute(2, 3, 1, 0).view(hw, CE, B), )
energy = energy.permute(1, 2, 0).view(B, B, width, height)
if bEnergy:
return energy
sqX = (x ** 2).sum(1).unsqueeze(0)
sqY = (y ** 2).sum(1).unsqueeze(1)
d=-2 * energy + sqX + sqY
if not sq:
return d##debugging
gram = -torch.clamp(d, | |
"""The base class for many SciUnit objects."""
import sys
PLATFORM = sys.platform
PYTHON_MAJOR_VERSION = sys.version_info.major
if PYTHON_MAJOR_VERSION < 3: # Python 2
raise Exception('Only Python 3 is supported')
import json, git, pickle, hashlib
import numpy as np
import pandas as pd
from pathlib import Path
from git.exc import GitCommandError, InvalidGitRepositoryError
from git.cmd import Git
from git.remote import Remote
from git.repo.base import Repo
from typing import Dict, List, Optional, Tuple, Union, Any
from io import StringIO
try:
import tkinter
except ImportError:
tkinter = None
KERNEL = ('ipykernel' in sys.modules)
HERE = Path(__file__).resolve().parent.name
class Versioned(object):
    """A Mixin class for SciUnit objects.
    Provides a version string based on the Git repository where the model
    is tracked. Provided in part by <NAME> in issue #53.

    Results are cached on the *class* (``_repo``, ``_version``,
    ``_remote_url``), so all instances of a class share them.
    """
    def get_repo(self, cached: bool=True) -> Repo:
        """Get a git repository object for this instance.
        Args:
            cached (bool, optional): Whether to use cached data. Defaults to True.
        Returns:
            Repo: The git repo for this instance, or None when the defining
            module is not inside a git working tree.
        """
        module = sys.modules[self.__module__]
        # We use module.__file__ instead of module.__path__[0]
        # to include modules without a __path__ attribute.
        if hasattr(self.__class__, '_repo') and cached:
            repo = self.__class__._repo
        elif hasattr(module, '__file__'):
            path = Path(module.__file__).resolve()
            try:
                repo = git.Repo(path, search_parent_directories=True)
            except InvalidGitRepositoryError:
                repo = None
        else:
            repo = None
        self.__class__._repo = repo
        return repo
    def get_version(self, cached: bool=True) -> str:
        """Get a git version (i.e. a git commit hash) for this instance.
        Args:
            cached (bool, optional): Whether to use the cached data. Defaults to True.
        Returns:
            str: The git version for this instance ('*' appended when the
            working tree is dirty), or None when no repo is found.
        """
        if cached and hasattr(self.__class__, '_version'):
            version = self.__class__._version
        else:
            repo = self.get_repo()
            if repo is not None:
                head = repo.head
                version = head.commit.hexsha
                if repo.is_dirty():
                    version += "*"
            else:
                version = None
            self.__class__._version = version
        return version
    version = property(get_version)
    def get_remote(self, remote: str='origin') -> Remote:
        """Get a git remote object for this instance.
        Args:
            remote (str, optional): The remote Git repo. Defaults to 'origin'.
        Returns:
            Remote: The named remote if it exists, else the repo's first
            remote, else None (also None when there is no repo at all).
        """
        repo = self.get_repo()
        if repo is not None:
            remotes = {r.name: r for r in repo.remotes}
            if remote in remotes:
                r = remotes[remote]
            elif repo.remotes:
                r = repo.remotes[0]
            else:
                # Bug fix: a repo with no remotes used to raise IndexError
                # here; get_remote_url() already handles a None remote.
                r = None
        else:
            r = None
        return r
    def get_remote_url(self, remote: str='origin', cached: bool=True) -> str:
        """Get a git remote URL for this instance.
        Args:
            remote (str, optional): The remote Git repo. Defaults to 'origin'.
            cached (bool, optional): Whether to use cached data. Defaults to True.
        Raises:
            ex: A Git command error.
        Returns:
            str: The git remote URL for this instance (git@ URLs are
            rewritten to http form), or None when there is no remote.
        """
        if hasattr(self.__class__, '_remote_url') and cached:
            url = self.__class__._remote_url
        else:
            r = self.get_remote(remote)
            try:
                url = list(r.urls)[0]
            except GitCommandError as ex:
                if 'correct access rights' in str(ex):
                    # If ssh is not setup to access this repository
                    cmd = ['git', 'config', '--get', 'remote.%s.url' % r.name]
                    url = Git().execute(cmd)
                else:
                    raise ex
            except AttributeError:
                # r is None (no repo or no remotes).
                url = None
        if url is not None and url.startswith('git@'):
            domain = url.split('@')[1].split(':')[0]
            path = url.split(':')[1]
            url = "http://%s/%s" % (domain, path)
        self.__class__._remote_url = url
        return url
    remote_url = property(get_remote_url)
class SciUnit(Versioned):
    """Abstract base class for models, tests, and scores."""
    def __init__(self):
        """Instantiate a SciUnit object."""
        self.unpicklable = []
    #: A list of attributes that cannot or should not be pickled.
    unpicklable = []
    #: A URL where the code for this object can be found.
    _url = None
    #: A verbosity level for printing information.
    verbose = 1
    def __getstate__(self) -> dict:
        """Copy the object's state from self.__dict__.
        Contains all of the instance attributes. Always uses the dict.copy()
        method to avoid modifying the original state.
        Returns:
            dict: The state of this instance.
        """
        state = self.__dict__.copy()
        # Remove the unpicklable entries.
        if hasattr(self, 'unpicklable'):
            for key in set(self.unpicklable).intersection(state):
                del state[key]
        return state
    def _state(self, state: dict=None, keys: list=None,
               exclude: List[str]=None) -> dict:
        """Get the state of the instance.
        Args:
            state (dict, optional): The dict instance that contains a part of state info of this instance.
                Defaults to None.
            keys (list, optional): Some keys of `state`. Values in `state` associated with these keys will be kept
                and others will be discarded. Defaults to None.
            exclude (List[str], optional): The list of keys. Values in `state` that associated with these keys
                will be removed from `state`. Defaults to None.
        Returns:
            dict: The state of the current instance.
        """
        if state is None:
            state = self.__getstate__()
        if keys:
            state = {key: state[key] for key in keys if key in state.keys()}
        if exclude:
            state = {key: state[key] for key in state.keys()
                     if key not in exclude}
            # NOTE(review): `deep_exclude` is not defined or imported in the
            # code visible here; confirm it exists elsewhere in this module,
            # otherwise this raises NameError whenever `exclude` is given.
            state = deep_exclude(state, exclude)
        return state
    def _properties(self, keys: list=None, exclude: list=None) -> dict:
        """Get the properties of the instance.
        Args:
            keys (list, optional): If not None, only the properties that are in `keys` will be included in
                the return data. Defaults to None.
            exclude (list, optional): The list of properties that will not be included in return data. Defaults to None.
        Returns:
            dict: The dict of properties of the instance.
        """
        result = {}
        props = self.raw_props()
        exclude = exclude if exclude else []
        # 'state' and 'id' are always excluded; 'properties' is skipped below.
        exclude += ['state', 'id']
        for prop in set(props).difference(exclude):
            if prop == 'properties':
                pass  # Avoid infinite recursion
            elif not keys or prop in keys:
                result[prop] = getattr(self, prop)
        return result
    def raw_props(self) -> list:
        """Get the raw properties of the instance.
        Returns:
            list: The list of raw properties.
        """
        class_attrs = dir(self.__class__)
        return [p for p in class_attrs
                if isinstance(getattr(self.__class__, p, None), property)]
    @property
    def state(self) -> dict:
        """Get the state of the instance.
        Returns:
            dict: The state of the instance.
        """
        return self._state()
    @property
    def properties(self) -> dict:
        """Get the properties of the instance.
        Returns:
            dict: The properties of the instance.
        """
        return self._properties()
    @classmethod
    def dict_hash(cls, d: dict) -> str:
        """SHA224 encoded value of `d`.
        Args:
            d (dict): The dict instance to be SHA224 encoded.
        Returns:
            str: SHA224 encoded value of `d`.
        """
        # Sort keys first so the hash is independent of insertion order.
        od = [(key, d[key]) for key in sorted(d)]
        try:
            s = pickle.dumps(od)
        except AttributeError:
            # Fall back to JSON when some value cannot be pickled.
            s = json.dumps(od, cls=SciUnitEncoder).encode('utf-8')
        return hashlib.sha224(s).hexdigest()
    @property
    def hash(self) -> str:
        """A unique numeric identifier of the current model state.
        Returns:
            str: The unique numeric identifier of the current model state.
        """
        return self.dict_hash(self.state)
    def json(self, add_props: bool=False, keys: list=None, exclude: list=None, string: bool=True,
             indent: None=None) -> str:
        """Generate a Json format encoded sciunit instance.
        Args:
            add_props (bool, optional): Whether to add additional properties of the object to the serialization. Defaults to False.
            keys (list, optional): Only the keys in `keys` will be included in the json content. Defaults to None.
            exclude (list, optional): The keys in `exclude` will be excluded from the json content. Defaults to None.
            string (bool, optional): The json content will be `str` type if True, `dict` type otherwise. Defaults to True.
            indent (None, optional): If indent is a non-negative integer or string, then JSON array elements and object members
                will be pretty-printed with that indent level. An indent level of 0, negative, or "" will only
                insert newlines. None (the default) selects the most compact representation. Using a positive integer
                indent indents that many spaces per level. If indent is a string (such as "\t"), that string is
                used to indent each level (source: https://docs.python.org/3/library/json.html#json.dump).
                Defaults to None.
        Returns:
            str: The Json format encoded sciunit instance.
        """
        result = json.dumps(self, cls=SciUnitEncoder,
                            add_props=add_props, keys=keys, exclude=exclude,
                            indent=indent)
        if not string:
            result = json.loads(result)
        return result
    @property
    def _id(self) -> Any:
        """The in-memory identity of this instance (CPython object id)."""
        return id(self)
    @property
    def _class(self) -> dict:
        """A dict describing this object's class: name, import path, and URL."""
        url = '' if self.url is None else self.url
        import_path = '{}.{}'.format(
            self.__class__.__module__,
            self.__class__.__name__
        )
        return {'name': self.__class__.__name__,
                'import_path': import_path,
                'url': url}
    @property
    def id(self) -> str:
        """A string identifier for this instance.
        NOTE(review): `self.json` is a bound method, so this returns the
        str() of the method object, not serialized JSON — likely intended
        to be `self.json()`; confirm before changing, since persisted ids
        may depend on the current behavior.
        """
        return str(self.json)
    @property
    def url(self) -> str:
        """The explicit `_url` if set, otherwise the git remote URL."""
        return self._url if self._url else self.remote_url
class SciUnitEncoder(json.JSONEncoder):
"""Custom JSON encoder for SciUnit objects."""
    def __init__(self, *args, **kwargs):
        """Strip SciUnit-specific options from kwargs before JSONEncoder sees them.
        NOTE(review): the options are stored on the *class* via setattr, so
        they are shared by (and leak across) all encoder instances — confirm
        this is intended before using encoders concurrently.
        """
        for key in ['add_props', 'keys', 'exclude']:
            if key in kwargs:
                setattr(self.__class__, key, kwargs[key])
                kwargs.pop(key)
        super(SciUnitEncoder, self).__init__(*args, **kwargs)
def default(self, obj: Any) -> Union[str, dict, list]:
"""Try to encode the object.
Args:
| |
# self.widgets['lock'][index].setChecked(channel.lock)
# Set the error boolean (true if the lock is active and we are outside the error threshold)
if channel.lock and np.abs(channel.data[-1] - channel.setpoint) > self.threshold:
self.widgets['error_status'][index].setChecked(True)
else:
self.widgets['error_status'][index].setChecked(False)
# Now update lock + voltage plots
self.widgets['curve'][4 * index + 2].setData(channel.voltage)
self.widgets['voltage'][index].setValue(channel.voltage[-1])
self.widgets['curve'][4 * index + 3].setData(channel.error)
self.widgets['error'][index].setValue(channel.error[-1])
def _get_gui_data(self):
""" Updates setpoint and lock parameters with data pulled from GUI
Does not overwrite the script setpoints and locks, but stores the GUI values for comparison based on context.
See Channel.update() method for behavior on how script chooses whether to use internal values or GUI values
"""
for index, channel in enumerate(self.channels):
# Pull the current value from the GUI
channel.gui_setpoint = self.widgets['sp'][index].value()
channel.gui_lock = self.widgets['lock'][index].isChecked()
def _get_channels(self):
""" Returns all active channel numbers
Usually used for checking whether a newly input channel has already been assigned to the script
:return: (list) all active channel numbers
"""
channel_list = []
for channel in self.channels:
channel_list.append(channel.number)
return channel_list
def get_wavelength(self, channel):
# Index of channel
physical_channel = self.channels[self._get_channels().index(channel)]
return self.wlm_client.get_wavelength(physical_channel.number)
class Service(ServiceBase):
    """ A service to enable external updating of WlmMonitor parameters """

    def exposed_update_parameters(self, params_pickle):
        # Parameters arrive pickled so arbitrary structures survive the RPC hop.
        return self._module.update_parameters(pickle.loads(params_pickle))

    def exposed_clear_channel(self, channel):
        return self._module.clear_channel(channel)

    def exposed_reconnect_gui(self):
        return self._module.reconnect_gui()

    def exposed_zero_voltage(self, channel):
        return self._module.zero_voltage(channel)

    def exposed_pause(self):
        # A list means several monitor modules share this service; pause all.
        if isinstance(self._module, list):
            for module in self._module:
                module.pause()
            return 0
        return self._module.pause()

    def exposed_resume(self):
        return self._module.resume()

    def exposed_go_to(self, channel, value, step_size, hold_time):
        return self._module.go_to(channel, value, step_size, hold_time)

    def exposed_get_wavelength(self, channel):
        return self._module.get_wavelength(channel)
class Client(ClientBase):
    """ Client-side proxy mirroring the Service RPC surface. """

    def update_parameters(self, params):
        # Pickle locally so the service can reconstruct arbitrary structures.
        return self._service.exposed_update_parameters(pickle.dumps(params))

    def get_wavelength(self, channel):
        return self._service.exposed_get_wavelength(channel)

    def clear_channel(self, channel):
        return self._service.exposed_clear_channel(channel)

    def zero_voltage(self, channel):
        return self._service.exposed_zero_voltage(channel)

    def reconnect_gui(self):
        return self._service.exposed_reconnect_gui()

    def pause(self):
        return self._service.exposed_pause()

    def resume(self):
        return self._service.exposed_resume()

    def go_to(self, channel, value, step_size=0.001, hold_time=0.1):
        """ Sends laser to a setpoint value gradually
        :param channel: (int) channel number on wavemeter
        :param value: (float) value to set laser frequency to
        :param step_size: (float) step size in THz for laser freq steps
        :param hold_time: (float) time in seconds to wait between steps
        """
        return self._service.exposed_go_to(channel, value, step_size, hold_time)
class Channel:
"""Object containing all information regarding a single wavemeter channel"""
    def __init__(self, channel_params, ao_clients=None, log: LogHandler = None):
        """
        Initializes all parameters given, sets others to default. Also sets up some defaults + placeholders for data
        :param channel_params: (dict) Dictionary of channel parameters (see WlmMonitor.set_parameters() for details)
        :param ao_clients: (dict, optional) Dictionary containing ao clients tying a keyname string to the actual client
        :param log: (LogHandler) instance of LogHandler for logging metadata
        """
        # Set channel parameters to default values
        self.ao_clients = ao_clients
        self.log = log
        self.ao = None # Dict with client name and channel for ao to use
        self.voltage = None # Array of voltage values for ao, used for plotting/monitoring voltage
        self.current_voltage = 0
        self.setpoint = None
        self.lock = False
        self.error = None # Array of error values, used for plotting/monitoring lock error
        self.labels_updated = False # Flag to check if we have updated all labels
        self.setpoint_override = 0 # Flag to check if setpoint has been updated + GUI should be overridden
        # self.lock_override = True # Flag to check if lock has been updated + GUI should be overridden
        self.gui_setpoint = 0 # Current GUI setpoint
        self.gui_lock = False # Current GUI lock boolean
        self.prev_gui_lock = None # Previous GUI lock boolean
        self.prev_gui_setpoint = None # Previous GUI setpoint
        self._min_voltage = None # ao output clamp, lower bound (set by _overwrite_parameters)
        self._max_voltage = None # ao output clamp, upper bound (set by _overwrite_parameters)
        self._gain = None # Scale factor applied to the PID control variable before output
        # Set all relevant parameters to default values
        self._overwrite_parameters(channel_params)
        # Initialize relevant placeholders
        self.data = np.array([]) # Wavelength history buffer (filled by initialize())
        self.sp_data = np.array([]) # Setpoint history buffer (filled by initialize())
def initialize(self, wavelength, display_pts=5000):
"""
Initializes the channel based on the current wavelength
:param wavelength: current wavelength
:param display_pts: number of points to display on the plot
"""
self.data = np.ones(display_pts) * wavelength
self.sp_data = np.ones(display_pts) * self.data[-1]
self.setpoint = self.data[-1]
# self.setpoint_override = True
# self.lock_override = True
# Initialize voltage and error
self.voltage = np.ones(display_pts) * self.current_voltage
# Check that setpoint is reasonable, otherwise set error to 0
self.error = np.ones(display_pts) * (wavelength - self.setpoint)
def initialize_sp_data(self, display_pts=5000):
self.sp_data = np.ones(display_pts) * self.data[-1]
    def update(self, wavelength):
        """
        Updates the data, setpoints, and all locks
        :param wavelength: (float) current wavelength
        """
        # Shift the wavelength history left and append the newest reading.
        self.data = np.append(self.data[1:], wavelength)
        # Pick which setpoint to use
        # If the setpoint override is on, this means we need to try and set the GUI value to self.setpoint
        # if self.setpoint_override:
        #     # Check if the GUI has actually caught up
        #     if self.setpoint == self.gui_setpoint:
        #         self.setpoint_override = False
        #     # Otherwise, the GUI still hasn't implemented the setpoint prescribed by update_parameters()
        # # If setpoint override is off, this means the GUI caught up to our last update_parameters() call, and we
        # # should refrain from updating the value until we get a new value from the GUI
        # else:
        # Check if the GUI has changed, and if so, update the setpoint in the script to match
        if self.gui_setpoint != self.prev_gui_setpoint:
            self.setpoint = copy.deepcopy(self.gui_setpoint)
            # Record setpoint changes in the experiment metadata log.
            metadata = {f'{self.name}_laser_setpoint': self.setpoint}
            self.log.update_metadata(**metadata)
        # Otherwise the GUI is static AND parameters haven't been updated so we don't change the setpoint at all
        # Store the latest GUI setpoint
        self.prev_gui_setpoint = copy.deepcopy(self.gui_setpoint)
        self.sp_data = np.append(self.sp_data[1:], self.setpoint)
        # Now deal with pid stuff
        self.pid.set_parameters(setpoint=0 if self.setpoint is None else self.setpoint)
        # Implement lock
        # Set process variable (last `memory` wavelength samples)
        self.pid.set_pv(pv=self.data[len(self.data) - self.memory:])
        # Set control variable
        self.pid.set_cv()
        # See logic for setpoint above
        # if self.lock_override:
        #     if self.lock == self.gui_lock:
        #         self.lock_override = False
        # else:
        # Mirror the GUI lock checkbox only when it changes.
        if self.gui_lock != self.prev_gui_lock:
            self.lock = copy.deepcopy(self.gui_lock)
        self.prev_gui_lock = copy.deepcopy(self.gui_lock)
        if self.lock:
            try:
                if self.ao is not None:
                    # Apply the PID step, clamped to the allowed voltage range.
                    if self._min_voltage <= self.current_voltage + self.pid.cv * self._gain <= self._max_voltage:
                        self.current_voltage += self.pid.cv * self._gain
                    elif self.current_voltage + self.pid.cv * self._gain < self._min_voltage:
                        self.current_voltage = self._min_voltage
                    else:
                        self.current_voltage = self._max_voltage
                    self.ao['client'].set_ao_voltage(
                        ao_channel=self.ao['channel'],
                        voltages=[self.current_voltage]
                    )
            except EOFError:
                # Connection to the ao server died; drop the handle.
                self.ao = None
        # Update voltage and error data
        self.voltage = np.append(self.voltage[1:], self.current_voltage)
        self.error = np.append(self.error[1:], self.pid.error * self._gain)
def zero_voltage(self):
"""Zeros the voltage (if applicable)"""
try:
if self.ao is not None:
v_set = (self._min_voltage + self._max_voltage) / 2
self.ao['client'].set_ao_voltage(
ao_channel=self.ao['channel'],
voltages=[v_set]
)
self.current_voltage = v_set
except EOFError:
self.ao = None
def _overwrite_parameters(self, channel_params):
""" Sets all internal channel parameters to input
If parameters are not given, they are overwritten to defaults - see implementation below, as well as the
WlmMonitor.set_parameters() docstring for default details
:param channel_params: (dict) dictionary containing all parameters. See WlmMonitor.set_parameters() for details
"""
# Initialize all given attributes, otherwise initialize defaults
if 'channel' in channel_params:
self.number = channel_params['channel']
else:
# By default use channel 1
self.number = 1
if 'name' in channel_params:
self.name = channel_params['name']
else:
# Initialize some random channel name if not given
self.name = 'Channel ' + str(np.random.randint(1000000))
self.curve_name = self.name + ' Frequency' # Name used for identifying the frequency Curve object
self.lock_name = self.name + ' Lock' # Name used for identifying lock Scalar object
self.error_name = self.name + ' Error' # Name used for identifying error Scalar object
if 'setpoint' in channel_params:
self.setpoint = channel_params['setpoint']
else:
self.setpoint = None
self.setpoint_name = self.name + ' Setpoint' # Name used for identifying setpoint Curve object
if 'lock' in channel_params:
self.lock = channel_params['lock']
else:
self.lock = False
if 'memory' in channel_params:
self.memory = channel_params['memory']
else:
self.memory = 20
if 'pid' in channel_params:
self.pid = PID(
p=channel_params['pid']['p'],
i=channel_params['pid']['i'],
d=channel_params['pid']['d'],
memory=self.memory,
setpoint=0 if self.setpoint is None else self.setpoint
)
else:
# Just initialize a default pid module
self.pid = PID()
if 'ao' in channel_params and self.ao_clients is not None:
# Convert ao from string to object using lookup
try:
self.ao = {
'client': self.ao_clients[(
channel_params['ao']['client'],
channel_params['ao']['config']
)],
'channel': channel_params['ao']['channel']
}
try:
self.current_voltage = self.ao['client'].voltage()
except:
self.current_voltage = 0
except KeyError:
# Alert the user that ao initialization failed
self.ao = None
print('Failed to initialize ao for Channel {}'.format(self.number))
# If ao is not given just leave it as None
else:
| |
<filename>MsLightweaverManager.py
import pickle
import numpy as np
import matplotlib.pyplot as plt
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, Fe_atom, FeI_atom, MgII_atom, N_atom, Na_atom, S_atom, CaII_atom
from lightweaver.atmosphere import Atmosphere, ScaleType
from lightweaver.atomic_table import DefaultAtomicAbundance
from lightweaver.atomic_set import RadiativeSet, SpeciesStateTable
from lightweaver.molecule import MolecularTable
from lightweaver.LwCompiled import LwContext
from lightweaver.utils import InitialSolution, planck, NgOptions, ConvergenceError, compute_radiative_losses, integrate_line_losses
import lightweaver.constants as Const
import lightweaver as lw
from typing import List
from copy import deepcopy
from MsLightweaverAtoms import H_6, CaII, H_6_nasa, CaII_nasa
import os
import os.path as path
import time
from radynpy.matsplotlib import OpcFile
from radynpy.utils import hydrogen_absorption
from numba import njit
from pathlib import Path
from scipy.linalg import solve
from scipy.interpolate import interp1d, PchipInterpolator
# from HydroWeno.Simulation import Grid
# from HydroWeno.Advector import Advector
# from HydroWeno.BCs import zero_grad_bc
# from HydroWeno.Weno import reconstruct_weno_nm_z
import warnings
from traceback import print_stack
from weno4 import weno4
from RadynAdvection import an_sol, an_rad_sol, an_gml_sol
import pdb
def weno4_pos(xs, xp, fp, **kwargs):
    # Positivity-preserving WENO4 interpolation: interpolate in log space and
    # exponentiate so the result stays strictly positive for fp > 0.
    # NOTE(review): `weno4_safe` is not among this file's visible imports
    # (only `weno4` is imported from the weno4 package) -- confirm it is
    # defined elsewhere in this module or this will NameError at call time.
    return np.exp(weno4_safe(xs, xp, np.log(fp), **kwargs))
# https://stackoverflow.com/a/21901260
import subprocess
def mslightweaver_revision():
    """Return the git HEAD hash of this source tree.

    Raises ValueError if the tree is not a git repository or if tracked
    files have uncommitted changes.
    """
    repoDir = Path(__file__).parent

    def git(*args):
        # Run a git subcommand in the repo directory and return stripped text.
        return subprocess.check_output(['git'] + list(args), cwd=repoDir).decode('ascii').strip()

    if git('rev-parse', '--is-inside-work-tree') != 'true':
        raise ValueError('Cannot find git info.')

    dirty = git('status', '--porcelain', '--untracked-files=no')
    if len(dirty) > 0:
        raise ValueError('Uncommitted changes to tracked files, cannot procede:\n%s' % dirty)

    return git('rev-parse', 'HEAD')
def check_write_git_revision(outputDir):
    """Record the current git revision in <outputDir>GitRevision.txt.

    Note: outputDir is concatenated directly, so it must already end in a
    path separator.
    """
    # Resolve the revision first so no file is created if the repo is dirty.
    revision = mslightweaver_revision()
    targetPath = outputDir + 'GitRevision.txt'
    with open(targetPath, 'w') as f:
        f.write(revision)
def nr_advect(atmost, i0, eqPops, activeAtomNames, abundances):
    # Advect the active-atom populations from timestep i0 to i0+1 using the
    # analytic solution `an_sol`, then rescale each depth column so the total
    # species number density is consistent with the advected mass density.
    d1 = atmost.d1[i0+1]
    for a in activeAtomNames:
        pop = np.zeros_like(eqPops[a])
        for i in range(pop.shape[0]):
            # Advect each atomic level independently.
            pop[i, :] = an_sol(atmost, i0, eqPops[a][i], tol=1e-8, maxIter=1000)
        # Total number density of species `a` implied by the new mass density.
        nTotal = d1 / (abundances.massPerH * lw.Amu) * abundances[a]
        popCorrectionFactor = nTotal / pop.sum(axis=0)
        print('Max Correction %s: %.2e' % (a, np.abs(1-popCorrectionFactor).max()))
        pop *= popCorrectionFactor
        eqPops[a][...] = pop
class CoronalIrraditation(lw.BoundaryCondition):
    """Upper boundary condition carrying a prescribed downgoing intensity.

    The intensity is injected after construction via `set_bc`; `compute_bc`
    returns a copy of it. (Class name typo preserved for compatibility.)
    """
    def __init__(self):
        # NOTE(cmo): This data needs to be in (mu, toObs) order, i.e. mu[0]
        # down, mu[0] up, mu[1] down...
        self.I = None

    def set_bc(self, I1d):
        """Store the boundary intensity, adding a trailing axis."""
        self.I = np.expand_dims(I1d, axis=2)

    def compute_bc(self, atmos, spect):
        """Return a copy of the stored intensity; raise if never set."""
        if self.I is None:
            raise ValueError('I has not been set (CoronalIrradtion)')
        return np.copy(self.I)
@njit
def time_dep_update_impl(theta, dt, Gamma, GammaPrev, n, nPrev):
    """Advance the populations `n` one timestep with a theta-method update.

    Solves (I - theta*dt*Gamma) n_new = nPrev + (1-theta)*dt*GammaPrev@nPrev
    column-by-column over depth, writing the result into `n` in place.
    Returns the maximum relative change over all levels and depths.
    """
    Nlevel = n.shape[0]
    Nspace = n.shape[1]

    # BUG FIX: this placeholder was previously np.empty_like(Gamma), whose
    # uninitialised contents were read below whenever theta != 1.0 and no
    # previous Gamma was supplied. Zeros make that contribution vanish,
    # reducing the update to the fully-implicit form instead.
    GammaPrev = GammaPrev if GammaPrev is not None else np.zeros_like(Gamma)

    Gam = np.zeros((Nlevel, Nlevel))
    nk = np.zeros(Nlevel)
    nPrevIter = np.zeros(Nlevel)
    nCurrent = np.zeros(Nlevel)
    atomDelta = 0.0

    for k in range(Nspace):
        nCurrent[:] = n[:, k]
        nPrevIter[:] = nPrev[:, k]
        # Implicit part: (I - theta*dt*Gamma_k).
        Gam[...] = -theta * Gamma[:,:, k] * dt
        Gam += np.eye(Nlevel)
        if theta != 1.0:
            # Explicit contribution from the previous rate matrix.
            nk[:] = (1.0 - theta) * dt * GammaPrev[:,:, k] @ nPrevIter + nPrevIter
        else:
            nk[:] = nPrevIter

        nNew = np.linalg.solve(Gam, nk)
        n[:, k] = nNew
        atomDelta = max(atomDelta, np.nanmax(np.abs(1.0 - nCurrent / nNew)))

    return atomDelta
class MsLightweaverManager:
def __init__(self, atmost, outputDir,
atoms, activeAtoms=['H', 'Ca'],
detailedH=False,
detailedHPath=None,
startingCtx=None, conserveCharge=False,
populationTransportMode='Advect',
downgoingRadiation=None,
prd=False):
# check_write_git_revision(outputDir)
self.atmost = atmost
self.outputDir = outputDir
self.conserveCharge = conserveCharge
self.abund = DefaultAtomicAbundance
self.idx = 0
self.nHTot = atmost.d1 / (self.abund.massPerH * Const.Amu)
self.prd = prd
self.updateRhoPrd = False
self.detailedH = detailedH
# NOTE(cmo): If this is None and detailedH is True then the data from
# atmost will be used, otherwise, an MsLw pickle will be loaded from
# the path.
self.detailedHPath = detailedHPath
if populationTransportMode == 'Advect':
self.advectPops = True
self.rescalePops = False
elif populationTransportMode == 'Rescale':
self.advectPops = False
self.rescalePops = True
elif populationTransportMode is None or populationTransportMode == 'None':
self.advectPops = False
self.rescalePops = False
else:
raise ValueError('Unknown populationTransportMode: %s' % populationTransportMode)
self.downgoingRadiation = downgoingRadiation
if startingCtx is not None:
self.ctx = startingCtx
args = startingCtx.arguments
self.atmos = args['atmos']
self.spect = args['spect']
self.aSet = self.spect.radSet
self.eqPops = args['eqPops']
self.upperBc = atmos.upperBc
else:
nHTot = np.copy(self.nHTot[0])
if self.downgoingRadiation:
self.upperBc = CoronalIrraditation()
else:
self.upperBc = None
self.atmos = Atmosphere.make_1d(scale=ScaleType.Geometric, depthScale=np.copy(atmost.z1[0]), temperature=np.copy(atmost.tg1[0]), vlos=np.copy(atmost.vz1[0]), vturb=np.copy(atmost.vturb), ne=np.copy(atmost.ne1[0]), nHTot=nHTot, upperBc=self.upperBc)
# self.atmos.convert_scales()
self.atmos.quadrature(5)
self.aSet = RadiativeSet(atoms)
self.aSet.set_active(*activeAtoms)
if detailedH:
self.aSet.set_detailed_static('H')
# NOTE(cmo): Radyn seems to compute the collisional rates once per
# timestep(?) and we seem to get a much better agreement for Ca
# with the CH rates when H is set to LTE for the initial timestep.
# Might be a bug in my implementation though.
self.spect = self.aSet.compute_wavelength_grid()
self.mols = MolecularTable()
if self.conserveCharge:
self.eqPops = self.aSet.iterate_lte_ne_eq_pops(self.atmos, self.mols)
else:
self.eqPops = self.aSet.compute_eq_pops(self.atmos, self.mols)
self.ctx = lw.Context(self.atmos, self.spect, self.eqPops, initSol=InitialSolution.Lte, conserveCharge=self.conserveCharge, Nthreads=12)
self.atmos.bHeat = np.ones_like(self.atmost.bheat1[0]) * 1e-20
self.atmos.hPops = self.eqPops['H']
np.save(self.outputDir + 'Wavelength.npy', self.ctx.spect.wavelength)
if self.detailedH:
self.eqPops['H'][:] = self.detailed_hydrogen_pops()
if self.downgoingRadiation:
self.upperBc.set_bc(self.downgoingRadiation.compute_downgoing_radiation(self.spect.wavelength, self.atmos))
self.ctx.depthData.fill = True
# self.opac_background()
# NOTE(cmo): Set up background
# self.opc = OpcFile('opctab_cmo_mslw.dat')
# # self.opc = OpcFile()
# opcWvl = self.opc.wavel
# self.opcWvl = opcWvl
# # NOTE(cmo): Find mapping from wavelength array to opctab array, with
# # constant background over the region of each line. Are overlaps a
# # problem here? Probably -- but let's see the spectrum in practice
# # The record to be used is the one in self.wvlIdxs + 4 due to the data
# # layout in the opctab
# self.wvlIdxs = np.ones_like(self.spect.wavelength, dtype=np.int64) * -1
# lineCores = []
# for a in self.aSet.activeSet:
# for l in a.lines:
# lineCores.append(l.lambda0 * 10)
# lineCores = np.array(lineCores)
# lineCoreIdxs = np.zeros_like(lineCores)
# for i, l in enumerate(lineCores):
# closestIdx = np.argmin(np.abs(opcWvl - l))
# lineCoreIdxs[i] = closestIdx
# for a in self.aSet.activeSet:
# for l in a.lines:
# # closestIdx = np.argmin((opcWvl - l.lambda0*10)**2)
# closestCore = np.argmin(np.abs((l.wavelength * 10)[:, None] - lineCores), axis=1)
# closestIdx = lineCoreIdxs[closestCore]
# sub = find_subarray(self.spect.wavelength, l.wavelength)
# self.wvlIdxs[sub:sub + l.wavelength.shape[0]] = closestIdx
# for i, v in enumerate(self.wvlIdxs):
# if v >= 0:
# continue
# closestIdx = np.argmin(np.abs(opcWvl - self.spect.wavelength[i]*10))
# self.wvlIdxs[i] = closestIdx
# self.opctabIdxs = self.wvlIdxs + 4
# NOTE(cmo): Compute initial background opacity
# np.save('chi.npy', self.ctx.background.chi)
# np.save('eta.npy', self.ctx.background.eta)
# np.save('sca.npy', self.ctx.background.sca)
# self.opac_background()
def initial_stat_eq(self, Nscatter=3, NmaxIter=1000, popTol=1e-3, JTol=3e-3):
if self.prd:
self.ctx.update_hprd_coeffs()
for i in range(NmaxIter):
dJ = self.ctx.formal_sol_gamma_matrices()
if i < Nscatter:
continue
delta = self.ctx.stat_equil()
if self.prd:
self.ctx.prd_redistribute()
if self.ctx.crswDone and dJ < JTol and delta < popTol:
print('Stat eq converged in %d iterations' % (i+1))
break
else:
raise ConvergenceError('Stat Eq did not converge.')
    def advect_pops(self):
        # Carry populations (and n_e where applicable) from timestep idx to
        # idx+1, using either a simple density rescale or full advection.
        if self.rescalePops:
            # Cheap alternative to advection: scale everything by the
            # mass-density ratio between the two timesteps.
            adv = self.atmost.d1[self.idx+1] / self.atmost.d1[self.idx]
            neAdv = self.atmos.ne * adv
            self.atmos.ne[:] = neAdv
            for atom in self.aSet.activeAtoms:
                p = self.eqPops[atom.element]
                for i in range(p.shape[0]):
                    pAdv = p[i] * adv
                    p[i, :] = pAdv
        elif self.advectPops:
            # Full advection via the analytic solution (nr_advect above).
            nr_advect(self.atmost, self.idx, self.eqPops, [a.element for a in self.aSet.activeAtoms], self.abund)
            # NOTE(cmo): Guess advected n_e. Will be corrected to be self
            # consistent later (in update_deps if conserveCharge=True). If
            # conserveCharge isn't true then we're using loaded n_e anyway
            # neAdv = interp1d(z0Tracer, np.log10(self.atmos.ne), kind=3, fill_value='extrapolate')(z1)
            # self.atmos.ne[:] = 10**neAdv
def detailed_hydrogen_pops(self):
if not self.detailedH:
raise ValueError('Detailed H pops called without detailedH==True')
if self.detailedHPath:
with open(self.detailedHPath + '/Step_%.6d.pickle' % self.idx, 'rb') as pkl:
step = pickle.load(pkl)
pops = step['eqPops']['H']['n']
else:
pops = self.atmost.nh1[self.idx, :] / (np.sum(self.atmost.nh1[self.idx, :], axis=0) / self.atmos.nHTot)[None, :]
return pops
def detailed_ne(self):
if not self.detailedH:
raise ValueError('Detailed ne called without detailedH==True')
if self.detailedHPath:
with open(self.detailedHPath + '/Step_%.6d.pickle' % self.idx, 'rb') as pkl:
step = pickle.load(pkl)
ne = step['ne']
else:
ne = self.atmost.ne1[self.idx]
return ne
    def save_timestep(self):
        # Serialise the current timestep (populations, emergent intensity,
        # n_e, active lines and their integrated radiative losses) to
        # Step_<idx>.pickle in outputDir; load_timestep reverses this.
        i = self.idx
        with open(self.outputDir + 'Step_%.6d.pickle' % i, 'wb') as pkl:
            # distill_pops is defined elsewhere in this module; presumably it
            # reduces eqPops to plain arrays for pickling -- TODO confirm.
            eqPops = distill_pops(self.eqPops)
            Iwave = self.ctx.spect.I
            lines = []
            for a in self.aSet.activeAtoms:
                lines += self.aSet[a.element].lines
            # Per-line losses integrated over a grid extended by 5 nm.
            losses = compute_radiative_losses(self.ctx)
            lineLosses = integrate_line_losses(self.ctx, losses, lines, extendGridNm=5.0)
            pickle.dump({'eqPops': eqPops, 'Iwave': Iwave,
                         'ne': self.atmos.ne, 'lines': lines,
                         'losses': lineLosses}, pkl)
    def load_timestep(self, stepNum):
        # Restore the manager to a previously saved timestep: reload the
        # pickle written by save_timestep, re-sync the atmosphere arrays from
        # atmost, then refresh the context's derived quantities.
        with open(self.outputDir + 'Step_%.6d.pickle' % stepNum, 'rb') as pkl:
            step = pickle.load(pkl)
        self.idx = stepNum
        self.atmos.temperature[:] = self.atmost.tg1[self.idx]
        self.atmos.vlos[:] = self.atmost.vz1[self.idx]
        if not self.conserveCharge:
            # n_e comes from the detailed data when charge is not conserved.
            self.atmos.ne[:] = self.detailed_ne()
        if self.advectPops or self.rescalePops:
            self.atmos.nHTot[:] = self.nHTot[self.idx]
        self.atmos.bHeat[:] = self.atmost.bheat1[self.idx]
        self.atmos.height[:] = self.atmost.z1[self.idx]
        for name, pops in step['eqPops'].items():
            if pops['n'] is not None:
                self.eqPops.atomicPops[name].pops[:] = pops['n']
            self.eqPops.atomicPops[name].nStar[:] = pops['nStar']
        # The saved n_e overrides whatever was set above.
        self.atmos.ne[:] = step['ne']
        self.ctx.spect.I[:] = step['Iwave']
        self.ctx.update_deps()
def increment_step(self):
self.advect_pops()
self.idx += 1
self.atmos.temperature[:] = self.atmost.tg1[self.idx]
self.atmos.vlos[:] = self.atmost.vz1[self.idx]
if not self.conserveCharge:
self.atmos.ne[:] = self.detailed_ne()
if self.advectPops or self.rescalePops:
self.atmos.nHTot[:] = self.nHTot[self.idx]
self.atmos.bHeat[:] = self.atmost.bheat1[self.idx]
| |
<reponame>codacy-badger/vcstools
#!/usr/bin/env python3
import logging
import argparse
logger = logging.getLogger(__name__)
def find_obsids_meta_pages(params=None):
    """
    Loop over result pages of an MWA metadata 'find' call and collect obs IDs.

    The service returns at most 200 rows per page, so pages are requested
    until a short (or empty) page comes back.
    """
    if params is None:
        params = {'mode':'VOLTAGE_START'}

    obsid_list = []
    current_page = []
    page = 1
    # Always fetch page 1; keep going while pages come back full.
    while page == 1 or len(current_page) == 200:
        params['page'] = page
        logger.debug("Page: {0} params: {1}".format(page, params))
        current_page = getmeta(service='find', params=params)
        if current_page is None:
            # Rare: no observations in the field -> service returns None.
            current_page = []
        else:
            for row in current_page:
                obsid_list.append(row[0])
        page += 1
    return obsid_list
def get_obs_array_phase(obsid):
    """
    Work out the array phase of the input obsid: P1 (phase 1), P2C (phase 2
    compact), P2E (phase 2 extended) or OTH (other).
    """
    phase_info = getmeta(service='con', params={'obs_id':obsid, 'summary':''})
    phase = phase_info[0]
    phase_map = {"PHASE1": "P1", "COMPACT": "P2C", "LB": "P2E", "OTHER": "OTH"}
    if phase in phase_map:
        return phase_map[phase]
    # Unrecognised phase string: log and stop (matches historical behaviour).
    logger.error("Unknown phase: {0}. Exiting".format(phase))
    exit()
def mwa_alt_az_za(obsid, ra=None, dec=None, degrees=False):
    """
    Calculate the altitude, azimuth and zenith angle for an obsid.

    Args:
        obsid  : The MWA observation id (GPS time)
        ra     : The right ascension in HH:MM:SS
        dec    : The declination in HH:MM:SS
        degrees: If True, ra and dec are given in degrees (Default: False)

    Returns:
        (Alt, Az, Za) in degrees.
    """
    from astropy.utils import iers
    iers.IERS_A_URL = 'https://datacenter.iers.org/data/9/finals2000A.all'
    logger.debug(iers.IERS_A_URL)
    from astropy.time import Time
    from astropy.coordinates import SkyCoord, AltAz, EarthLocation
    from astropy import units as u

    obstime = Time(float(obsid),format='gps')

    if ra is None or dec is None:
        # Fall back to the observation's own pointing direction.
        ra, dec = get_common_obs_metadata(obsid)[1:3]

    coord_units = (u.deg, u.deg) if degrees else (u.hourangle, u.deg)
    sky_posn = SkyCoord(ra, dec, unit=coord_units)

    # MWA site coordinates; EarthLocation.of_site would need internet access.
    #earth_location = EarthLocation.of_site('Murchison Widefield Array')
    earth_location = EarthLocation.from_geodetic(lon="116:40:14.93", lat="-26:42:11.95", height=377.8)

    altaz = sky_posn.transform_to(AltAz(obstime=obstime, location=earth_location))
    alt = altaz.alt.deg
    az = altaz.az.deg
    return alt, az, 90. - alt
def get_common_obs_metadata(obs, return_all = False):
    """
    Fetch commonly used metadata for an observation from
    http://ws.mwatelescope.org/metadata/.

    Returns [obs, ra, dec, duration, [xdelays, ydelays], centrefreq, channels];
    with return_all=True the raw metadata dict is also returned.
    """
    logger.info("Obtaining metadata from http://ws.mwatelescope.org/metadata/ for OBS ID: " + str(obs))
    beam_meta_data = getmeta(service='obs', params={'obs_id':obs})

    pointing = beam_meta_data[u'metadata']
    rfstream = beam_meta_data[u'rfstreams'][u"0"]

    ra = pointing[u'ra_pointing']    # sexagesimal
    dec = pointing[u'dec_pointing']
    dura = beam_meta_data[u'stoptime'] - beam_meta_data[u'starttime']   # gps time
    xdelays = rfstream[u'xdelays']
    ydelays = rfstream[u'ydelays']
    channels = rfstream[u'frequencies']
    minfreq = float(min(channels))
    maxfreq = float(max(channels))
    # Coarse channel numbers are in units of 1.28 MHz.
    centrefreq = 1.28 * (minfreq + (maxfreq - minfreq) / 2)

    common = [obs, ra, dec, dura, [xdelays, ydelays], centrefreq, channels]
    if return_all:
        return common, beam_meta_data
    return common
def getmeta(servicetype='metadata', service='obs', params=None):
    """
    Function to call a JSON web service and return a dictionary:
    Given a JSON web service ('obs', find, or 'con') and a set of parameters as
    a Python dictionary, return a Python dictionary containing the result.
    Taken verbatim from http://mwa-lfd.haystack.mit.edu/twiki/bin/view/Main/MetaDataWeb

    Returns None (after logging the problem) on HTTP or network errors.
    """
    import urllib.request
    # BUG FIX: urllib.parse and urllib.error are used below but were never
    # imported; that only worked because urllib.request happens to import
    # them internally (a CPython implementation detail). Import explicitly.
    import urllib.parse
    import urllib.error
    import json

    # Append the service name to this base URL, eg 'con', 'obs', etc.
    BASEURL = 'http://ws.mwatelescope.org/'

    if params:
        # Turn the dictionary into a string with encoded 'name=value' pairs
        data = urllib.parse.urlencode(params)
    else:
        data = ''

    try:
        result = json.load(urllib.request.urlopen(BASEURL + servicetype + '/' + service + '?' + data))
    except urllib.error.HTTPError as err:
        logger.error("HTTP error from server: code=%d, response:\n %s" % (err.code, err.read()))
        return
    except urllib.error.URLError as err:
        logger.error("URL or network error: %s" % err.reason)
        return

    return result
def get_files(obsid, files_meta=None):
    """
    Query the metadata service for all file names of an observation.

    Parameters:
    -----------
    obsid: str
        The ID (gps time) of the observation you are querying
    files_meta: dict, optional
        An existing 'data_files' getmeta result; pass it to skip the
        metadata call.

    Output:
    -------
    files: list
        A list of all the file names
    """
    meta = files_meta
    if meta is None:
        meta = getmeta(servicetype='metadata', service='data_files', params={'obs_id':str(obsid)})
    return list(meta.keys())
def calc_ta_fwhm(freq, array_phase='P2C'):
    """
    Calculates the approximate FWHM of the tied array beam in degrees.

    Parameters:
    -----------
    freq: float
        Frequency in MHz
    array_phase: string
        OPTIONAL - The different array phase (from P1, P2C, P2E) to work out the maximum baseline length. Default = 'P2C'

    Returns:
    --------
    fwhm: float
        FWHM in degrees

    Raises:
    -------
    ValueError for an unrecognised array_phase (previously this fell
    through to a NameError on an undefined max_baseline).
    """
    from scipy.constants import c
    from math import degrees

    # Maximum baseline length in metres per array phase.
    max_baselines = {
        # True max_baseline is 2800 but due to the minimal amount of long
        # baselines the following is more realistic.
        'P1': 2200.,
        # True max_baseline is 700.
        'P2C': 360.,
        'P2E': 5300.,
    }
    try:
        max_baseline = max_baselines[array_phase]
    except KeyError:
        raise ValueError("Unknown array phase: {0}".format(array_phase))

    wavelength = c / (freq * 1e6)
    fwhm = degrees(wavelength / max_baseline)

    return fwhm
def get_channels(obsid, channels=None):
    """
    Return the observation's frequency channel IDs, querying the metadata
    service only when `channels` was not already supplied.
    """
    if channels is not None:
        return channels
    print("Obtaining frequency channel data from http://mwa-metadata01.pawsey.org.au/metadata/"
          "for OBS ID: {}".format(obsid))
    beam_meta_data = getmeta(service='obs', params={'obs_id':obsid})
    return beam_meta_data[u'rfstreams'][u"0"][u'frequencies']
def is_number(s):
    """Return True if *s* can be parsed as a base-10 integer.

    Returns False for non-numeric strings and for float-like strings such
    as '1.5'. Also returns False (instead of raising TypeError, as the
    previous version did) for inputs int() cannot accept at all, e.g. None.
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
def obs_max_min(obsid, meta=None):
    """
    Query the metadata for the observation's files and return the GPS times
    of the first and last file as (obs_start, obs_end).

    The `meta` parameter is currently unused but kept for interface
    compatibility with existing callers.
    """
    # GPS times occupy characters 11:21 of each file name; skip names where
    # that slice is not numeric.
    times = [f[11:21] for f in get_files(obsid) if is_number(f[11:21])]
    return int(min(times)), int(max(times))
def write_obs_info(obsid):
    """
    Write an observation-info summary for *obsid* to <obsid>_info.txt in the
    current directory.
    """
    data_dict = getmeta(service='obs', params={'obs_id':str(obsid), 'nocache':1})
    filename = str(obsid)+"_info.txt"
    logger.info("Writing to file: {0}".format(filename))
    channels = data_dict["rfstreams"]["0"]["frequencies"]
    # Coarse channel numbers are in units of 1.28 MHz.
    centre_freq = ( min(channels) + max(channels) ) / 2. * 1.28
    array_phase = get_obs_array_phase(obsid)
    start, stop = obs_max_min(obsid, meta=data_dict)
    # FIX: use a context manager so the handle is closed even if a metadata
    # field is missing and a write raises (was open()/close() with "w+").
    with open(filename, "w+") as f:
        f.write("------------------------- Obs Info --------------------------\n")
        f.write("Obs Name: {}\n".format(data_dict["obsname"]))
        f.write("Creator: {}\n".format(data_dict["rfstreams"]["0"]["creator"]))
        f.write("Array phase: {}\n".format(array_phase))
        if array_phase != 'OTH':
            f.write("~FWHM (arcminute) {:4.2f}\n".format(calc_ta_fwhm(centre_freq,
                                                         array_phase=array_phase)*60.))
        f.write("Start time: {}\n".format(start))
        f.write("Stop time: {}\n".format(stop))
        f.write("Duration (s): {}\n".format(stop-start))
        f.write("RA Pointing (deg): {}\n".format(data_dict["metadata"]["ra_pointing"]))
        f.write("DEC Pointing (deg): {}\n".format(data_dict["metadata"]["dec_pointing"]))
        f.write("Channels: {}\n".format(channels))
        f.write("Centrefreq (MHz): {}\n".format(centre_freq))
def get_best_cal_obs(obsid):
    """
    For the input MWA observation ID find all calibration observations within 2 days
    that have the same observing channels and list them from closest in time to furthest.

    Parameters
    ----------
    obsid: int
        The MWA observation ID (gps time)

    Returns
    -------
    cal_ids: list of lists
        All calibration observations within 2 days that have the same observing channels and
        list them from closest in time to furthest
        [[obsid, mins_away, cal_target]]
    """
    from operator import itemgetter

    obs_meta = getmeta(params={'obs_id':str(obsid)})
    channels = obs_meta[u'rfstreams'][u"0"][u'frequencies']
    cenchan = channels[12]
    # 24 contiguous coarse channels span exactly 23 channel numbers.
    if channels[-1] - channels[0] == 23:
        contig = 1
    else:
        contig = 0
    two_days_secs = 2*24*60*60

    all_cals = find_obsids_meta_pages(params={'calibration':1,
                                              'mintime': obsid-two_days_secs,
                                              'maxtime': obsid+two_days_secs,
                                              'cenchan': cenchan,
                                              'contigfreq': contig,
                                              'dataquality': 126})

    cal_info = []
    for cal in all_cals:
        # get the cal metadata
        cal_meta = getmeta(params={'obs_id':str(cal), 'nocache':1})

        # check there are a factor of 24 files (no gpu boxes are down)
        gpubox_files = []
        # BUG FIX: this call previously used the undefined name `obsID`
        # (NameError at runtime); the files of the candidate calibrator
        # `cal` are what is wanted here.
        cal_files_meta = getmeta(service='data_files', params={'obs_id':str(cal)})
        for f in cal_files_meta.keys():
            if 'gpubox' in f:
                gpubox_files.append(f)
        if len(gpubox_files)%24 != 0:
            continue

        # calculate the time away from the obs and append it to the list
        cal_info.append([cal, abs(obsid-cal)/60., cal_meta['obsname']])

    # sort by time
    cal_info = sorted(cal_info, key=itemgetter(1))

    return cal_info
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="""Returns information on an input Obs ID""")
parser.add_argument("obsid", type=int, help="Input Observation ID")
parser.add_argument("-w", "--write", action="store_true", help="OPTIONAL - Use to write results to file.")
parser.add_argument("-c", "--cal_best", action="store_true", help="If this option is used it will list "
"calibration observations within 2 days that have the same observing channels and "
"list them from closest in time to furthest.")
args = parser.parse_args()
if args.write:
write_obs_info(args.obsid)
else:
data_dict = getmeta(params={"obsid":args.obsid, 'nocache':1})
channels = data_dict["rfstreams"]["0"]["frequencies"]
centre_freq = ( min(channels) + max(channels) ) / 2. * 1.28
array_phase = get_obs_array_phase(args.obsid)
start, stop = obs_max_min(args.obsid, meta=data_dict)
print("------------------------- Obs Info --------------------------")
print("Obs Name: {}".format(data_dict["obsname"]))
print("Creator: {}".format(data_dict["rfstreams"]["0"]["creator"]))
print("Array phase: {}".format(array_phase))
if array_phase != 'OTH':
print("~FWHM (arcminute) {:4.2f}".format(calc_ta_fwhm(centre_freq,
array_phase=array_phase)*60.))
print("Start time: {}".format(start))
print("Stop time: {}".format(stop))
print("Duration (s): {}".format(stop-start))
print("RA Pointing (deg): {}".format(data_dict["metadata"]["ra_pointing"]))
print("DEC Pointing (deg): {}".format(data_dict["metadata"]["dec_pointing"]))
print("Channels: {}".format(data_dict["rfstreams"]["0"]["frequencies"]))
print("Centrefreq (MHz): {}".format(centre_freq))
if args.cal_best:
all_cals = get_best_cal_obs(args.obsid)
print()
print("{:14}|{:8}|{}".format("Calibration ID", "Hrs away", | |
totale = importo_totale+imposta_totale
# print "DDT NUMERO : {0} TOTALE {1}".format(id_ddt,totale)
return totale
def ritorna_int_calcola_totale_iva_inclusa_da_ddt(id_ddt):
    # Return the VAT-inclusive total of a saved DDT (delivery note) as a
    # plain number; the calcola_* sibling returns a locale-formatted string.
    rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == id_ddt).select()
    # print "DDT ID : ",id_ddt
    totale = 0
    importo_totale = 0
    imposta_totale = 0
    for row in rows:
        # Comment rows carry no amounts; only real article rows count.
        if not "commento" in row.codice_articolo:
            id_ordine = row.id_ordine
            try:
                importo = saved_importo = float(row.quantita) * float(row.prezzo)
                # The Money formatting below is unused for the arithmetic;
                # only saved_importo feeds the totals.
                importo = Money(str(importo),"EUR")
                importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
                prezzo = str(row.prezzo).replace(".",",")
                codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
                percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
                importo_totale += saved_importo
                imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
            except:
                # NOTE(review): bare except silently skips rows with bad
                # numeric data or unknown VAT codes -- confirm intended.
                pass
    totale = importo_totale+imposta_totale
    # print "DDT NUMERO : {0} TOTALE {1}".format(id_ddt,totale)
    return totale
def calcola_totale_iva_inclusa_da_ddt(id_ddt):
    # Return the VAT-inclusive total of a saved DDT formatted as an Italian
    # EUR string; comment rows are excluded directly in the query here.
    # (Python 2 code: bare print statements.)
    print "Dentro qui"
    print "DDT ID : ",id_ddt
    rows = db((db.saved_righe_in_ddt_cliente.saved_ddt_id == id_ddt) & (db.saved_righe_in_ddt_cliente.codice_articolo !="commento")).select()
    print "DDT ID : ",id_ddt
    totale = 0
    importo_totale = 0
    imposta_totale = 0
    print "sono qui"
    for row in rows:
        id_ordine = row.id_ordine
        try:
            importo = saved_importo = float(row.quantita) * float(row.prezzo)
            # The Money formatting of `importo` is display-only; the totals
            # are accumulated from saved_importo.
            importo = Money(str(importo),"EUR")
            importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
            prezzo = str(row.prezzo).replace(".",",")
            codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
            percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["percentuale_iva"]
            importo_totale += saved_importo
            imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
        except Exception,e:
            # Row-level failures are logged and skipped.
            print e
            pass
    totale = importo_totale+imposta_totale
    totale = Money(str(totale),"EUR")
    totale = totale.format("it_IT").encode('ascii', 'ignore').decode('ascii')
    print "Totale calcolato = ",totale
    return totale
def fatturazione_differita():
    # Deferred invoicing, step 1: pick the customer and date range, link the
    # customer id to the new fattura_cliente row, then continue to step 2.
    # (Returns locals() for the web2py view, so local names are interface.)
    fields = ['nome_cliente','dal','al']
    cliente_form = SQLFORM(db.fattura_cliente,formname='cliente_form',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # Reset this user's working set of DDTs to invoice.
        db(db.ddt_da_fatturare.user_id == auth.user_id).delete()
        row = db(db.fattura_cliente.id == cliente_form.vars.id).select().first()
        row.update_record(id_cliente = id_cliente.id)
        redirect(URL('fatturazione_differita_2',args=cliente_form.vars.id))
    return locals()
def fatturazione_istantanea():
    # Instant invoicing, step 1: choose the customer on a ddt_cliente form,
    # attach the customer id, clear the scratch rows table and go to step 2.
    # (Returns locals() for the web2py view, so local names are interface.)
    fields = ['nome_cliente']
    cliente_form = SQLFORM(db.ddt_cliente,formname='cliente_form',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # print cliente_form.vars.id #LAST IMSERTED ID
        row = db(db.ddt_cliente.id == cliente_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        row.update_record(id_cliente = id_cliente.id)
        # Clear the scratch table of invoice rows before starting fresh.
        db(db.righe_in_fattura_istantanea).delete()
        redirect(URL('fatturazione_istantanea_2',args=id_cliente.id))
    return locals()
def nota_di_accredito():
    # Credit note, step 1: same flow as fatturazione_istantanea (it reuses
    # the ddt_cliente form) but redirects to the credit-note step 2.
    # (Returns locals() for the web2py view, so local names are interface.)
    fields = ['nome_cliente']
    cliente_form = SQLFORM(db.ddt_cliente,formname='cliente_form',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # print cliente_form.vars.id #LAST IMSERTED ID
        row = db(db.ddt_cliente.id == cliente_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        row.update_record(id_cliente = id_cliente.id)
        # Clear the scratch table of invoice rows before starting fresh.
        db(db.righe_in_fattura_istantanea).delete()
        redirect(URL('nota_di_accredito_2',args=id_cliente.id))
    return locals()
def ddt_clienti():
    # Customer DDT (delivery note), step 1: choose the customer, attach its
    # id to the new ddt_cliente row, clear this user's scratch rows, go to
    # step 2. (Returns locals() for the web2py view.)
    fields = ['nome_cliente']
    cliente_form = SQLFORM(db.ddt_cliente,formname='cliente_form',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # print cliente_form.vars.id #LAST IMSERTED ID
        row = db(db.ddt_cliente.id == cliente_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        row.update_record(id_cliente = id_cliente.id)
        # Only this user's scratch rows are deleted.
        db(db.righe_in_ddt_cliente.user_id == auth.user_id).delete()
        redirect(URL('ddt_clienti_2',args=cliente_form.vars.id))
    return locals()
def mod_ddt_clienti():
    # Modify an existing customer DDT: only the customer is selected; the
    # record update and scratch-table cleanup are intentionally disabled
    # (commented out) in this variant. (Returns locals() for the view.)
    # NOTE(review): unlike ddt_clienti this redirects with the CUSTOMER id
    # (id_cliente.id), not the form row id -- confirm that is intended.
    fields = ['nome_cliente']
    cliente_form = SQLFORM(db.ddt_cliente,formname='cliente_form_mod',formstyle = 'table3cols',fields=fields)
    if cliente_form.process().accepted:
        id_cliente = db(db.clienti.nome == cliente_form.vars.nome_cliente).select().first()
        # print "ID CLIENTE = ",id_cliente
        # print cliente_form.vars.id #LAST IMSERTED ID
        row = db(db.ddt_cliente.id == cliente_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        # row.update_record(id_cliente = id_cliente.id)
        # db(db.righe_in_ddt_cliente.user_id == auth.user_id).delete()
        redirect(URL('mod_ddt_clienti_2',args=id_cliente.id))
    return locals()
def ddt_fornitori():
    # Supplier DDT, step 1: choose the supplier, attach its id to the new
    # ddt_fornitore row, then continue to step 2. (Returns locals() for the
    # web2py view, so local names are interface.)
    fields = ['nome_fornitore']
    fornitore_form = SQLFORM(db.ddt_fornitore,formname='fornitore_form',formstyle = 'table3cols',fields=fields)
    if fornitore_form.process().accepted:
        # print fornitore_form.vars.nome_fornitore
        id_fornitore = db(db.fornitori.nome == fornitore_form.vars.nome_fornitore).select().first()
        row = db(db.ddt_fornitore.id == fornitore_form.vars.id).select().first()
        # print "SELECTED ROW : ",row
        row.update_record(id_fornitore = id_fornitore.id)
        redirect(URL('ddt_fornitori_2',args=fornitore_form.vars.id))
    return locals()
def ddt_clienti_old():
    # Legacy DDT controller kept for reference: a grid over customer orders
    # with a per-row "Crea bolla" link.
    links=[lambda row: A(XML('Crea bolla'),_class='button btn btn-default',_href=URL('dettaglio_bolla',args=row.id))]
    # NOTE(review): `fields` is built but never passed to the grid below --
    # confirm whether it should be supplied via fields=fields.
    fields=[db.righe_in_ordine_cliente.n_riga,db.righe_in_ordine_cliente.codice_articolo,db.righe_in_ordine_cliente.quantita,db.righe_in_ordine_cliente.prezzo,db.righe_in_ordine_cliente.sconti,db.righe_in_ordine_cliente.codice_iva,db.righe_in_ordine_cliente.evasione]
    righe_in_ordine_cliente_form = SQLFORM.grid(db.ordine_cliente,formname='ordini_clienti',maxtextlength=100,create=False,editable=True, deletable=True,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=True,links=links)
    return dict(righe_in_ordine_cliente_form=righe_in_ordine_cliente_form)
def gestione_piano_dei_conti():
    """Placeholder controller for the chart-of-accounts page."""
    return {"message": "ok"}
def anagrafica_codici_iva():
    """Grid controller for the VAT-code registry."""
    codici_iva_form = SQLFORM.grid(db.anagrafica_codici_iva, formname='codici_iva', maxtextlength=100,
                                   create=True, deletable=True, searchable=True, sortable=True,
                                   paginate=5, formstyle='table3cols', csv=True,
                                   exportclasses=export_classes)
    # Hide the web2py record-counter widget.
    codici_iva_form.element('.web2py_counter', replace=None)
    return dict(codici_iva_form=codici_iva_form)
def anagrafica_banche():
    """Grid controller for the bank registry, with widened text inputs."""
    anagrafica_banche_form = SQLFORM.grid(db.anagrafica_banche, formname='anagrafica_banche_form',
                                          maxtextlength=100, create=True, deletable=True,
                                          searchable=True, sortable=True, paginate=5,
                                          formstyle='table3cols', csv=True,
                                          exportclasses=export_classes)
    # Hide the web2py record-counter widget.
    anagrafica_banche_form.element('.web2py_counter', replace=None)
    # Widen the inputs when the edit form is shown; on the plain grid view
    # these elements do not exist, hence the try/except.
    try:
        anagrafica_banche_form.element('input[name=descrizione_sottoconto]')['_style'] = 'width:350px;height:25px;'
        anagrafica_banche_form.element('input[name=descrizione]')['_style'] = 'width:350px;height:25px;'
    except:
        pass
    return dict(anagrafica_banche_form=anagrafica_banche_form)
def anagrafica_banche_azienda():
    """Grid controller for the company's own banks, with widened inputs."""
    anagrafica_banche_form = SQLFORM.grid(db.anagrafica_banche_azienda, formname='anagrafica_banche_form',
                                          maxtextlength=100, create=True, deletable=True,
                                          searchable=True, sortable=True, paginate=5,
                                          formstyle='table3cols', csv=True)
    # Hide the web2py record-counter widget.
    anagrafica_banche_form.element('.web2py_counter', replace=None)
    # Widen the inputs when the edit form is shown; on the plain grid view
    # these elements do not exist, hence the try/except.
    try:
        anagrafica_banche_form.element('input[name=descrizione_sottoconto]')['_style'] = 'width:350px;height:25px;'
        anagrafica_banche_form.element('input[name=descrizione]')['_style'] = 'width:350px;height:25px;'
    except:
        pass
    return dict(anagrafica_banche_form=anagrafica_banche_form)
def fatture_form():
    # Grid of saved invoices that require RiBa; each row gets an
    # "Aggiungi fattura" button wired to the client-side aggiungiFattura().
    # (Returns locals() for the web2py view.)
    fields = [db.fatture_salvate.data_fattura,db.fatture_salvate.numero_fattura,db.fatture_salvate.totale,db.fatture_salvate.nome_cliente,db.fatture_salvate.scadenza]
    """Patch per sistemare la data
    """
    # Data patch ("fix the date"): snap every invoice date to the last day
    # of its month. NOTE(review): this rewrites records on every page view.
    x = datetime.datetime(1999, 5, 17)
    fatture=db(db.fatture_salvate.scadenza > x).select()
    for fattura in fatture:
        original_start_date = fattura.data_fattura
        if original_start_date is not None:
            day_start,day_end = monthrange(original_start_date.year, original_start_date.month)
            d = str(day_end)+"/"+str(original_start_date.month)+"/"+str(original_start_date.year)
            start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
            # print original_start_date,start_date
            fattura.data_fattura = start_date
            fattura.update_record()
    if len(request.args) > 1 and ('edit' in request.args):
        # When editing, lock down the fields that must not change.
        db.fatture_salvate.numero_fattura.writable=False
        db.fatture_salvate.id_ddt.writable=False
        db.fatture_salvate.id_ddt.readable=False
        db.fatture_salvate.id_cliente.writable=False
        db.fatture_salvate.id_cliente.readable=False
        db.fatture_salvate.id_cliente.writable=False
        db.fatture_salvate.id_cliente.readable=False
        db.fatture_salvate.richiede_riba.writable=False
        db.fatture_salvate.richiede_riba.readable=False
        db.fatture_salvate.riba_emessa.writable=False
        db.fatture_salvate.riba_emessa.readable=False
    links=[lambda row: BUTTON("Aggiungi fattura",_onclick=XML('aggiungiFattura('+str(row.id)+')'),_class='button btn btn-default')]
    fatture_form = SQLFORM.grid(db.fatture_salvate.richiede_riba=='T',formname='fatture',maxtextlength=100,create=False, deletable=False,searchable=True,sortable=True,paginate=5, formstyle = 'table3cols',csv=True,fields=fields,links=links,exportclasses=export_classes)
    return locals()
@service.jsonrpc
@service.jsonrpc2
def successivo_riba(banca):
    """Store the chosen bank for the next RiBa step.

    Requires at least one selected invoice in ``fatture_scelte``.
    """
    if db(db.fatture_scelte).isempty():
        response.flash="Selezionare almeno una fattura"
        # HACK: deliberately raises ZeroDivisionError so the JSON-RPC call
        # fails client-side -- consider raising an explicit exception instead.
        return 1/0
    # temp_banca is used as a single-row scratch table: clear, then insert.
    db(db.temp_banca).delete()
    db.temp_banca.insert(banca=banca)
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def accorpa(id,val):
    """Toggle the 'accorpa' (merge) flag on a selected-invoice row."""
    row = db(db.fatture_scelte.id == id).select().first()
    # `val` arrives as a string from the client; any value containing
    # "True" enables the flag, everything else disables it.
    row.update_record(accorpa="True" in str(val))
    return "ok"
def crea_indici_riba():
    """Build the RiBa emission index from the selected invoices.

    Returns a list of ``[id_cliente, [id_fattura, ...]]`` pairs: invoices
    flagged ``accorpa`` ('T') for the same client are merged into a single
    entry; all other invoices get a one-invoice entry of their own.
    """
    lista_riba = []
    for f in db(db.fatture_scelte).select():
        id_cliente = f.id_cliente
        lista_fatture = []
        da_accorpare = db((db.fatture_scelte.id_cliente == id_cliente) &
                          (db.fatture_scelte.accorpa == 'T'))
        if da_accorpare.count() < 2:
            # Nothing to merge for this client: entry holds just this invoice.
            lista_fatture.append(f.id_fattura)
        else:
            for item in da_accorpare.select():
                # BUGFIX: compare invoice ids, not Row objects.  The old test
                # `not item in lista_fatture` compared a Row against a list of
                # ids, so it could never match and duplicates slipped through.
                if item.id_fattura not in lista_fatture:
                    lista_fatture.append(item.id_fattura)
        cliente = [id_cliente, lista_fatture]
        # Merged clients appear once per selected invoice in the outer loop;
        # this membership test keeps a single entry per client.
        if cliente not in lista_riba:
            lista_riba.append(cliente)
    return lista_riba
def ritorna_dettaglio_fattura(id_fattura):
    """Return an HTML summary line for the invoice with the given id."""
    fattura = db(db.fatture_salvate.id == id_fattura).select().first()
    return ("Fattura numero " + fattura.numero_fattura
            + " Del " + fattura.data_fattura.strftime("%d/%m/%Y")
            + " Tot. " + ritorna_prezzo_europeo(fattura.totale)
            + " <b>Scadenza</b> " + fattura.scadenza.strftime("%d/%m/%Y"))
def ritorna_nome_cliente_da_id(id):
    """Look up a client's name by primary key."""
    cliente = db(db.clienti.id == id).select().first()
    return cliente.nome
def ritorna_abi_nostra_banca_scelta():
    """ABI code of the company bank currently stored in ``temp_banca``."""
    scelta = db(db.temp_banca).select().first().banca
    riga = db(db.anagrafica_banche_azienda.descrizione == scelta).select().first()
    return riga.codice_abi
def ritorna_cab_nostra_banca_scelta():
    """CAB code of the company bank currently stored in ``temp_banca``."""
    scelta = db(db.temp_banca).select().first().banca
    riga = db(db.anagrafica_banche_azienda.descrizione == scelta).select().first()
    return riga.codice_cab
def ritorna_scadenza_e_totale_fattura_per_riba(id_fattura):
    """Return ``(scadenza, totale)`` for an invoice.

    ``scadenza`` is formatted as 'ddmmyy', the RiBa record date format.
    """
    fattura = db(db.fatture_salvate.id == id_fattura).select().first()
    return fattura.scadenza.strftime("%d%m%y"), fattura.totale
def ritorna_abi_cab_da_cliente_id(cliente_id):
    """Return ``(ABI, CAB)`` codes for a client's bank.

    Falls back to empty strings when the bank record cannot be found.
    BUGFIX: the old code returned ``d.codice_abi, d.codice_cab`` directly,
    so the ``""`` defaults were dead and a failed lookup raised
    NameError/AttributeError at the return statement.
    """
    codice_banca = db(db.clienti.id == cliente_id).select().first().codice_banca
    codice_abi = ""
    codice_cab = ""
    try:
        d = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
        codice_abi = d.codice_abi
        codice_cab = d.codice_cab
    except AttributeError:
        # first() returned None: no bank record matches `codice_banca`.
        pass
    return codice_abi, codice_cab
def truncate_float(number, length):
    """Truncate ``number`` to ``length`` decimal digits (toward zero).

    ``length`` must be an integer; digits beyond it are dropped, not rounded.
    """
    scale = pow(10, length)
    return int(number * scale) / scale
def crea_file_riba():
"""Numero Univoco per ogni file riba creato?"""
try:
numero_disposizione = db(db.numero_disposizioni_riba).select().first().numero
numero_disposizione = int(numero_disposizione)
except:
numero_disposizione = 1
"""Contenitore per il flusso CBI"""
flow = wrapper.Flow()
flow.header = wrapper.Record('IB')
flow.footer = wrapper.Record('EF')
codice_assegnato_dalla_sia_alla_azienda_emittente ="60I33"
codice_abi_banca_assuntrice = ritorna_abi_nostra_banca_scelta()
codice_cab_banca_assuntrice = ritorna_cab_nostra_banca_scelta()
data_creazione = datetime.datetime.now().date().strftime("%d/%m/%y").replace("/","")
nome_supporto = "OpenGest"
codice_divisa = "E"
flow.header['mittente'] = codice_assegnato_dalla_sia_alla_azienda_emittente
flow.header['ricevente'] = codice_abi_banca_assuntrice
flow.header['data_creazione'] = data_creazione
flow.header['nome_supporto'] = nome_supporto
flow.header['codice_divisa'] = codice_divisa
flow.footer['mittente']=codice_assegnato_dalla_sia_alla_azienda_emittente
flow.footer['ricevente']=codice_abi_banca_assuntrice
flow.footer['data_creazione']=data_creazione
flow.footer['nome_supporto']=nome_supporto
flow.footer['codice_divisa']=codice_divisa
numero_emissioni = crea_indici_riba()
# print "NUMERO EMISSIONI = {0} ".format(len(numero_emissioni))
flow.footer['numero_disposizioni']=str(len(numero_emissioni)).zfill(7)
totalissimo = 0
flow.disposals = []
for numero_progressivo in range(1,len(numero_emissioni) +1):
"""Contiene tutti e 7 i record"""
disposizione = wrapper.Disposal()
# print "QUI"
"""instanza ai vari record cbi"""
first_record = wrapper.Record('14')
second_record = wrapper.Record('20')
third_record = wrapper.Record('30')
fourth_record = wrapper.Record('40')
fifth_record = wrapper.Record('50')
fifty_one = wrapper.Record('51')
seventieth_record = wrapper.Record('70')
emissione_corrente = numero_emissioni[numero_progressivo - 1]
cliente_id = emissione_corrente[0]
fatture = emissione_corrente[1]
"""
Raccolta dati per il record 14 first_record
"""
codice_abi_domiciliaria,codice_cab_domiciliaria=ritorna_abi_cab_da_cliente_id(cliente_id)
codice_cliente_debitore = cliente_id
# print ritorna_abi_cab_da_cliente_id
importo_della_ricevuta_in_centesimi = 0
riferimento_fattura = ""
for id_fattura in fatture:
data_pagamento,totale = ritorna_scadenza_e_totale_fattura_per_riba(id_fattura)
importo_della_ricevuta_in_centesimi += float(totale)
totalissimo += importo_della_ricevuta_in_centesimi
riferimento_fattura+= db(db.fatture_salvate.id == id_fattura).select().first().numero_fattura+" del "+db(db.fatture_salvate.id == id_fattura).select().first().data_fattura.strftime("%d/%m/%Y") + " "
importo_della_ricevuta_in_centesimi = '%.2f' % round(importo_della_ricevuta_in_centesimi,2)
importo_della_ricevuta_in_centesimi = importo_della_ricevuta_in_centesimi.replace(".","").zfill(13)
# print "importo : {0}".format(importo_della_ricevuta_in_centesimi)
first_record['numero_progressivo']=str(numero_progressivo).zfill(7)
first_record['data_pagamento']=data_pagamento
first_record['importo']=str(importo_della_ricevuta_in_centesimi)
first_record['codice_abi_banca']=codice_abi_banca_assuntrice
first_record['cab_banca']=codice_cab_banca_assuntrice
first_record['codice_abi_domiciliaria']=codice_abi_domiciliaria
first_record['codice_cab_domiciliaria']=codice_cab_domiciliaria
first_record['codice_azienda']=codice_assegnato_dalla_sia_alla_azienda_emittente
first_record['codice_cliente_debitore']=codice_cliente_debitore
first_record['codice_divisa']=codice_divisa
first_record['causale']="30000"
first_record['segno']="-"
first_record['tipo_codice']="4"
second_record['numero_progressivo']=str(numero_progressivo).zfill(7)
second_record['1_segmento']="Microcarp"
second_record['2_segmento']="Strada statale 416"
second_record['3_segmento']="26020 Castelleone (CR)"
second_record['4_segmento']="Italia"
dati_cliente = db(db.clienti.id == cliente_id).select().first()
third_record['numero_progressivo'] = str(numero_progressivo).zfill(7)
third_record['codice_fiscale_cliente'] = dati_cliente.codice_fiscale
third_record['1_segmento'] = dati_cliente.nome[:27]
third_record['2_segmento'] = ""
fourth_record['numero_progressivo'] = str(numero_progressivo).zfill(7)
fourth_record['indirizzo'] = dati_cliente.indirizzo
fourth_record['cap'] = dati_cliente.cap
fourth_record['comune_e_sigla_provincia'] = dati_cliente.provincia
fourth_record['completamento_indirizzo'] = ""
fourth_record['codice_paese'] = "IT"
riferimento_fattura =(riferimento_fattura[:30] + '..') if len(riferimento_fattura) > 30 else riferimento_fattura
fifth_record['numero_progressivo'] =str(numero_progressivo).zfill(7)
fifth_record['1_segmento'] = "R.F. " + riferimento_fattura
fifth_record['2_segmento'] = "IMPORTO " + importo_della_ricevuta_in_centesimi
fifth_record['codifica_fiscale_creditore'] = str(dati_cliente.partita_iva)
fifty_one['numero_progressivo'] = str(numero_progressivo).zfill(7)
fifty_one['numero_ricevuta'] = str(numero_disposizione).zfill(10)
fifty_one['denominazione_creditore'] = "MICROCARP | |
<filename>trident/callbacks/lr_schedulers.py
"""Learning Rate Scheduler Callbacks"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import math
import random
import warnings
import types
import numpy as np
from trident.backend.common import *
from trident.backend.common import get_backend
from trident.callbacks.callback_base import CallbackBase
_session = get_session()
if get_backend()=='pytorch':
from trident.backend.pytorch_backend import save, load
from trident.backend.pytorch_ops import to_numpy,to_tensor,pow,clip
elif get_backend()=='tensorflow':
from trident.backend.tensorflow_backend import save, load
from trident.backend.tensorflow_ops import to_numpy,to_tensor,pow,clip
__all__ = ['AdjustLRCallback','ReduceLROnPlateau','reduce_lr_on_plateau','LambdaLR','lambda_lr','RandomCosineLR','PolyLR','random_cosine_lr','CosineLR','cosine_lr','OnceCycleLR','StepLR']
class AdjustLRCallbackBase(CallbackBase):
    """Base class for learning-rate scheduler callbacks.

    Subclasses call :meth:`adjust_learning_rate` to apply a new learning
    rate to the optimizer held in the training context.
    """
    def __init__(self):
        super(AdjustLRCallbackBase, self).__init__()
        # Reference learning rate(s); subclasses may overwrite these.
        self.base_lr = 1e-3
        self.base_lrs = [1e-3]

    def adjust_learning_rate(self, training_context, new_lr, verbose=True):
        """Set ``new_lr`` on the optimizer if it differs from the current lr.

        NOTE(review): only ``param_groups[0]`` is updated -- optimizers with
        several parameter groups keep the other groups' lr unchanged.
        """
        old_lr = training_context['optimizer'].lr
        if old_lr != new_lr:
            training_context['optimizer'].param_groups[0]['lr'] = new_lr
            training_context['current_lr'] = new_lr
            if verbose:
                # Typo fix in the log message: "form" -> "from".
                print('learning rate changed! ( from {0:.3e} to {1:.3e})'.format(old_lr, new_lr))
class AdjustLRCallback(AdjustLRCallbackBase):
    """Set the learning rate to ``new_lr`` at a fixed epoch or batch index."""

    def __init__(self, index: int, unit: str = 'epoch', new_lr: float = 1e-3):
        super().__init__()
        self.unit = unit
        self.index = index
        self.new_lr = new_lr

    def on_batch_end(self, training_context):
        if self.unit != 'batch':
            return
        if training_context['steps'] == self.index:
            self.adjust_learning_rate(training_context, self.new_lr)

    def on_epoch_end(self, training_context):
        if self.unit != 'epoch':
            return
        if training_context['current_epoch'] == self.index:
            self.adjust_learning_rate(training_context, self.new_lr)
class LRFinder(AdjustLRCallbackBase):
    """
    Plots the change of the loss function of a model when the learning rate
    is exponentially increasing.

    See for details:
    https://towardsdatascience.com/estimating-optimal-learning-rate-for-a-deep-neural-network-ce32f2556ce0
    """
    def __init__(self, start_lr=1e-7, end_lr=100, n_skip_beginning=10, n_skip_end=5, sma=1):
        # BUGFIX: the base-class initializer was never called, so attributes
        # set by AdjustLRCallbackBase/CallbackBase (base_lr, base_lrs, ...)
        # were missing until on_training_start assigned some of them.
        super().__init__()
        self.losses = []  # loss recorded at every batch
        self.lrs = []  # learning rate used for every batch
        self.best_loss = 1e9  # lowest loss seen so far
        self.start_lr = start_lr
        self.end_lr = end_lr
        self.n_skip_beginning = n_skip_beginning
        self.n_skip_end = n_skip_end
        self.sma = sma  # moving-average window for the derivative plot

    def on_training_start(self, training_context):
        """Compute the per-batch lr multiplier and snapshot the model."""
        num_batches = training_context['total_epoch'] * training_context['total_batch']
        # Multiplier so that lr grows exponentially from start_lr to end_lr
        # over the whole run.
        self.lr_mult = (float(self.end_lr) / float(self.start_lr)) ** (float(1) / float(num_batches))
        # Remember the original learning rate
        self.base_lr = training_context['optimizer'].lr
        save(training_context['current_model'].state_dict(), 'Models/state_dict.pth')
        # Set the initial learning rate
        self.adjust_learning_rate(training_context, self.start_lr)

    def on_batch_end(self, training_context):
        # Log the learning rate and the loss it produced.
        lr = training_context['optimizer'].lr
        self.lrs.append(lr)
        loss = training_context['current_loss'].item()
        self.losses.append(loss)
        # NOTE(review): the snapshot is restored after *every* batch, so each
        # measured loss reflects a single optimizer step from the same
        # starting weights -- confirm this is intended.
        training_context['current_model'].load_state_dict(load('Models/state_dict.pth'))
        # Check whether the loss got too large or NaN.
        if math.isnan(loss) or loss > self.best_loss * 4:
            # BUGFIX: restore the base lr and stop increasing.  Previously
            # execution fell through to `lr *= self.lr_mult` below, which
            # immediately overrode this reset, making it a no-op.
            self.adjust_learning_rate(training_context, self.base_lr)
            return
        if loss < self.best_loss:
            self.best_loss = loss
        # Increase the learning rate for the next batch.
        lr *= self.lr_mult
        self.adjust_learning_rate(training_context, lr)

    def on_epoch_end(self, training_context):
        # Plot the raw losses and two smoothed derivative curves, then
        # restore the original learning rate.
        self.plot_loss(training_context)
        self.sma = 1
        self.plot_loss_change(training_context)
        self.sma = 5
        self.plot_loss_change(training_context)
        self.adjust_learning_rate(training_context, self.base_lr)

    def plot_loss(self, training_context):
        """
        Plots the loss.
        Parameters:
            n_skip_beginning - number of batches to skip on the left.
            n_skip_end - number of batches to skip on the right.
        """
        plt.ylabel("loss")
        plt.xlabel("learning rate (log scale)")
        plt.plot(self.lrs[self.n_skip_beginning:-self.n_skip_end], self.losses[self.n_skip_beginning:-self.n_skip_end])
        plt.xscale('log')

    def plot_loss_change(self, training_context):
        """
        Plots rate of change of the loss function.
        Parameters:
            sma - number of batches for simple moving average to smooth out the curve.
            n_skip_beginning - number of batches to skip on the left.
            n_skip_end - number of batches to skip on the right.
            y_lim - limits for the y axis.
        """
        assert self.sma >= 1
        # Pad with zeros so derivatives align index-wise with self.lrs.
        derivatives = [0] * self.sma
        for i in range(self.sma, len(self.lrs)):
            derivative = (self.losses[i] - self.losses[i - self.sma]) / self.sma
            derivatives.append(derivative)
        plt.ylabel("rate of loss change")
        plt.xlabel("learning rate (log scale)")
        plt.plot(self.lrs[self.n_skip_beginning:-self.n_skip_end], derivatives[self.n_skip_beginning:-self.n_skip_end])
        plt.xscale('log')
        y_lim = (-0.01, 0.01)
        plt.ylim(y_lim)
class OnceCycleLR(AdjustLRCallbackBase):
    """One-cycle learning-rate policy.

    lr ramps min->max over the first half of the cycle, back down over the
    second half, then anneals toward ``min_lr * reduce_factor``; momentum
    moves in the opposite direction during the cycle.
    """
    def __init__(self, lr_range: tuple = (0.1, 1.), momentum_range: tuple = (0.85, 0.95), annihilation_frac: float = 0.1, reduce_factor: float = 0.01,
                 last_step: int = -1):
        super().__init__()
        self.min_lr, self.max_lr = lr_range[0], lr_range[1]
        assert self.min_lr < self.max_lr, \
            "Argument lr_range must be (min_lr, max_lr), where min_lr < max_lr"
        self.min_momentum, self.max_momentum = momentum_range[0], momentum_range[1]
        assert self.min_momentum < self.max_momentum, \
            "Argument momentum_range must be (min_momentum, max_momentum), where min_momentum < max_momentum"
        self.annihilation_frac = annihilation_frac
        self.reduce_factor = reduce_factor
        self.last_step = last_step

    def on_training_start(self, training_context):
        """Derive the cycle geometry from the total number of batches."""
        self.num_steps = training_context['total_epoch'] * training_context['total_batch']
        self.num_cycle_steps = int(self.num_steps * (1. - self.annihilation_frac))  # Total number of steps in the cycle
        self.final_lr = self.min_lr * self.reduce_factor

    def on_batch_end(self, training_context):
        current_step = training_context['steps']
        self.last_step = training_context['steps'] - 1
        if current_step <= self.num_cycle_steps // 2:
            # Scale up phase
            scale = current_step / (self.num_cycle_steps // 2)
            lr = self.min_lr + (self.max_lr - self.min_lr) * scale
            momentum = self.max_momentum - (self.max_momentum - self.min_momentum) * scale
        elif current_step <= self.num_cycle_steps:
            # Scale down phase
            scale = (current_step - self.num_cycle_steps // 2) / (self.num_cycle_steps - self.num_cycle_steps // 2)
            lr = self.max_lr - (self.max_lr - self.min_lr) * scale
            momentum = self.min_momentum + (self.max_momentum - self.min_momentum) * scale
        elif current_step <= self.num_steps:
            # Annihilation phase: only change lr
            scale = (current_step - self.num_cycle_steps) / (self.num_steps - self.num_cycle_steps)
            lr = self.min_lr - (self.min_lr - self.final_lr) * scale
            momentum = None
        else:
            # Exceeded given num_steps: do nothing
            return
        training_context['optimizer'].param_groups[0]['lr'] = lr
        # BUGFIX: `if momentum:` skipped the update when momentum happened to
        # be exactly 0.0; only the annihilation phase's None should skip it.
        if momentum is not None:
            training_context['optimizer'].param_groups[0]['momentum'] = momentum
class StepLR(AdjustLRCallbackBase):
    """Multiply the learning rate by ``gamma`` every ``frequency`` units.

    ``unit`` selects whether the schedule counts batches or epochs.
    """

    def __init__(self, frequency: int, unit='batch', gamma=0.5):
        super().__init__()
        self.frequency = frequency
        if unit not in ['batch', 'epoch']:
            raise ValueError('Only {0} is valid unit value.'.format(['batch', 'epoch']))
        self.unit = unit
        self.gamma = gamma

    def on_epoch_end(self, training_context):
        epoch = training_context['current_epoch']
        if self.unit == 'epoch' and (epoch + 1) % self.frequency == 0:
            new_lr = training_context['optimizer'].lr * self.gamma
            self.adjust_learning_rate(training_context, new_lr, verbose=True)

    def on_batch_end(self, training_context):
        step = training_context['steps']
        if self.unit == 'batch' and step > 0 and (step + 1) % self.frequency == 0:
            new_lr = training_context['optimizer'].lr * self.gamma
            self.adjust_learning_rate(training_context, new_lr, verbose=True)
class PolyLR(AdjustLRCallbackBase):
    """Quadratic polynomial decay of the learning rate, floored at 1e-7."""

    def __init__(self, max_lr=1e-3, max_iter=10000):
        super().__init__()
        self.max_lr = max_lr
        self.max_iter = max_iter

    def on_batch_end(self, training_context):
        step = training_context['steps']
        remaining = 1 - (step / self.max_iter)
        lr = self.max_lr * remaining * remaining
        if lr < 1.0e-7:
            lr = 1.0e-7
        self.adjust_learning_rate(training_context, lr, verbose=False)
class ReduceLROnPlateau(AdjustLRCallbackBase):
"""
Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
"""
def __init__(self, monitor='total_losses', factor=0.1, patience=10,
verbose=0, mode='auto', min_delta=1e-4, cooldown=0, min_lr=0,unit_base='epoch',
**kwargs):
"""
Args:
monitor: quantity to be monitored.
factor: factor by which the learning rate will
be reduced. new_lr = lr * factor
patience: number of epochs that produced the monitored
quantity with no improvement after which training will
be stopped.
Validation quantities may not be produced for every
epoch, if the validation frequency
(`model.fit(validation_freq=5)`) is greater than one.
verbose: int. 0: quiet, 1: update messages.
mode: one of {auto, min, max}. In `min` mode,
lr will be reduced when the quantity
monitored has stopped decreasing; in `max`
mode it will be reduced when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity.
min_delta: threshold for measuring the new optimum,
to only focus on significant changes.
cooldown: number of epochs to wait before resuming
normal operation after lr has been reduced.
min_lr: lower bound on the learning rate.
"""
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
raise ValueError('ReduceLROnPlateau '
'does not support a factor >= 1.0.')
if 'epsilon' in kwargs:
min_delta = kwargs.pop('epsilon')
warnings.warn('`epsilon` argument is deprecated and '
'will be removed, use `min_delta` instead.')
self.factor = factor
self.min_lr = min_lr
self.min_delta = min_delta
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0
self.wait = 0
self.best = 0
if monitor=='total_losses':
mode='min'
self.mode = mode
self.monitor_op = None
self.unit_base=unit_base
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
'fallback to auto mode.' % (self.mode),
RuntimeWarning)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
def on_epoch_end(self, training_context):
training_context['current_lr']=training_context['optimizer'].lr
if self.unit_base == 'epoch' :
steps=None
history=None
if self.monitor in training_context['losses']:
steps, history = training_context['losses'].get_series(self.monitor)
elif self.monitor in training_context['metrics']:
steps, history = training_context['metrics'].get_series(self.monitor)
else:
steps, history = training_context['losses'].get_series('total_losses')
current = to_numpy(history[-min(5, len(history)):]).mean()
if current is None:
warnings.warn(
'Reduce LR on plateau conditioned on metric `%s` '
'which is not available. Available metrics are: %s' %
(self.monitor, ','.join(training_context['metrics'].keys_list)), RuntimeWarning
)
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
self.wait += 1
if self.wait >= self.patience:
old_lr = float(training_context['optimizer'].lr)
if old_lr > self.min_lr:
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
self.adjust_learning_rate(training_context, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: ReduceLROnPlateau reducing '
'learning rate to %s.' % (training_context['current_epoch'] + 1, new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
def on_batch_end(self, training_context):
if self.unit_base is None:
if training_context['total_batch']>_session.epoch_equivalent:
self.unit_base='epoch'
print('one epoch have {0} batches, use {1} as epoch equivalent in long epoch. '.format(training_context['total_batch'],_session.epoch_equivalent))
else:
self.unit_base = 'epoch'
print('ReduceLROnPlateau reseted.')
num_batches = training_context['steps']
if self.unit_base=='epoch' and training_context['steps']>0 and training_context['steps']%_session.epoch_equivalent==0 and training_context['current_model'].training==True:
training_context['current_lr']=training_context['optimizer'].lr
history=training_context['losses'].get(self.monitor,training_context['metrics'].get(self.monitor,training_context['losses']['total_losses']))
steps,values=zip(*history)
current =to_numpy(values[-min(5,len(values)):]).mean()
if current is None:
warnings.warn(
'Reduce LR on | |
<gh_stars>0
"""Runtime parameters
=====================
Scripting interface for Nek5000 :ref:`parameter file <nek:case_files_par>`.
"""
import json
import logging
import os
import sys
import textwrap
from ast import literal_eval
from configparser import ConfigParser
from io import StringIO
from math import nan
from pathlib import Path
from fluidsim_core.params import Parameters as _Parameters
from inflection import camelize, underscore
from .log import logger
from .solvers import get_solver_short_name, import_cls_simul
# Mapping from Python literals to the tokens Nek5000 .par files expect.
# The keys are mostly *strings*: _as_nek_value stringifies values first so
# that e.g. 1 and True (hash(1) == hash(True)) cannot collide as dict keys;
# nan itself is kept as a float key.
literal_python2nek = {
    nan: "<real>",
    "nan": "nan",
    "None": "none",
    "True": "yes",
    "False": "no",
}
# Inverse mapping, used when reading .par values back into Python.
literal_nek2python = {v: k for k, v in literal_python2nek.items()}
# Values skipped when writing a section (see has_to_prune_literals).
literal_prune = ("<real>", "", "nan")
#: JSON file name to which recorded user_params are saved
filename_map_user_params = "map_user_params.json"
def _as_nek_value(input_value):
    """Convert Python values to equivalent Nek5000 par values."""
    # Stringify before the lookup so that e.g. 1 and True cannot collide as
    # dict keys (hash(1) == hash(True)); nan is looked up as-is because
    # str(nan) would not match the float nan key of the mapping.
    key = nan if input_value is nan else str(input_value)
    return literal_python2nek.get(key, input_value)
def camelcase(value):
    """Convert strings to ``camelCase``."""
    lowered = str(value).lower()
    return camelize(lowered, uppercase_first_letter=False)
def _check_user_param_index(idx):
"""Check if the index of user parameter is within bounds"""
if idx > 20:
raise ValueError(f"userParam {idx = } > 20")
elif idx > 10:
logger.warning(
"Due to a bug in Nek5000, the last index of userParam## that we can "
f"specify seems to be 10. {idx = } may not work"
)
def _as_python_value(input_value):
    """Convert Nek5000 par values to equivalent Python values if possible."""
    mapped = literal_nek2python.get(str(input_value), input_value)
    try:
        parsed = literal_eval(mapped)
    except (SyntaxError, ValueError):
        # Not a parsable Python literal (e.g. an arbitrary string).
        return mapped
    return parsed
def load_params(path_dir="."):
    """Load a :class:`snek5000.params.Parameters` instance from `path_dir`.

    Parameters
    ----------
    path_dir : str or path-like
        Path to a simulation directory.

    Returns
    -------
    params: :class:`snek5000.params.Parameters`

    """
    from snek5000.util.files import _path_try_from_fluidsim_path

    path = _path_try_from_fluidsim_path(path_dir)
    short_name = get_solver_short_name(path)
    Simul = import_cls_simul(short_name)
    return Simul.load_params_from_file(
        path_xml=path / "params_simul.xml",
        path_par=path / f"{short_name}.par",
    )
class Parameters(_Parameters):
    """Container for reading, modifying and writing :ref:`par files
    <nek:case_files_par>`.
    :param tag: A string representing name of case files (for example: provide
    ``"abl"`` for files like ``abl.usr, abl.par`` etc).
    """
    @classmethod
    def _load_params_simul(cls, path=None):
        """Alias for :func:`load_params`"""
        return load_params(path or Path.cwd())
    def __init__(self, *args, **kwargs):
        # '#' starts both full-line and inline comments in the .par file.
        comments = ("#",)
        self._set_internal_attr(
            "_par_file",
            ConfigParser(comment_prefixes=comments, inline_comment_prefixes=comments),
        )
        # Only enabled parameters would be written into par file
        self._set_internal_attr("_enabled", True)
        # User parameters sections should begin with an underscore
        self._set_internal_attr("_user", True)
        super().__init__(*args, **kwargs)
        # Like in Python Nek5000's par files are case insensitive.
        # However for consistency, case sensitivity is enforced:
        self._par_file.optionxform = str
    def _make_dict_attribs(self):
        # Extend the base-class attribute dict with this class's internals so
        # they round-trip through serialization.
        d = super()._make_dict_attribs()
        # Append internal attributes
        d.update({"_enabled": self._enabled, "_user": self._user})
        if hasattr(self, "_recorded_user_params"):
            d["_recorded_user_params"] = self._recorded_user_params
        return d
    def __update_par_section(
        self, section_name, section_dict, has_to_prune_literals=True
    ):
        """Updates a section of the ``par_file`` object from a dictionary."""
        par = self._par_file
        # Start with underscore if it is a user section
        section_name_par = "_" if section_dict["_user"] else ""
        section_name_par += section_name.upper().lstrip("_")
        if section_name_par not in par.sections():
            par.add_section(section_name_par)
        # Pop the user-params mapping (if any) so the loop below only sees
        # plain options; it is handled separately at the end.
        if "_recorded_user_params" in section_dict:
            recorded_user_params = section_dict.pop("_recorded_user_params")
        else:
            recorded_user_params = False
        for option, value in section_dict.items():
            value = _as_nek_value(value)
            if has_to_prune_literals and value in literal_prune:
                continue
            # Make everything consistent where values refer to option names
            # if option in ("stop_at", "write_control"):
            if str(value) in section_dict:
                value = camelcase(value)
            par.set(section_name_par, camelcase(option), str(value))
        # _recorded_user_params -> userParam%%
        if not recorded_user_params:
            return
        params = self._parent
        if self._tag != "nek" or params._tag != "params":
            raise RuntimeError(
                "_recorded_user_params should only be in params.nek.general"
            )
        # Emit one userParamNN option per recorded index, in index order.
        for idx_uparam in sorted(recorded_user_params.keys()):
            tag = recorded_user_params[idx_uparam]
            _check_user_param_index(idx_uparam)
            value = _as_nek_value(params[tag])
            par.set(
                section_name_par,
                f"userParam{idx_uparam:02d}",
                str(value),
            )
    def _sync_par(self, has_to_prune_literals=True, keep_all_sections=False):
        """Sync values in param children and attributes to ``self._par_file``
        object.
        """
        if self._tag_children:
            data = [
                (child, getattr(self, child)._make_dict_tree())
                for child in self._tag_children
            ]
        else:
            # No children
            data = [(self._tag, self._make_dict_attribs())]
        for child, d in data:
            # Section name is often written in [UPPERCASE]
            section_name = child.upper()
            self.__update_par_section(
                section_name, d, has_to_prune_literals=has_to_prune_literals
            )
        self.__tidy_par(keep_all_sections)
    def __tidy_par(self, keep_all_sections=False):
        """Remove internal attributes and disabled sections from par file."""
        par = self._par_file
        for section_name in par.sections():
            par.remove_option(section_name, "_user")
            if keep_all_sections:
                enabled = True
            else:
                enabled = par.getboolean(section_name, "_enabled")
            if enabled:
                par.remove_option(section_name, "_enabled")
            else:
                par.remove_section(section_name)
    def _autodoc_par(self, indent=0):
        """Autodoc a code block with ``ini`` syntax and set docstring."""
        self._sync_par(has_to_prune_literals=False, keep_all_sections=True)
        docstring = "\n.. code-block:: ini\n\n"
        with StringIO() as output:
            self._par_file.write(output)
            ini = output.getvalue()
        docstring += textwrap.indent(ini, "    ")
        if ini:
            self._set_doc(self._doc + textwrap.indent(docstring, " " * indent))
    def _record_nek_user_params(self, nek_params_keys, overwrite=False):
        """Record some Nek user parameters
        Examples
        --------
        >>> params._record_nek_user_params({"prandtl": 2, "rayleigh": 3})
        >>> params.output.history_points._record_nek_user_params({"write_interval": 4})
        This is going to set or modify the internal attribute
        ``params.nek.general._recorded_user_params`` to ``{2: "prandtl", 3:
        "rayleigh", 4: "output.other.write_interval"}``.
        This attribute is then used to write the ``[GENERAL]`` section of the
        .par file.
        Note that this attribute is only for ``params.nek.general`` and should
        never be set for other parameter children.
        """
        # we need to find where is self in the tree compared to `params`
        current = self
        parent = current._parent
        tag = current._tag
        path = tag
        # iterate up the `params` tree to the top
        # NOTE(review): assumes a node whose parent is None always has tag
        # "params" -- otherwise `parent._tag` below would raise.
        while not (parent is None and tag == "params") and not (
            parent._tag == "info_simul" and tag == "params"
        ):
            current = parent
            parent = current._parent
            tag = current._tag
            path = f"{tag}.{path}"
        params = current
        assert params._tag == "params"
        # path relative to params:
        # we have `(path, name)` equal to
        # `("params.output.history_points", "write_interval")` or
        # `("params", "rayleigh")` and we want to end up with
        # `"output.history_points.write_interval"` or `rayleigh`, resp.
        path = path[len("params") :]
        if path.startswith("."):
            path = path[1:]
        if path:
            path = path + "."
        # Map user-param index -> dotted attribute path relative to `params`.
        user_params = {}
        for name, key in nek_params_keys.items():
            user_params[key] = f"{path}{name}"
        # Useful while building isolated `params` for a specific class,
        # for e.g.: Operators, Output etc.
        if not hasattr(params, "nek"):
            log_level = logging.DEBUG if "sphinx" in sys.modules else logging.WARNING
            logger.log(
                log_level,
                (
                    "Attribute params.nek does not exist, skipping "
                    "initializing user parameters."
                ),
            )
            return
        general = params.nek.general
        if not hasattr(general, "_recorded_user_params"):
            general._set_internal_attr("_recorded_user_params", {})
        if overwrite:
            general._recorded_user_params.update(user_params)
            return
        # Without `overwrite`, refuse to silently reassign an index that is
        # already mapped to another parameter.
        for key, value in user_params.items():
            if key in general._recorded_user_params:
                raise ValueError(
                    f"{key = } already used for user parameter "
                    f"{general._recorded_user_params[key]}"
                )
            general._recorded_user_params[key] = value
    def _change_index_userparams(self, user_params):
        """Change indices for user parameters
        This method can be used in the ``create_default_params`` class method
        of a solver to overwrite the default indices used in the base snek5000
        package.
        This method checks that no already recorded parameters are overwritten.
        To overwrite a parameter, use ``_record_nek_user_params`` with the
        ``overwrite`` argument.
        Examples
        --------
        >>> params._change_index_userparams({8: "output.history_points.write_interval"})
        """
        if self._tag != "params":
            raise ValueError(
                "The method `_change_index_userparams` has to be called "
                "directly with the root `params` object."
            )
        try:
            general = self.nek.general
        except AttributeError:
            raise AttributeError("No `params.nek.general` attribute.")
        try:
            recorded_user_params = general._recorded_user_params
        except AttributeError:
            raise AttributeError(
                "No `general._recorded_user_params` attribute. This attribute "
                "can be created with `_record_nek_user_params`."
            )
        # check that no user parameters are overwritten
        # Labels currently held by indices we are about to reassign:
        modified_labels = []
        for index in user_params:
            try:
                modified_labels.append(recorded_user_params[index])
            except KeyError:
                pass
        values = user_params.values()
        for label in modified_labels:
            if label not in values:
                raise ValueError(
                    f"The value {label} would be removed from the user params."
                )
        # Drop each label's old index before installing the new mapping.
        reverted = {value: key for key, value in recorded_user_params.items()}
        for label in user_params.values():
            try:
                key = reverted[label]
            except KeyError:
                raise ValueError(
                    f"User parameter {label = } is not already recorded. "
                    "Use `_record_nek_user_params`"
                )
            del recorded_user_params[key]
        recorded_user_params.update(user_params)
    def _save_as_xml(self, path_file=None, comment=None, find_new_name=False):
        """Invoke :func:`_save_recorded_user_params` and then save to an XML file at ``path_file``."""
        try:
            user_params = self.nek.general._recorded_user_params
        except AttributeError:
            # No recorded user params: nothing extra to persist.
            pass
        else:
            if path_file is None:
                path_dir = Path.cwd()
            else:
                path_dir = Path(path_file).parent
            _save_recorded_user_params(user_params, path_dir)
        return super()._save_as_xml(
            path_file=path_file, comment=comment, find_new_name=find_new_name
        )
def _save_recorded_user_params(user_params, path_dir):
    """Save a JSON file from a dictionary denoting ``user_params``"""
    destination = path_dir / filename_map_user_params
    with open(destination, "w") as file:
        json.dump(user_params, file)
def _load_recorded_user_params(path):
    """Load a JSON file and return a dictionary denoting ``user_params``"""
    with open(path) as file:
        raw = json.load(file)
    # JSON object keys are always strings; restore the integer indices.
    return {int(index): label for index, label in raw.items()}
def _check_path_like(path):
    """Ensure input is a path-like object"""
    if isinstance(path, os.PathLike):
        return
    raise TypeError(f"Expected path-like object, not {type(path) = }")
def _get_params_nek(params):
"""Check if params is the top level object (via the ``_tag`` attribute) and
return the ``params.nek`` object.
Parameters
----------
params: :class:`Parameters`
The ``params`` object
Returns
-------
params.nek: :class:`Parameters`
The ``params.nek`` object
"""
if not isinstance(params, Parameters):
raise TypeError
if params._tag != "params":
raise ValueError(f'{params._tag = } != "params"')
if params.nek._tag | |
the LEFT_ARROW emote is pressed,
and which emote should be added.
return: (discord.Message)
msg: (discord.Message) --> old message
start: (int) --> starting index of the results list
results: (list) --> list of videos from json
'''
new_msg = await sendMsg(ctx, str_to_send=to_send(results, start),
title="Search", color=0x00ffb7, old_msg=msg)
tab = ['1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣']
if start > 4:
tab += ['⬅️']
tab += ['➡️']
await addReactions(new_msg, tab)
return new_msg
async def decision(choice, start, msg, results, nb_results):
    ''' Function used to decide what to do according to the "choice" variable.
    return: (discord.Message) --> actual msg,
            (int) --> actual index of "results" list,
            (bool) --> True if a music got added to the queue
    choice: (str) --> emote from the reaction of the user
    msg: (discord.Message) --> last message sent
    results: (list) --> list of videos from json
    nb_results: (int) --> len(results)
    '''
    added_to_queue = False
    if choice in dictio:
        # Number emote: resolve it to an absolute index in the results list.
        selected_index = start + dictio[choice] - 1
        await add_to_queue(ctx, results[selected_index])
        added_to_queue = True
    elif choice == '➡️' and start < len(results):
        # Move forward one page of 5 results.
        start += 5
        msg = await next(msg, start, results, nb_results)
    elif choice == '⬅️' and start >= 5:
        # Move back one page of 5 results.
        start -= 5
        msg = await prev(msg, start, results)
    return msg, start, added_to_queue
# Choose what to do
msg, start, added_to_queue = await decision(choice, start, msg, results, nb_results)
# Continue this until user select a music to add to the queue
while added_to_queue == False:
try: # Handling events
choice = await robot.wait_for("raw_reaction_add", check=check2, timeout=60)
choice = choice.emoji.name # retreiving emote
except asyncio.TimeoutError:
return
# Choose what to do
msg, start, added_to_queue = await decision(choice, start, msg, results, nb_results)
async def Delete(ctx, nb):
    ''' Function used to delete a music from the queue.
    ctx: context from discord.py
    nb: (str) --> 1-based index of the music to delete
    '''
    nb = int(nb)  # casting str to int
    queue = musics[ctx.guild]
    # Check BOTH bounds: the original only checked `len(queue) >= nb`, so a
    # zero or negative index silently deleted from the tail of the queue via
    # Python's negative indexing.
    if 1 <= nb <= len(queue):
        title = queue[nb-1].title
        url = queue[nb-1].url
        del queue[nb-1]
        msg = await sendMsg(ctx, str_to_send=f"**[{title}]({url}) is deleted from the queue.**",
                            title="Queue update", color=0x00ffb7)
    else:
        msg = await sendMsg(ctx, str_to_send="**There isn't as much musics in the queue or the queue is empty.**",
                            title="Error", color=0x00ffb7)
async def Leave(ctx):
    ''' Function used to make the bot quit the audio channel. (Empty the queue at the same time)
    ctx: context from discord.py
    '''
    voice_client = ctx.guild.voice_client
    if not voice_client:
        # Bot is not connected: nothing to disconnect or clear.
        return
    await voice_client.disconnect()
    # Drop any queued songs for this guild.
    musics[ctx.guild] = []
async def Resume(ctx):
    ''' Function used to resume the music on the bot.
    ctx: context from discord.py
    '''
    client = ctx.guild.voice_client
    # Guard against the bot not being connected: `voice_client` is None in
    # that case, and the original raised AttributeError on `is_paused()`.
    # The sibling command `Skip` already guards the same way.
    if client and client.is_paused():
        client.resume()
async def Pause(ctx):
    ''' Function used to pause the music on the bot.
    ctx: context from discord.py
    '''
    client = ctx.guild.voice_client
    # Guard against the bot not being connected: `voice_client` is None in
    # that case, and the original raised AttributeError on `is_paused()`.
    # The sibling command `Skip` already guards the same way.
    if client and not client.is_paused():
        client.pause()
async def Skip(ctx):
    ''' Function used to skip the music currently playing on the bot.
    ctx: context from discord.py
    '''
    voice_client = ctx.guild.voice_client
    if not voice_client:
        # Not connected: nothing to skip.
        return
    # Stopping the player fires its "after" callback, which chains into the
    # next queued song (see play_song).
    voice_client.stop()
async def Queue(ctx, robot):
    ''' Display the current queue, paginated with arrow reactions when it
    holds more than 10 entries.
    ctx: context from discord.py
    robot: the discord.py bot/client instance (used to wait for reactions)
    '''
    def getTime(duration):
        ''' Function used to transform a duration(int) in sec into a duration(str) like hh:mm:ss.
        return: (str)
        duration: (int)
        '''
        total_sec = duration
        h = (total_sec - (total_sec % 3600)) / 3600
        sec_min_h = (total_sec - h * 3600)
        min = (sec_min_h - (sec_min_h % 60)) / 60
        sec = sec_min_h - min * 60
        # Dividing the float minutes by 10 and stripping the '.' zero-pads the
        # minutes to two digits (5.0 -> "05", 12.0 -> "12"); hours and seconds
        # are not padded the same way.
        time = '{}:{}:{}'.format(int(h), str(
            min/10).replace('.', ''), int(sec))
        return time
    # Check if the bot is connected to a vocal channel
    client = ctx.guild.voice_client
    pages = []  # if the queue is split into pages, each page will be inside this list
    def check(reaction):
        # Only accept arrow reactions from the original author on the queue message
        # (`msg` is resolved from the enclosing scope at call time).
        if reaction.user_id == ctx.author.id and msg.id == reaction.message_id:
            if reaction.emoji.name in ['⬅️', '➡️']:
                return reaction
    if client: # if connected
        # retrieve duration in desired format
        time = getTime(now_playing[ctx.guild][0].duration)
        # starting to build the string to send
        to_print = "```\n" + f"Now playing:\n\t{now_playing[ctx.guild][0].title} ({time})\n\n"
        i = 1
        queue = musics[ctx.guild]
        to_print += f"Total queued: {len(queue)} song(s)\n\n"
        if len(queue) > 10: # if queue is too long
            y = 1  # position within the current page (max 10 entries per page)
            actual_page = to_print
            for music in queue:
                time = getTime(music.duration) # retrieve duration
                actual_page += f"{i}. {music.title} ({time})\n" # build string to send
                if y == 10 or music == queue[-1]: # each 10 music, or at the end of the queue, we end the page
                    actual_page += "```" # ending actual page
                    pages += [actual_page] # adding the page to the list of pages
                    actual_page = "```\n" # starting a new page
                    y = 1
                else:
                    y += 1
                i += 1
            i = 0  # i now indexes into `pages`
            nb_page = 1
            msg = await sendMsg(ctx, str_to_send=pages[i],
                                title=f"Queue (Page {nb_page})", color=0x00ffb7)
            # Reaction-driven pagination loop; only exits on timeout.
            while True:
                old = msg
                msg = await sendMsg(ctx, str_to_send=pages[i],
                                    title=f"Queue (Page {nb_page})", color=0x00ffb7,
                                    old_msg=old)
                # Offer only the arrows that make sense for the current page.
                if nb_page > 1 and nb_page < len(pages):
                    emotes = ['⬅️', '➡️']
                elif nb_page >= len(pages):
                    emotes = ['⬅️']
                else:
                    emotes = ['➡️']
                await addReactions(msg, emotes)
                try: # handling events
                    react = await robot.wait_for("raw_reaction_add", check=check, timeout=60)
                except asyncio.TimeoutError:
                    return # exit the function if user stop reacting
                emoji = react.emoji.name
                if emoji == '⬅️':
                    nb_page -= 1
                    i -= 1
                if emoji == '➡️':
                    nb_page += 1
                    i += 1
        else: # if queue isn't too loong
            for music in queue:
                time = getTime(music.duration) # retrieve duration
                to_print += f"{i}. {music.title} ({time})\n" # build string to send
                i += 1
            to_print += "```" # end of the string
            msg = await sendMsg(ctx, str_to_send=to_print, title="Music(s) in queue :",
                                color=0x00ffb7)
    else: # if bot not connected
        msg = await sendMsg(ctx, str_to_send="**Bot should be connected to your channel to print the queue.**",
                            title="Error", color=0x00ffb7)
async def play_song(client, queue, song, tab_ctx):
    ''' Function used to play a music on the bot.
    client: (ctx.author.voice.channel.connect())
    queue: (list) --> list of musics from youtube_dl
    song: ((class)Video) --> Video object
    tab_ctx: (list)[ctx, old_msg]
    '''
    source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(song.stream_url,
        before_options= "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5"))
    ctx = tab_ctx[0]
    msg = await sendMsg(ctx, str_to_send=f"**[{song.title}]({song.url})**",
                        title="Now playing", color=0x00ffb7, old_msg=msg if False else tab_ctx[1])
    def after_playback(_):
        # Called by discord.py from the player thread when playback ends:
        # chain into the next queued song, or disconnect when the queue is
        # empty. (Renamed from `next`, which shadowed the builtin.)
        if len(queue) > 0:
            new_song = queue[0]
            now_playing[ctx.guild] = [queue[0]]
            del queue[0]
            asyncio.run_coroutine_threadsafe(play_song(client, queue, new_song, [ctx, msg]), bot.loop)
        else:
            asyncio.run_coroutine_threadsafe(client.disconnect(), bot.loop)
    try:
        client.play(source, after=after_playback)
    except Exception:
        # Best-effort: the client may already be playing or disconnected.
        # Narrowed from a bare `except` so KeyboardInterrupt/SystemExit are
        # no longer swallowed.
        pass
async def playlist(ctx, url):
    ''' Function used to add a playlist to the queue.
    ctx: context from discord.py
    url: (str) --> link of a youtube playlist
    '''
    client = ctx.guild.voice_client
    status = await sendMsg(ctx, "***downloading playlist...***",
                           "Status", 0x00ffb7)
    playlist = ytdl.extract_info(url, download=False)
    if client and client.channel:  # if bot connected
        for video in playlist['entries']:  # for each music of the playlist
            to_append = Video(ctx, video=video)
            musics[ctx.guild].append(to_append)  # add each music of the playlist inside the queue
        msg = await sendMsg(ctx, f"**[{playlist['title']}]({playlist['webpage_url']})**",
                            "Playlist queued", 0x00ffb7, status)
    else:  # if bot not connected
        try:
            channel = ctx.author.voice.channel
        except AttributeError:  # narrowed from bare except: author is not in a voice channel
            msg = await sendMsg(ctx, str_to_send="***You must join a channel for that !***",
                                old_msg=status)
            return
        musics[ctx.guild] = []  # creating the queue
        now_playing[ctx.guild] = []  # creating now playing
        to_play = None
        for i, video in enumerate(playlist['entries']):  # for each video of the playlist
            if i == 0:
                to_play = Video(ctx, video=video)
                now_playing[ctx.guild] = [to_play]  # currently playing music is stored in there
            else:
                to_append = Video(ctx, video=video)
                musics[ctx.guild].append(to_append)  # add each music to queue
        if to_play is None:
            # Empty playlist: the original fell through to play_song with an
            # unbound `to_play`, raising NameError.
            msg = await sendMsg(ctx, "**This playlist is empty.**",
                                "Error", 0x00ffb7, status)
            return
        try:  # try to connect to the channel of the user
            client = await channel.connect()
        except Exception:  # narrowed from bare except
            msg = await sendMsg(ctx, "**Unable to connect to voice channel**",
                                "Error", 0x00ffb7)
            return
        msg = await sendMsg(ctx, f"**[{playlist['title']}]({playlist['webpage_url']})**",
                            "Now playing playlist", 0x00ffb7)
        tab_ctx = [ctx, msg]
        # start to play the song
        await play_song(client, musics[ctx.guild], to_play, tab_ctx)
async def Play(ctx, args):
client = ctx.guild.voice_client
search = ""
for mot in args:
search += mot + " "
if "https://youtube.com/playlist" in search:
await playlist(ctx, search)
return
elif "https://" in search:
url = search
else:
try:
yt = youtube_search.YoutubeSearch(search, max_results=1).to_json()
except Exception as e:
to_print = "Impossible de charger la vidéo à cause des erreurs suivantes :\n"
to_print += e.args[0][e.args[0].index(' '):]
msg = await sendMsg(ctx, to_print, "Error", 0x00ffb7)
return
try:
yt_id = str(json.loads(yt)['videos'][0]['id'])
url = 'https://www.youtube.com/watch?v=' + yt_id
except:
msg = await | |
self.get_color('node', node_template_type), sep='')
else:
self.generate(uml2_kind, ' "', self.get_label('node', container_name, node_template_type), '" as node_', normalize_name(container_name), self.get_color('node', node_template_type), ' {', sep='')
# Generate a properties map if needed
if properties is not None:
color = self.get_color('node', node_template_type)
if ';' in color:
color = color[:color.find(';')]
self.generate('map "Properties" as node_', normalize_name(container_name), '_properties', color, ' {', sep='')
for property_name in properties:
self.generate(property_name, " => ", str(node_template.get("properties", {}).get(property_name, 'unset')))
self.generate('}')
for contained_name, contained_dict in containeds.items():
generate_container(self, contained_name, contained_dict)
for artifact_name, artifact_yaml in node_template_artifacts.items():
artifact_type = syntax.get_artifact_type(artifact_yaml)
if artifact_type == None:
artifact_type = 'Artifact'
color = self.get_color('artifact', artifact_type)
icon = self.get_representation('artifact', artifact_type, 'icon')
if icon is not None:
self.generate('artifact "<img:', icon, '>" <<artifact>> as node_', normalize_name(container_name), '_artifact_', normalize_name(artifact_name), self.get_color('artifact', artifact_type), " {", sep='')
self.generate('label "', syntax.get_artifact_file(artifact_yaml), '" as ', normalize_name(container_name), '_label', sep='')
self.generate('}')
else:
self.generate('artifact "', self.get_label('artifact', syntax.get_artifact_file(artifact_yaml), artifact_type), '" <<artifact>> as node_', normalize_name(container_name), '_artifact_', normalize_name(artifact_name), self.get_color('artifact', artifact_type), sep='')
self.generate('}')
substitution_mappings = topology_template.get(SUBSTITUTION_MAPPINGS)
if substitution_mappings:
# Create components connected to the capabilities of the substitition mapping.
for capability_name, capability_yaml in get_dict(substitution_mappings, CAPABILITIES).items():
self.generate('component "a node" as substitution_mappings_capability_', normalize_name(capability_name), sep='')
substitution_mappings_node_type = substitution_mappings.get(NODE_TYPE)
self.generate(get_uml2_kind(substitution_mappings_node_type), ' ": ', substitution_mappings_node_type, '" as substitution_mappings', self.get_color('node', substitution_mappings_node_type), ' {', sep='')
for container_name, containeds in containers.items():
generate_container(self, container_name, containeds)
for node_template_name in non_containeds:
generate_container(self, node_template_name, {})
relationship_templates = get_dict(topology_template, RELATIONSHIP_TEMPLATES)
# Iterate over all node templates to draw relationships.
for node_template_name, node_template_yaml in node_templates.items():
merged_node_template_type = self.type_system.merge_node_type(node_template_yaml.get(TYPE))
# Iterate over all requirements of the node template.
for requirement in get_list(node_template_yaml, REQUIREMENTS):
for requirement_name, requirement_yaml in requirement.items():
requirement_relationship_type = None
if type(requirement_yaml) == dict:
requirement_relationship = syntax.get_requirement_relationship(requirement_yaml)
if type(requirement_relationship) == dict:
requirement_relationship_type = syntax.get_relationship_type(requirement_relationship)
else:
relationship_template = relationship_templates.get(requirement_relationship)
if relationship_template:
requirement_relationship_type = relationship_template.get(TYPE)
else:
requirement_relationship_type = requirement_relationship
if requirement_relationship_type == None:
requirement = get_dict(merged_node_template_type, REQUIREMENTS).get(requirement_name, {})
tmp = syntax.get_requirement_relationship(requirement)
requirement_relationship_type = syntax.get_relationship_type(tmp)
if requirement_relationship_type == None:
continue
if not self.type_system.is_derived_from(requirement_relationship_type, 'tosca.relationships.HostedOn'):
requirement_node = get_requirement_node_template(requirement_yaml)
if requirement_node:
direction = self.configuration.get(UML2, 'direction').get(requirement_relationship_type, '')
self.generate('node_', normalize_name(node_template_name), ' .' + direction + '.> node_', normalize_name(requirement_node), ' : <<', short_type_name(requirement_relationship_type), '>>', sep='')
if substitution_mappings:
self.generate('}')
# Connect created components to the nodes exported by the capabilities of the substitition mapping.
for capability_name, capability_yaml in get_dict(substitution_mappings, CAPABILITIES).items():
if type(capability_yaml) != list:
continue # TODO
target_node_name = capability_yaml[0]
target_capability_name = capability_yaml[1]
self.generate('substitution_mappings_capability_', normalize_name(capability_name), ' "', capability_name, '" ..> node_', normalize_name(target_node_name), sep='')
merged_substitution_mappings_node_type = self.type_system.merge_node_type(substitution_mappings_node_type)
# Get all requirements of the node type of the substitution mapping.
all_requirement_declarations = get_dict(merged_substitution_mappings_node_type, REQUIREMENTS)
req_idx = 0
# Iterate over all requirements of the substitution mapping.
for requirement_name, requirement_yaml in syntax.get_substitution_mappings_requirements(substitution_mappings).items():
requirement_capability = syntax.get_requirement_capability(all_requirement_declarations.get(requirement_name))
if requirement_capability == None:
continue
self.generate(get_uml2_kind(requirement_capability), ' ": ', short_type_name(requirement_capability),'" as substitution_mappings_requirement_', req_idx, sep='')
requirement_node = requirement_yaml[0]
requirement_node_capability = requirement_yaml[1]
self.generate('node_', normalize_name(requirement_node), ' "', normalize_name(requirement_node_capability), '" ..> "', requirement_name, '" substitution_mappings_requirement_', req_idx, sep='')
req_idx = req_idx + 1
self.generate('@enduml')
def generate_UML2_workflow_diagrams(self, topology_template):
    """Generate one PlantUML state diagram file per workflow of the topology template."""
    # get the workflows
    workflows = topology_template.get('workflows', {})
    def generate_workflow_diagram(workflow_name, workflow_definition):
        # Emit states and transitions for one workflow; recurses into
        # workflows referenced by 'inline' activities.
        def step_id(step_name):
            # Unique PlantUML identifier for a step of this workflow.
            return 'step_%s_%s' \
                % (normalize_name(workflow_name), \
                   normalize_name(step_name))
        # generate all steps of the current workflow
        steps = workflow_definition.get('steps', {})
        for step_name, step_definition in steps.items():
            # generate a PlantUML state for each step
            self.generate('state "%s" as %s << step >> {' % (step_name, step_id(step_name)))
            # get the target of the current step
            target = step_definition['target']
            step_activity_id = step_id(step_name) + '_' + normalize_name(target)
            target_relationship = step_definition.get('target_relationship')
            if target_relationship is None:
                target_label = target
            else:
                step_activity_id += '_' + normalize_name(target_relationship)
                target_label = target + ' ' + normalize_name(target_relationship)
            # store ids of all activities of the current step
            activity_ids = []
            # generate all activities of the current step
            for activity in step_definition['activities']:
                for key, value in activity.items():
                    # generate an id of the current activity
                    activity_id = '%s_%s' % (step_activity_id, value)
                    activity_ids.append(activity_id)
                    if key == 'inline':
                        # generate a step encapsulating the inlined workflow
                        self.generate(' state "%s" as %s << %s >> {' % (value, activity_id, key))
                        generate_workflow_diagram(value, workflows.get(value))
                        self.generate(' }')
                    else:
                        # generate a PlantUML state for each activity
                        self.generate(' state "%s %s" as %s << %s >>' % (target_label, value, activity_id, key))
            if len(activity_ids) > 0:
                # links consecutive activities
                previous_activity_id = activity_ids[0]
                for next_activity_id in activity_ids[1:]:
                    self.generate(' %s --> %s' % (previous_activity_id, next_activity_id))
                    previous_activity_id = next_activity_id
            # close the step
            self.generate('}')
        # compute the number of predecessors of each step
        nb_predecessors = { step_name: 0 for step_name in steps.keys() }
        for step_name, step_definition in steps.items():
            for next_step in step_definition.get('on_success', []):
                nb_predecessors[next_step] += 1
        # generate a join for each step having multiple predecessors
        for step_name in steps.keys():
            if nb_predecessors[step_name] > 1:
                sid = step_id(step_name)
                self.generate(' state %s_join <<join>>' % sid)
                self.generate(' %s_join --> %s' % (sid, sid))
        # links the steps
        initial_step_names = list(steps.keys())  # pruned below: steps with a predecessor are removed
        final_step_names = []
        for step_name, step_definition in steps.items():
            on_success = step_definition.get('on_success')
            if on_success is None or len(on_success) == 0:
                # the current step is a final step
                final_step_names.append(step_name)
            else:
                # link the current step to each on_success step
                nb_successors = len(on_success)
                if nb_successors == 1:
                    state_source_id = step_id(step_name)
                elif nb_successors > 1:
                    # generate a fork
                    state_source_id = '%s_fork' % step_id(step_name)
                    self.generate('state %s <<fork>>' % state_source_id)
                    self.generate('%s --> %s' % (step_id(step_name), state_source_id))
                for next_step in on_success:
                    target_step_id = step_id(next_step)
                    if nb_predecessors[next_step] > 1:
                        # route through the join generated above
                        target_step_id += '_join'
                    self.generate('%s --> %s' % (state_source_id, target_step_id))
                    # next_step is not an initial step
                    try:
                        initial_step_names.remove(next_step)
                    except ValueError:
                        pass # next_step was already removed from initial_step_names
            # link the current step to each on_failure step
            for next_step in step_definition.get('on_failure', []):
                self.generate('%s -right[#red]-> %s : <color:red>on_failure</color>' % (step_id(step_name), step_id(next_step)))
                # next_step is not an initial step
                try:
                    initial_step_names.remove(next_step)
                except ValueError:
                    pass # next_step was already removed from initial_step_names
        # link all initial steps
        nb_initial_steps = len(initial_step_names)
        if nb_initial_steps == 0:
            self.error('topology_template:workflows:%s - no initial state' % workflow_name)
        elif nb_initial_steps == 1:
            initial_state = '[*]'
        else: # > 1
            # generate a fork
            initial_state = '%s_fork' % workflow_name
            self.generate('state %s <<fork>>' % initial_state)
            self.generate('[*] --> %s' % initial_state)
        for step_name in initial_step_names:
            self.generate('%s --> %s' % (initial_state, step_id(step_name)))
        # link all final steps
        nb_final_steps = len(final_step_names)
        if nb_final_steps == 0:
            self.error('topology_template:workflows:%s - no final state' % workflow_name)
        elif nb_final_steps == 1:
            final_state = '[*]'
        else: # > 1
            # generate a join
            final_state = '%s_join' % workflow_name
            self.generate('state %s <<join>>' % final_state)
            self.generate('%s --> [*]' % final_state)
        for step_name in final_step_names:
            self.generate('%s --> %s' % (step_id(step_name), final_state))
    # iterate over all workflows
    for workflow_name, workflow_definition in workflows.items():
        # open a file for each workflow
        self.open_file('-%s-workflow-diagram.plantuml' % workflow_name)
        self.generate('@startuml')
        # generate PlantUML configuration
        self.generate('hide empty description')
        self.generate('skinparam shadowing false')
        self.generate('skinparam state {')
        self.generate(' ArrowColor blue')
        self.generate(' BorderColor blue')
        self.generate(' EndColor black')
        self.generate(' StartColor green')
        self.generate(' BackGroundColor<< step >> white')
        self.generate(' BorderColor<< step >> black')
        self.generate(' BackGroundColor<< delegate >> lightgrey')
        self.generate(' BackGroundColor<< set_state >> white')
        self.generate(' BackGroundColor<< call_operation >> lightblue')
        self.generate(' BackGroundColor<< inline >> white')
        self.generate('}')
        self.generate('skinparam ActivityBarColor<<fork>> DarkGreen')
        self.generate('skinparam ActivityBarColor<<join>> DarkOrange')
        self.generate()
        generate_workflow_diagram(workflow_name, workflow_definition)
        # close the current workflow diagram
        self.generate('@enduml')
        self.close_file()
def generate_UML2_sequence_diagrams(self, topology_template):
def generate_sequence_diagram(policy_name, policy, trigger_name, trigger):
# open a file for each policy trigger
self.open_file('-%s-%s-sequence-diagram.plantuml' % (policy_name, trigger_name))
self.generate('@startuml')
self.generate('participant "%s\\n%s" as policy_trigger <<policy>>' % (policy_name, trigger_name))
for target in policy.get('targets', []):
if topology_template.get('node_templates', {}).get(target) != None:
stereotype = ' <<node>>'
elif topology_template.get('groups', {}).get(target) != None:
stereotype = ' <<group>>'
else:
stereotype = ''
#TODO self.generate('participant "%s" as target_%s' % (target, normalize_name(target)))
self.generate('participant "%s" as target%s' % (target, stereotype))
self.generate('?-> policy_trigger : %s' % trigger.get('event'))
self.generate('activate policy_trigger')
condition = trigger.get('condition')
if condition != None:
self.generate('note over policy_trigger, target : **condition**:\\n%s'
% self.yamlify_value(condition, ' ', ' ')
)
for action in trigger.get('action', []):
for activity_name, parameters in action.items():
if activity_name == 'call_operation':
target_participant = 'target'
if isinstance(parameters, dict):
message = parameters.get('operation')
message += '('
sep = ''
for input_name, input_value in parameters.get('inputs', {}).items():
message += sep
message += input_name
message += '='
if isinstance(input_value, dict):
value = input_value.get('value')
if value != None:
input_value = value
message += self.stringify_value(input_value)
sep = ', '
message += ')'
else:
message | |
# <gh_stars>100-1000
import os
import cv2
import glob
import h5py
import math
import torch
import imageio
import numpy as np
from .dataset import PoseRefinedDataset
from .process_spin import process_spin_data, write_to_h5py, read_spin_data
from .load_surreal import dilate_masks
from .utils.skeleton_utils import get_smpl_l2ws, rotate_y, skeleton3d_to_2d, create_kp_mask
from collections import OrderedDict
def extract_background(data_path, subject="S9"):
    """Estimate per-camera static backgrounds for a Human3.6M subject.

    For every non-chair frame, the pixels outside the person mask are
    accumulated; the final background is the per-pixel average over all
    frames where that pixel was background. Saved to
    ``{subject}_clean_bkgds_.npy`` under ``data_path`` and returned as a
    uint8 array of shape (num_cameras, H, W, 3).
    """
    import deepdish as dd
    mask_h5 = os.path.join(data_path, f"{subject}_mask_fixed.h5")
    cameras = ['54138969', '55011271', '58860488', '60457274']
    chair_seqs = ['Sitting-', 'Eating-', 'Phoning-', 'Smoking-']
    # the mask file also carries the list of frame paths ('index')
    mask_data = dd.io.load(mask_h5)
    mask_img_path = mask_data['index']
    H = W = mask_data['masks'].shape[-2]
    bkgds = np.zeros((len(cameras), H, W, 3), dtype=np.float32)
    mask_cnts = np.zeros((len(cameras), H, W, 1), dtype=np.float32)
    for frame_idx, img_path in enumerate(mask_img_path):
        # chair sequences are handled separately (see extract_chairs_background)
        if any(chair_seq in img_path for chair_seq in chair_seqs):
            continue
        img = imageio.imread(os.path.join(data_path, img_path))
        if img.shape[0] != H:
            # this camera has resolution 1002x1000, while others are 1000x1000
            img = img[1:-1, ...]
        cam_idx = next((e for e, camera in enumerate(cameras) if camera in img_path), None)
        if cam_idx is None:
            raise ValueError('Camera not found!')
        # accumulate background pixels (mask == 0) and how often each pixel was background
        mask = mask_data['masks'][frame_idx]
        bkgds[cam_idx] += (img / 255.) * (1 - mask)
        mask_cnts[cam_idx] += (1 - mask)
    bkgds = ((bkgds / np.maximum(mask_cnts, 1)) * 255.).astype(np.uint8)
    np.save(os.path.join(data_path, f"{subject}_clean_bkgds_.npy"), bkgds)
    return bkgds
def extract_chairs_background(data_path, subject="S9"):
    """Estimate per-camera backgrounds for the chair sequences.

    Unlike :func:`extract_background` (mask-weighted average over person-free
    pixels), this takes the pixel-wise median over all chair-sequence frames
    of each camera; the median suppresses the moving person without needing
    the segmentation mask.

    :param data_path: path to the h3.6m dataset root
    :param subject: subject directory, e.g. "S9"
    :return: uint8 array of shape (num_cameras, H, W, 3), also saved to
        ``{subject}_chair_bkgds_.npy`` under ``data_path``
    """
    import deepdish as dd
    mask_h5 = os.path.join(data_path, f"{subject}_mask_fixed.h5")
    cameras = ['54138969', '55011271', '58860488', '60457274']
    chair_seqs = ['Sitting-', 'Eating-', 'Phoning-', 'Smoking-']
    # the mask file doubles as the frame index for this subject
    mask_data = dd.io.load(mask_h5)
    mask_img_path = mask_data['index']
    H = mask_data['masks'].shape[-2]  # frames are square (H == W)
    bkgds = [[] for _ in range(len(cameras))]
    for img_path in mask_img_path:
        # keep only frames from chair sequences
        if not any(chair_seq in img_path for chair_seq in chair_seqs):
            continue
        # read and append img
        img = imageio.imread(os.path.join(data_path, img_path))
        if img.shape[0] != H:
            # this camera has resolution 1002x1000, while others are 1000x1000
            img = img[1:-1, ...]
        cam_idx = None
        for e, camera in enumerate(cameras):
            if camera in img_path:
                cam_idx = e
        if cam_idx is None:
            raise ValueError('Camera not found!')
        # NOTE: the original also loaded the segmentation mask here into an
        # unused local; the median below does not use it, so that dead load
        # has been removed.
        bkgds[cam_idx].append(img)
    # per-camera, per-pixel median over frames removes transient foreground
    bkgds = np.array([np.median(bkgd, axis=0) for bkgd in bkgds]).astype(np.uint8)
    np.save(os.path.join(data_path, f"{subject}_chair_bkgds_.npy"), bkgds)
    return bkgds
def process_h36m_data(data_path, subject="S9", ext_scale=0.001,
                      res=1.0, bbox_res=224, extend_iter=2, val_cam='54138969', camera_name=None):
    '''
    Assemble images, masks, backgrounds and SPIN pose estimates for one
    Human3.6M subject into a single processed h5 file.
    :param data_path: path to h3.6m dataset root
    :param subject: subject directory
    :param ext_scale: to scale human poses and camera location
    :param res: resolution scale factor (may be overridden by the mask file)
    :param bbox_res: resolution of bounding box when running the pose estimator
    :param extend_iter: extend mask to obtain sampling mask
    :param val_cam: use the data from this camera for validation
    :param camera_name: if given, use the single-camera SPIN/mask files
    '''
    import deepdish as dd
    # pick the SPIN estimate file matching the subject/camera combination
    if camera_name is None:
        spin_pickle = os.path.join(data_path, f"{subject}_SPIN_rect_output-maxmin.h5")
    elif subject != 'S1':
        spin_pickle = os.path.join(data_path, f"{subject}-camera=[{camera_name}]-subsample=5.h5")
    else:
        spin_pickle = os.path.join(data_path, f"{subject}-camera=[{camera_name}]-subsample=1.h5")
    # clean backgrounds first, chair backgrounds appended behind them; the
    # per-frame background index below adds a len(cameras) offset for chair
    # sequences to land in the chair half of this array
    bkgds = np.load(os.path.join(data_path, f"{subject.replace('s', '')}_clean_bkgds.npy"))
    chair_bkgds = np.load(os.path.join(data_path, f"{subject.replace('s', '')}_chair_bkgds.npy"))
    bkgds = np.concatenate([bkgds, chair_bkgds], axis=0)
    # handling mask if camera is not specified (since it's more manageable)
    if camera_name is None:
        mask_h5 = os.path.join(data_path, f"{subject}_mask_deeplab_crop.h5")
    else:
        mask_h5 = os.path.join(data_path, f"{subject}_{camera_name}_mask_deeplab_crop.h5")
    print(f"Load mask from {mask_h5}")
    mask_data = dd.io.load(mask_h5)
    mask_img_path = mask_data['index']
    mask_data['masks'] = mask_data['masks'].astype(np.uint8)
    # ensure masks have a trailing channel axis: (N, H, W, 1)
    if len(mask_data["masks"].shape) <= 3:
        mask_data['masks'] = mask_data['masks'][..., None]
    # binarize: labels >= 2 become foreground — presumably segmentation class
    # ids rather than 0/1 masks; TODO confirm against the mask-generation code
    max_val = mask_data['masks'].max()
    if max_val > 1:
        mask_data['masks'][mask_data['masks'] < 2] = 0
        mask_data['masks'][mask_data['masks'] >= 2] = 1
    # Do not check H because one of the camera has weird H
    H = W = mask_data['masks'].shape[-2]
    # NOTE(review): a 'res' entry in the mask file overrides the `res` argument
    if 'res' in mask_data:
        res = mask_data['res']
    if res != 1.0:
        # masks are stored downscaled by `res`; recover the full image size
        H = int(H / res)
        W = int(W / res)
        new_W, new_H = int(res * W), int(res * H)
        resized_bkgds = []
        for bkgd in bkgds:
            bkgd = cv2.resize(bkgd, (new_W, new_H), interpolation=cv2.INTER_AREA)
            resized_bkgds.append(bkgd)
        resized_bkgds = np.array(resized_bkgds)
        bkgds = resized_bkgds
    processed_est = read_spin_data(spin_pickle, ext_scale,
                                   img_res=H,
                                   bbox_res=bbox_res)
    if res != 1.0:
        # intrinsics scale with the image resolution
        processed_est['focals'] = processed_est['focals'] * res
        print(f"resolution {res}")
    # expand mask to regions that cover the estimated skeleton.
    mask_H, mask_W = mask_data['masks'][0].shape[:2]
    # projected 2D keypoints (currently not used further in this function)
    kp2ds = skeleton3d_to_2d(processed_est['kp3d'], processed_est['c2ws'],
                             mask_H, mask_W, processed_est['focals'])
    sampling_masks = dilate_masks(mask_data['masks'][..., 0], extend_iter)[..., None]
    mask_data['sampling_masks'] = sampling_masks
    imgs = []
    train_idxs, val_idxs = [] , []  # NOTE(review): never filled — written out empty
    cam_idxs = []
    # These are the camera used in h36m
    if subject != 'S1':
        cameras = ['54138969', '55011271', '58860488', '60457274']
    else:
        cameras = ['60457274']
    chair_seqs = ['Sitting-', 'Eating-', 'Phoning-', 'Smoking-']
    # * for views with chairs ...
    cam_idxs = []
    img_paths = processed_est['img_path']
    for i in range(len(img_paths)):
        img_path = img_paths[i]
        if (i + 1) % 100 == 0:
            print(f"{i}/{len(img_paths)}")
        # find background: chair sequences index into the chair half of `bkgds`
        offset = 0
        for chair_seq in chair_seqs:
            if chair_seq in img_path:
                offset = offset + len(cameras)
        cam_idx = None
        for e, camera in enumerate(cameras):
            if camera in img_path:
                cam_idx = e + offset
                break
        cam_idxs.append(cam_idx)
        # read and append img
        img = imageio.imread(os.path.join(data_path, img_path))
        if img.shape[0] != H:
            # this camera has resolution 1002x1000, while others are 1000x1000
            img = img[1:-1, ...]
        # TODO: verify nm flag
        # mask it out because we can have nice mask now
        #img = img * mask_data['masks'][i] + (1 - mask_data['masks'][i]) * bkgds[cam_idx]
        if res != 1.0:
            new_W, new_H = int(res * W), int(res * H)
            img = cv2.resize(img, (new_W, new_H), interpolation=cv2.INTER_AREA)
        imgs.append(img)
    data = {'imgs': np.array(imgs), #data["imgs"],
            'bkgd_idxs': np.array(cam_idxs),
            'train_idxs': np.array(train_idxs),
            'val_idxs': np.array(val_idxs),
            'bkgds': bkgds,
            'img_paths': mask_img_path,
            **mask_data, **processed_est}
    if camera_name is None:
        h5_name = f"{subject}_processed_h5py.h5"
    else:
        h5_name = f"{subject}_{camera_name}_processed_h5py.h5"
    print(f"WRITING H5 FILE TO {h5_name}, image size: {imgs[-1].shape}")
    write_to_h5py(os.path.join(data_path, h5_name), data)
    return data
def find_motion_set(img_paths):
    """Group frame paths by their motion-sequence directory.

    Paths are bytes like ``b'S9/Walking-1/imageSequence/...'``; the second
    path component identifies the motion set.

    Returns (set_dict, set_cnt, set_idxs): name -> ordinal, name -> count,
    and a per-frame array of set ordinals.
    """
    set_dict = OrderedDict()
    set_cnt = OrderedDict()
    set_idxs = []
    for path in img_paths:
        sequence = path.split(b'/')[1]
        if sequence in set_dict:
            set_cnt[sequence] += 1
        else:
            # first time we see this sequence: assign it the next ordinal
            set_dict[sequence] = len(set_dict)
            set_cnt[sequence] = 1
        set_idxs.append(set_dict[sequence])
    return set_dict, set_cnt, np.array(set_idxs)
def create_kp_mapping(set_dict, set_cnt, set_idxs, n_views=2):
    """
    map from multiple to a single view

    For each motion set, every group of ``n_views`` frames shares one pose:
    ``kp_map[i]`` indexes the shared pose (in the deduplicated arrays) for
    frame ``i``, and the returned unique indices select one representative
    frame per shared pose from the original arrays.
    """
    assert n_views % 2 == 0
    kp_map = []
    unique_indices = []
    acc_idx = 0      # running offset into the deduplicated pose arrays
    acc_unique = 0   # running offset into the original frame arrays
    for set_name in set_dict:
        n_original = set_cnt[set_name]
        n_shared = n_original // n_views
        offsets = np.arange(n_original) % n_shared
        kp_map.append(offsets + acc_idx)
        unique_indices.append(offsets + acc_unique)
        acc_idx += n_shared
        acc_unique += n_original
    return np.concatenate(kp_map), np.unique(np.concatenate(unique_indices))
def get_temporal_validity(img_paths):
    """Mark which frames have a valid temporal predecessor.

    A frame is invalid (0) when it is the very first frame or when its
    directory differs from the previous frame's (i.e. a new sequence
    starts). Also returns a per-frame sequence id.
    """
    n_frames = len(img_paths)
    valid = np.ones((n_frames,))
    seq_map = np.zeros((n_frames,), dtype=np.int32)
    seq_cnt = 0
    for i, img_path in enumerate(img_paths):
        if i == 0:
            # the first frame never has a predecessor; seq_map[0] stays 0
            valid[i] = 0
            continue
        if os.path.dirname(img_paths[i-1]) != os.path.dirname(img_path):
            # sequence boundary: no valid predecessor, start a new sequence id
            valid[i] = 0
            seq_cnt += 1
        seq_map[i] = seq_cnt
    return valid, seq_map
def map_data_to_n_views(img_paths, kp3d, bones, rest_pose,
                        n_views=4, avg_kps=True):
    """Remap per-view pose data so all n_views copies of a frame share one
    non-root pose (optionally averaged over views), keeping per-view roots.

    Returns (kp_map, kp_uidxs, kp3d, bones, skts), where skts are
    world-to-local transforms recomputed to match the remapped bones.
    """
    def set_root(k, k_unique, k_map, root_id=0):
        # Keep each entry's own root joint; replace the remaining joints
        # with the shared (or view-averaged) values.
        root = k[:, root_id:root_id+1]
        if not avg_kps:
            other_parts = k_unique[k_map, root_id+1:]
        else:
            print("avg kps")
            # Sum the non-root joints of all entries mapping to the same
            # unique slot, then divide by n_views to average.
            # NOTE(review): assumes each unique slot has exactly n_views
            # copies — holds when every set's count is divisible by n_views.
            other_parts = np.zeros_like(k_unique[:, root_id+1:])
            for i, k_idx in enumerate(k_map):
                other_parts[k_idx] = other_parts[k_idx] + k[i, root_id+1:]
            other_parts = other_parts / float(n_views)
            other_parts = other_parts[k_map]
        return np.concatenate([root, other_parts], axis=1)
    set_dict, set_cnt, set_idxs = find_motion_set(img_paths)
    kp_map, kp_uidxs = create_kp_mapping(set_dict, set_cnt, set_idxs, n_views=n_views)
    unique_bones = bones[kp_uidxs]
    unique_kp3d = kp3d[kp_uidxs]
    bones = set_root(bones, unique_bones, kp_map)
    kp3d = set_root(kp3d, unique_kp3d, kp_map)
    # set skts properly for rendering
    l2ws = np.array([get_smpl_l2ws(bone, rest_pose, scale=1.) for bone in bones])
    # assume root is at 0: translate all local-to-world mats by the root joint
    l2ws[..., :3, -1] = l2ws[..., :3, -1] + kp3d[:, 0:1].copy()
    skts = np.array([np.linalg.inv(l2w) for l2w in l2ws])
    print(f"Data remapped to {n_views}. Note that root-rotation is not with the kp3d anymore.")
    return kp_map, kp_uidxs, kp3d, bones, skts
def generate_bullet_time(c2w, n_views=20):
    """Spin a camera-to-world matrix around the y-axis through a full circle.

    Returns an array of ``n_views`` c2w matrices at evenly spaced angles
    in [0, 360) degrees.
    """
    angles = np.linspace(0, math.radians(360), n_views + 1)[:-1]
    return np.array([rotate_y(angle) @ c2w for angle in angles])
def save_masks(data_path, subject='S9'):
    """Export per-image masks from ``{subject}_processed.h5`` as 8-bit image
    files, mirroring the image directory layout but with 'imageSequence'
    replaced by 'Mask'."""
    import deepdish as dd
    data = dd.io.load(os.path.join(data_path, f"{subject}_processed.h5"))
    for img_path, mask in zip(data["img_paths"], data["masks"]):
        # e.g. S11/Directions-1/imageSequence/54138969/img_000001.jpg
        parts = img_path.split("/")
        mask_dir = os.path.join(*parts[:-1]).replace('imageSequence', 'Mask')
        os.makedirs(os.path.join(data_path, mask_dir), exist_ok=True)
        print(np.unique(mask))
        imageio.imwrite(os.path.join(data_path, mask_dir, parts[-1]),
                        (mask * 255).astype(np.uint8))
class H36MDataset(PoseRefinedDataset):
    # Rendering attributes consumed by the base dataset class:
    # take every `render_skip`-th frame, `N_render` frames in total.
    render_skip = 80
    N_render = 15
    # Per-subject paths to refined-pose archives. NOTE(review): the boolean
    # flag's meaning is defined by PoseRefinedDataset — confirm there before
    # relying on it.
    refined_paths = {
        'S9': ('data/h36m/S9_refined_64.tar', True),
        'S11': ('data/h36m/S11_refined_64.tar', True),
    }
def init_meta(self):
dataset = h5py.File(self.h5_path, 'r', swmr=True)
img_paths = dataset['img_paths'][:]
val_sets = ['Greeting-', 'Walking-', 'Posing-']
self._idx_map = None
if self.subject.endswith('c'):
idxs = []
for i, p in enumerate(img_paths):
seq = p.decode().split('/')[1]
if seq.endswith('-1'):
idxs.append(i)
self._idx_map = np.array(idxs)
elif self.split != 'full':
train_idxs, val_idxs = [], []
# check if an image blongs to validation set
for i, p in | |
import gym
import time
import numpy as np
import tensorflow as tf
from overcooked_ai_py.mdp.actions import Direction, Action
from overcooked_ai_py.mdp.overcooked_mdp import OvercookedGridworld
from overcooked_ai_py.mdp.overcooked_env import OvercookedEnv
from overcooked_ai_py.mdp.layout_generator import LayoutGenerator
from overcooked_ai_py.agents.agent import AgentFromPolicy, AgentPair, RandomAgent
from overcooked_ai_py.utils import load_pickle, save_pickle, load_dict_from_file
from human_aware_rl.utils import create_dir_if_not_exists, num_tf_params, get_max_iter
from baselines.ppo2.ppo2 import learn
from baselines.common.vec_env import VecEnvWrapper
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.models import register
class RewardShapingEnv(VecEnvWrapper):
    """
    Wrapper for the Baselines vectorized environment, which
    modifies the reward obtained to be a combination of intrinsic
    (dense, shaped) and extrinsic (sparse, from environment) reward.
    """

    def __init__(self, env, reward_shaping_factor=0.0):
        super().__init__(env)
        # Multiplier applied to the dense ('shaped_r') reward component.
        self.reward_shaping_factor = reward_shaping_factor
        self.env_name = "Overcooked-v0"

        ### Set various attributes to False; they will then be overwritten by various methods

        # Whether we want to query the actual action method from the agent class,
        # or we use direct_action. Might change things if there is post-processing
        # of actions returned, as in the Human Model
        self.use_action_method = False

        # Fraction of self-play actions/trajectories (depending on value of self.trajectory_sp)
        self.self_play_randomization = 0.0

        # Whether SP randomization should be done on a trajectory level
        self.trajectory_sp = False

        # Whether the model is supposed to output the joint action for all agents (centralized policy)
        # Joint action models are currently deprecated.
        self.joint_action_model = False

    def reset(self):
        return self.venv.reset()

    def step_wait(self):
        """Step the underlying envs and blend sparse + shaped rewards.

        Returns (obs, rew, done, infos) where each rew[i] has the env's
        dense 'shaped_r' reward added, scaled by reward_shaping_factor.
        On episode end, logs the blended episode return under
        infos[i]['episode']['r'].
        """
        obs, rew, done, infos = self.venv.step_wait()
        # Convert once up front (previously re-converted on every loop pass).
        rew = list(rew)
        for env_num in range(self.num_envs):
            dense_reward = infos[env_num]['shaped_r']
            rew[env_num] = rew[env_num] + float(dense_reward) * self.reward_shaping_factor
            if done[env_num]:
                # Log both sparse and dense rewards for the finished episode
                sparse_ep_rew = infos[env_num]['episode']['ep_sparse_r']
                dense_ep_rew = infos[env_num]['episode']['ep_shaped_r']
                infos[env_num]['episode']['r'] = sparse_ep_rew + dense_ep_rew * self.reward_shaping_factor
        return obs, rew, done, infos

    def update_reward_shaping_param(self, reward_shaping_factor):
        """Set the current reward shaping coefficient (annealed over training)."""
        self.reward_shaping_factor = reward_shaping_factor
class LinearAnnealer():
    """Linearly anneals a parameter from 1 down to 0 across ``horizon``
    timesteps; a zero horizon yields a constant 0."""

    def __init__(self, horizon):
        self.horizon = horizon

    def param_value(self, timestep):
        """Return the annealed coefficient in [0, 1] for ``timestep``."""
        if self.horizon == 0:
            return 0
        remaining = 1 - (timestep / self.horizon)
        value = remaining if remaining > 0 else 0
        assert 0 <= value <= 1
        return value
class DummyEnv(object):
    """
    Lightweight stand-in for a vectorized env: stores only the number of
    envs and the observation/action spaces, so baselines models can be
    saved and reloaded without a real environment.
    """

    def __init__(self, num_envs, observation_space, action_space):
        self.num_envs = num_envs
        self.observation_space = observation_space
        self.action_space = action_space

    def reset(self):
        # No-op: this env is never actually stepped.
        pass
########################
# UTILS AND HELPER FNS #
########################
# Registers the "conv_and_mlp" network type with Baselines via the decorator below.
@register("conv_and_mlp")
def conv_network_fn(**kwargs):
    """Used to register custom network type used by Baselines for Overcooked.

    Builds a conv stack (one 5x5 'same' layer, then 3x3 layers whose last
    uses 'valid' padding) followed by fully-connected leaky-ReLU layers.
    """
    # Baselines may nest the hyperparameters under "network_kwargs".
    if "network_kwargs" in kwargs.keys():
        params = kwargs["network_kwargs"]
    else:
        params = kwargs
    num_hidden_layers = params["NUM_HIDDEN_LAYERS"]
    size_hidden_layers = params["SIZE_HIDDEN_LAYERS"]
    num_filters = params["NUM_FILTERS"]
    num_convs = params["NUM_CONV_LAYERS"]
    def network_fn(X):
        print(X.shape)
        conv_out = tf.layers.conv2d(
            inputs=X,
            filters=num_filters,
            kernel_size=[5, 5],
            padding="same",
            activation=tf.nn.leaky_relu,
            name="conv_initial"
        )
        for i in range(0, num_convs - 1):
            # last conv switches to 'valid' padding (shrinks spatial dims)
            padding = "same" if i < num_convs - 2 else "valid"
            conv_out = tf.layers.conv2d(
                inputs=conv_out,
                filters=num_filters,
                kernel_size=[3, 3],
                padding=padding,
                activation=tf.nn.leaky_relu,
                name="conv_{}".format(i)
            )
        out = tf.layers.flatten(conv_out)
        for _ in range(num_hidden_layers):
            out = tf.layers.dense(out, size_hidden_layers, activation=tf.nn.leaky_relu)
        print("Last layer conv network output shape", out.shape)
        # NOTE: not sure if not supposed to add linear layer. I think it is though,
        # as things work and similar to code in baseline/models.py? Maybe double check later.
        # To check how many parameters uncomment next line
        # num_tf_params()
        return out
    return network_fn
def get_vectorized_gym_env(base_env, gym_env_name, agent_idx, featurize_fn=None, **kwargs):
    """
    Create a vectorized (SubprocVecEnv) one-player Overcooked gym env,
    wrapped with reward shaping; the other player is embedded in the env.

    base_env: an OvercookedEnv instance (fixed or variable map)
    kwargs must provide "RUN_TYPE" and "sim_threads" (number of parallel envs).
    """
    joint = kwargs["RUN_TYPE"] == "joint_ppo"

    def make_env():
        env = gym.make(gym_env_name)
        if joint:
            # Joint training: action space is squared vs. single-agent training
            env.custom_init(base_env, joint_actions=True, featurize_fn=featurize_fn,
                            baselines=True, agent_idx=agent_idx)
        else:
            env.custom_init(base_env, featurize_fn=featurize_fn,
                            baselines=True, agent_idx=agent_idx)
        return env

    return RewardShapingEnv(SubprocVecEnv([make_env] * kwargs["sim_threads"]))
def get_pbt_agent_from_config(save_dir=None, sim_threads=0, seed=0, agent_idx=0, best=False, agent_to_load_path=None):
    """Resolve the checkpoint path for a PBT agent (best or latest
    iteration) and load it. An explicit agent_to_load_path overrides the
    save_dir/seed/agent_idx lookup."""
    if agent_to_load_path is None:
        agent_folder = save_dir + 'seed_{}/agent{}'.format(seed, agent_idx)
        suffix = "/best" if best else "/pbt_iter" + str(get_max_iter(agent_folder))
        agent_to_load_path = agent_folder + suffix
    return get_agent_from_saved_model(agent_to_load_path, sim_threads)
def get_agent_from_saved_model(save_dir, sim_threads):
    """Get Agent corresponding to a saved model."""
    # NOTE: Could remove dependency on sim_threads if get the sim_threads from config or dummy env
    policies = get_model_policy_from_saved_model(save_dir, sim_threads)
    return AgentFromPolicy(*policies)
def get_agent_from_model(model, sim_threads, is_joint_action=False):
    """Get Agent corresponding to a loaded (in-memory) model."""
    policies = get_model_policy_from_model(model, sim_threads, is_joint_action=is_joint_action)
    return AgentFromPolicy(*policies)
def get_random_agent_model(sim_threads):
    """Return a RandomAgent configured for `sim_threads` parallel envs."""
    return RandomAgent(sim_threads)
def get_model_policy_from_saved_model(save_dir, sim_threads):
    """Get a policy function from a model in tf SavedModel format."""
    predictor = tf.contrib.predictor.from_saved_model(save_dir)

    def step_fn(obs):
        return predictor({"obs": obs})["action_probs"]

    return get_model_policy(step_fn, sim_threads)
def get_model_policy_from_model(model, sim_threads, is_joint_action=False):
    """Get a policy function from an in-memory baselines model."""
    step_fn = lambda obs: model.act_model.step(obs, return_action_probs=True)
    return get_model_policy(step_fn, sim_threads, is_joint_action=is_joint_action)
def get_model_policy(step_fn, sim_threads, is_joint_action=False):
    """
    Build the two policy views over `step_fn`.

    step_fn: maps a batch of observations to per-observation action
    probabilities.

    Returns (state_policy, encoded_state_policy): the former consumes an
    Overcooked mdp state object, the latter a batch of losslessly encoded
    states.
    """
    def encoded_state_policy(observations, stochastic=True, return_action_probs=False):
        """Map SIM_THREADS losslessly-encoded states to actions (or probs)."""
        probs_batch = step_fn(observations)
        if return_action_probs:
            return probs_batch
        if stochastic:
            chosen = [np.random.choice(len(Action.ALL_ACTIONS), p=p) for p in probs_batch]
        else:
            chosen = [np.argmax(p) for p in probs_batch]
        return np.array(chosen)

    def state_policy(mdp_state, mdp, agent_index, stochastic=True, return_action_probs=False):
        """Map one Overcooked state object to an action (or probs)."""
        obs = mdp.lossless_state_encoding(mdp_state)[agent_index]
        # Pad with zero observations up to the batch size the model expects,
        # then keep only the prediction for the real observation.
        batch = np.array([obs] + [np.zeros(obs.shape)] * (sim_threads - 1))
        probs = step_fn(batch)[0]
        if return_action_probs:
            return probs
        idx = np.random.choice(len(probs), p=probs) if stochastic else np.argmax(probs)
        if is_joint_action:
            # NOTE: Probably will break for this case, untested
            pair = Action.INDEX_TO_ACTION_INDEX_PAIRS[idx]
            return [Action.INDEX_TO_ACTION[i] for i in pair]
        return Action.INDEX_TO_ACTION[idx]

    return state_policy, encoded_state_policy
def create_model(env, agent_name, use_pretrained_weights=False, **kwargs):
    """Create a PPO model via baselines' `learn`.

    env: a dummy environment used to determine observation and action spaces.
    agent_name: the tf scope under which the agent's weights are saved.
    """
    model, _ = learn(network=kwargs["NETWORK_TYPE"],
                     env=env,
                     total_timesteps=1,
                     save_interval=0,
                     nsteps=kwargs["BATCH_SIZE"],
                     nminibatches=kwargs["MINIBATCHES"],
                     noptepochs=kwargs["STEPS_PER_UPDATE"],
                     scope=agent_name,
                     network_kwargs=kwargs)
    # Stash metadata used later by the save/load helpers.
    model.agent_name = agent_name
    model.dummy_env = env
    return model
def save_baselines_model(model, save_dir):
    """
    Save a baselines model into `save_dir/model` (overwriting any previous
    save), together with a pickled DummyEnv capturing the only env
    parameters needed to reload the model without a real environment.
    """
    create_dir_if_not_exists(save_dir)
    model.save(save_dir + "/model")
    env_stub = DummyEnv(model.dummy_env.num_envs,
                        model.dummy_env.observation_space,
                        model.dummy_env.action_space)
    save_pickle(env_stub, save_dir + "/dummy_env")
def load_baselines_model(save_dir, agent_name, config):
    """
    Reload a model written by save_baselines_model.

    NOTE: Before using load it might be necessary to clear the tensorflow
    graph if there are already other variables defined.
    """
    dummy_env = load_pickle(save_dir + "/dummy_env")
    model, _ = learn(network='conv_and_mlp',
                     env=dummy_env,
                     total_timesteps=0,
                     load_path=save_dir + "/model",
                     scope=agent_name,
                     network_kwargs=config)
    model.dummy_env = dummy_env
    return model
def update_model(env, model, population=None, ent_version=1, metric_np=None, **kwargs):
    """
    Train an existing model on `env`, which may differ from the env the
    model was created with (e.g. a different embedded opponent agent).

    Returns the run info produced by baselines' `learn`.
    """
    learn_kwargs = dict(
        network=kwargs["NETWORK_TYPE"],
        env=env,
        total_timesteps=kwargs["PPO_RUN_TOT_TIMESTEPS"],
        nsteps=kwargs["BATCH_SIZE"],
        ent_coef=kwargs["ENTROPY"],
        ent_pool_coef=kwargs["ENTROPY_POOL"],
        lr=kwargs["LR"],
        vf_coef=kwargs["VF_COEF"],
        max_grad_norm=kwargs["MAX_GRAD_NORM"],
        gamma=kwargs["GAMMA"],
        lam=kwargs["LAM"],
        nminibatches=kwargs["MINIBATCHES"],
        noptepochs=kwargs["STEPS_PER_UPDATE"],
        cliprange=kwargs["CLIPPING"],
        model_fn=lambda **_unused: model,  # reuse the existing model
        population=population,
        ent_version=ent_version,
        metric_np=metric_np,
        save_interval=0,
        log_interval=1,
        network_kwargs=kwargs,
    )
    _updated_model, run_info = learn(**learn_kwargs)
    return run_info
def overwrite_model(model_from, model_to):
    """Copy all trainable variables of `model_from` onto `model_to`."""
    overwrite_variables(tf.trainable_variables(model_from.scope),
                        tf.trainable_variables(model_to.scope))
def overwrite_variables(variables_to_copy, variables_to_overwrite):
    """Assign each source variable's value onto the corresponding target
    variable in the default tf session."""
    assert len(variables_to_copy) == len(variables_to_overwrite), 'number of variables loaded mismatches len(variables)'
    assigns = [dst.assign(src)
               for src, dst in zip(variables_to_copy, variables_to_overwrite)]
    tf.get_default_session().run(assigns)
############################
#### DEPRECATED METHODS ####
############################
def get_model_value_fn(model, sim_threads, debug=False):
"""Returns the estimated value function `V(s, index)` from a saved model at `save_dir`."""
print(model)
def value_fn(mdp_state, mdp, agent_index):
obs = mdp.lossless_state_encoding(mdp_state, debug=debug)[agent_index]
padded_obs = np.array([obs] | |
Exception as e:
# NOTE(Vek): Ignoring exceptions here is safe, because the
# usage resynchronization and the reservation expiration
# mechanisms will resolve the issue. The exception is
# logged, however, because this is less than optimal.
msg = str(e)
LOG.exception(_LE("Failed to roll back reservations"
" %(reservations)s, exception %(msg)s"),
{'reservations': reservations, 'msg': msg})
def destroy_by_project(self, context, project_id):
"""Destroy all quota limits associated with a project.
:param context: The request context, for access checks.
:param project_id: The ID of the project being deleted.
"""
self._driver.destroy_by_project(context, project_id)
def expire(self, context):
"""Expire reservations.
Explores all currently existing reservations and rolls back
any that have expired.
:param context: The request context, for access checks.
"""
self._driver.expire(context)
    def add_volume_type_opts(self, context, opts, volume_type_id):
        """Add volume type resource options.
        Adds elements to the opts hash for volume type quotas.
        If a resource is being reserved ('gigabytes', etc) and the volume
        type is set up for its own quotas, these reservations are copied
        into keys for 'gigabytes_<volume type name>', etc.
        :param context: The request context, for access checks.
        :param opts: The reservations options hash.
        :param volume_type_id: The volume type id for this reservation.
        """
        if not volume_type_id:
            return
        # NOTE(jdg): set inactive to True in volume_type_get, as we
        # may be operating on a volume that was created with a type
        # that has since been deleted.
        # quota based on volume_type is not supported currently
        # NOTE(review): with the block below commented out, this method is
        # effectively a no-op after the volume_type_id guard.
        # volume_type = db_api.volume_type_get(context, volume_type_id, True)
        # for quota in ('volumes', 'gigabytes', 'snapshots'):
        #     if quota in opts:
        #         vtype_quota = "%s_%s" % (quota, volume_type['name'])
        #         opts[vtype_quota] = opts[quota]
@property
def resource_names(self):
return sorted(self.resources.keys())
@property
def resources(self):
return self._resources
class AllQuotaEngine(QuotaEngine):
    """Represent the set of all quotas."""

    @property
    def resources(self):
        """Build the full mapping of quota resource name -> resource.

        Covers reservable (global), absolute, and countable quotas.
        """
        # Global quotas.
        # Set sync_func to None for no sync function in Trio2o
        reservable_argses = [
            ('instances', None, 'quota_instances'),
            ('cores', None, 'quota_cores'),
            ('ram', None, 'quota_ram'),
            ('security_groups', None, 'quota_security_groups'),
            ('floating_ips', None, 'quota_floating_ips'),
            ('fixed_ips', None, 'quota_fixed_ips'),
            ('server_groups', None, 'quota_server_groups'),
            ('volumes', None, 'quota_volumes'),
            ('per_volume_gigabytes', None, 'per_volume_size_limit'),
            ('snapshots', None, 'quota_snapshots'),
            ('gigabytes', None, 'quota_gigabytes'),
            ('backups', None, 'quota_backups'),
            ('backup_gigabytes', None, 'quota_backup_gigabytes'),
            ('consistencygroups', None, 'quota_consistencygroups')
        ]
        absolute_argses = [
            ('metadata_items', 'quota_metadata_items'),
            ('injected_files', 'quota_injected_files'),
            ('injected_file_content_bytes',
             'quota_injected_file_content_bytes'),
            ('injected_file_path_bytes',
             'quota_injected_file_path_length'),
        ]
        # TODO(joehuang), for countable, the count should be the
        # value in the db but not 0 here
        countable_argses = [
            ('security_group_rules', None, 'quota_security_group_rules'),
            ('key_pairs', None, 'quota_key_pairs'),
            ('server_group_members', None, 'quota_server_group_members'),
        ]
        result = {}
        groups = ((ReservableResource, reservable_argses),
                  (AbsoluteResource, absolute_argses),
                  (CountableResource, countable_argses))
        for factory, argses in groups:
            for args in argses:
                resource = factory(*args)
                result[resource.name] = resource
        return result

    def register_resource(self, resource):
        """The resource set is fixed; registration is unsupported."""
        raise NotImplementedError(_("Cannot register resource"))

    def register_resources(self, resources):
        """The resource set is fixed; registration is unsupported."""
        raise NotImplementedError(_("Cannot register resources"))
# Module-level singleton engine exposing every quota resource.
QUOTAS = AllQuotaEngine()
class QuotaSetOperation(object):
    """Operation on Quota set.

    Bundles show/update/delete helpers for the quota set of one tenant
    (optionally scoped to a single user).
    """
    def __init__(self, target_tenant_id, user_id=None):
        # Tenant whose quota set this operation targets.
        self.target_tenant_id = target_tenant_id
        # Optional user id for per-user quota scoping.
        self.user_id = user_id
    # used in test
    def update_hierarchy(self, target_tenant_id, user_id=None):
        # Re-point this operation at a different tenant/user (test helper).
        self.target_tenant_id = target_tenant_id
        self.user_id = user_id
    class GenericProjectInfo(object):
        """Abstraction layer for Keystone V2 and V3 project objects"""
        def __init__(self, project_id, project_keystone_api_version,
                     project_parent_id=None, project_subtree=None):
            # Project id.
            self.id = project_id
            # Keystone API version; used elsewhere to branch hierarchy handling.
            self.keystone_api_version = project_keystone_api_version
            # Immediate parent project id (hierarchical projects only).
            self.parent_id = project_parent_id
            # Nested dict of descendant projects, as returned by keystone.
            self.subtree = project_subtree
def _format_quota_set(self, tenant_id, quota_set):
"""Convert the quota object to a result dict."""
quota_set['id'] = str(tenant_id)
return dict(quota_set=quota_set)
def _keystone_client(self, context):
c = client.Client()
return c.get_keystone_client_by_context(context)
def _validate_existing_resource(self, key, value, quota_values):
if key == 'per_volume_gigabytes':
return
v = quota_values.get(key, {})
_usage = v.get('in_use', 0) + v.get('reserved', 0)
if value < _usage:
msg = _("Quota %(key)s limit %(value)d must be equal or "
"greater than existing resources"
"%(_usage)d.") % {'key': key, 'value': value,
'_usage': _usage}
LOG.error(msg=msg)
raise t_exceptions.ValidationError(msg=msg)
    @staticmethod
    def _validate_integer(value, name, min_value=None, max_value=None):
        """Make sure that value is a valid integer, potentially within range.
        :param value: the value of the integer
        :param name: the name of the integer
        :param min_value: inclusive lower bound for the value, if any
        :param max_value: inclusive upper bound for the value, if any
        :returns: integer
        :raises ValidationError: if value is not an integer or out of range
        """
        try:
            value = int(value)
        except (TypeError, ValueError, UnicodeEncodeError):
            msg = _('%s must be an integer.') % name
            LOG.error(msg=msg)
            raise t_exceptions.ValidationError(msg=msg)
        if min_value is not None and value < min_value:
            msg = _('%(value_name)s must be >= '
                    '%(min_value)d') % {'value_name': name,
                                        'min_value': min_value}
            LOG.error(msg=msg)
            raise t_exceptions.ValidationError(msg=msg)
        if max_value is not None and value > max_value:
            msg = _('%(value_name)s must be <= '
                    '%(max_value)d') % {'value_name': name,
                                        'max_value': max_value}
            LOG.error(msg=msg)
            raise t_exceptions.ValidationError(msg=msg)
        return value
def _validate_quota_limit(self, quota, key, project_quotas=None,
parent_project_quotas=None):
limit = self._validate_integer(quota[key], key, min_value=-1,
max_value=cons.MAX_INT)
if parent_project_quotas:
free_quota = (parent_project_quotas[key]['limit'] -
parent_project_quotas[key]['in_use'] -
parent_project_quotas[key]['reserved'] -
parent_project_quotas[key].get('allocated', 0))
current = 0
if project_quotas.get(key):
current = project_quotas[key]['limit']
if limit - current > free_quota:
msg = _("Free quota available is %s.") % free_quota
LOG.error(msg=msg)
raise t_exceptions.ValidationError(msg=msg)
return limit
def _get_quotas(self, context, id, usages=False, parent_project_id=None):
values = QUOTAS.get_project_quotas(
context, id, usages=usages,
parent_project_id=parent_project_id)
if usages:
return values
else:
return {k: v['limit'] for k, v in values.items()}
    def _authorize_update_or_delete(self, context_project,
                                    target_project_id,
                                    parent_id):
        """Checks if update or delete are allowed in the current hierarchy.
        With hierarchical projects, only the admin of the parent or the root
        project has privilege to perform quota update and delete operations.
        :param context_project: The project in which the user is scoped to.
        :param target_project_id: The id of the project in which the
                                  user want to perform an update or
                                  delete operation.
        :param parent_id: The parent id of the project in which the user
                          want to perform an update or delete operation.
        """
        # Diagnostic suffix appended to every rejection message.
        param_msg = _("context_project.parent_id = %(ctx_parent_id)s, "
                      "parent_id = %(parent_id)s, "
                      "context_project.id = %(ctx_project_id)s, "
                      "target_project_id = "
                      "%(target_project_id)s, ") % {
            "ctx_parent_id": context_project.parent_id,
            "parent_id": parent_id,
            "ctx_project_id": context_project.id,
            "target_project_id": target_project_id}
        # A non-root scope may only act on its direct children.
        if context_project.parent_id and parent_id != context_project.id:
            msg = _("Update and delete quota operations can only be made "
                    "by an admin of immediate parent or by the CLOUD admin."
                    "%s") % param_msg
            LOG.error(msg=msg)
            raise t_exceptions.HTTPForbiddenError(msg=msg)
        if context_project.id != target_project_id:
            # Target must be somewhere in the scoped project's subtree.
            if not self._is_descendant(target_project_id,
                                       context_project.subtree):
                msg = _("Update and delete quota operations can only be made "
                        "to projects in the same hierarchy of the project in "
                        "which users are scoped to."
                        "%s") % param_msg
                LOG.error(msg=msg)
                raise t_exceptions.HTTPForbiddenError(msg=msg)
        else:
            # A project cannot update/delete its own quota; only its parent
            # admin (or the cloud admin) can.
            msg = _("Update and delete quota operations can only be made "
                    "by an admin of immediate parent or by the CLOUD admin."
                    "%s") % param_msg
            LOG.error(msg=msg)
            raise t_exceptions.HTTPForbiddenError(msg=msg)
    def _authorize_show(self, context_project, target_project):
        """Checks if show is allowed in the current hierarchy.
        With hierarchical projects, are allowed to perform quota show operation
        users with admin role in, at least, one of the following projects: the
        current project; the immediate parent project; or the root project.
        :param context_project: The project in which the user
                                is scoped to.
        :param target_project: The project in which the user wants
                               to perform a show operation.
        """
        # Diagnostic suffix appended to every rejection message.
        param_msg = _("target_project.parent_id = %(target_parent_id)s, "
                      "target_project_id = %(target_project_id)s, "
                      "context_project.id = %(ctx_project_id)s, "
                      "context_project.parent_id = %(ctx_parent_id)s, ") % {
            "target_parent_id": target_project.parent_id,
            "target_project_id": target_project.id,
            "ctx_project_id": context_project.id,
            "ctx_parent_id": context_project.parent_id}
        if target_project.parent_id:
            # Target is a subproject: showing is allowed for itself, an
            # ancestor in scope, or its immediate parent / the root.
            if target_project.id != context_project.id:
                if not self._is_descendant(target_project.id,
                                           context_project.subtree):
                    msg = _("Show operations can only be made to projects in "
                            "the same hierarchy of the project in which users "
                            "are scoped to."
                            "%s") % param_msg
                    LOG.error(msg=msg)
                    raise t_exceptions.HTTPForbiddenError(msg=msg)
                if context_project.id != target_project.parent_id:
                    if context_project.parent_id:
                        msg = _("Only users with token scoped to immediate "
                                "parents or root projects are allowed to see "
                                "its children quotas."
                                "%s") % param_msg
                        LOG.error(msg=msg)
                        raise t_exceptions.HTTPForbiddenError(msg=msg)
        elif context_project.parent_id:
            # Target is a root project: subproject scopes may not inspect it.
            msg = _("An user with a token scoped to a subproject is not "
                    "allowed to see the quota of its parents."
                    "%s") % param_msg
            LOG.error(msg=msg)
            raise t_exceptions.HTTPForbiddenError(msg=msg)
def _is_descendant(self, target_project_id, subtree):
if subtree is not None:
for key, value in subtree.items():
if key == target_project_id:
return True
if self._is_descendant(target_project_id, value):
return True
return False
def _get_project(self, context, id, subtree_as_ids=False):
"""A Helper method to get the project hierarchy.
Along with Hierachical Multitenancy in keystone API v3, projects can be
hierarchically organized. Therefore, we need to know the project
hierarchy, if any, in order to do quota operations properly.
"""
try:
keystone = self._keystone_client(context)
generic_project = self.GenericProjectInfo(id, keystone.version)
if keystone.version == | |
from typing import Dict, List, Optional, Tuple, Union
import typing
import numpy as np
from ConfigSpace.hyperparameters import \
CategoricalHyperparameter, UniformFloatHyperparameter, UniformIntegerHyperparameter,Constant
import sklearn.gaussian_process.kernels
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
from sklearn.exceptions import NotFittedError
from skopt.learning.gaussian_process.kernels import Kernel
from skopt.learning.gaussian_process import GaussianProcessRegressor
from xbbo.configspace.space import DenseConfigurationSpace
from xbbo.surrogate.gp_prior import Prior, SoftTopHatPrior, TophatPrior
class SurrogateModel(object):
"""Abstract implementation of the Model API.
**Note:** The input dimensionality of Y for training and the output dimensions
of all predictions (also called ``n_objectives``) depends on the concrete
implementation of this abstract class.
Attributes
----------
instance_features : np.ndarray(I, K)
Contains the K dimensional instance features
of the I different instances
pca : sklearn.decomposition.PCA
Object to perform PCA
pca_components : float
Number of components to keep or None
n_feats : int
Number of instance features
n_params : int
Number of parameters in a configuration (only available after train has
been called)
scaler : sklearn.preprocessing.MinMaxScaler
Object to scale data to be withing [0, 1]
var_threshold : float
Lower bound vor variance. If estimated variance < var_threshold, the set
to var_threshold
types : list
If set, contains a list with feature types (cat,const) of input vector
"""
def __init__(self,
types: np.ndarray,
bounds: typing.List[typing.Tuple[float, float]],
instance_features: np.ndarray = None,
pca_components: float = None,
**kwargs):
"""Constructor
Parameters
----------
types : np.ndarray (D)
Specifies the number of categorical values of an input dimension where
the i-th entry corresponds to the i-th input dimension. Let's say we
have 2 dimension where the first dimension consists of 3 different
categorical choices and the second dimension is continuous than we
have to pass np.array([2, 0]). Note that we count starting from 0.
bounds : list
Specifies the bounds for continuous features.
instance_features : np.ndarray (I, K)
Contains the K dimensional instance features
of the I different instances
pca_components : float
Number of components to keep when using PCA to reduce
dimensionality of instance features. Requires to
set n_feats (> pca_dims).
"""
self.instance_features = instance_features
self.pca_components = pca_components
if instance_features is not None:
self.n_feats = instance_features.shape[1]
else:
self.n_feats = 0
self.n_params = None # will be updated on train()
self.pca = None
self.scaler = None
if self.pca_components and self.n_feats > self.pca_components:
self.pca = PCA(n_components=self.pca_components)
self.scaler = MinMaxScaler()
# Never use a lower variance than this
self.var_threshold = 10**-5
self.bounds = bounds
self.types = types
# Initial types array which is used to reset the type array at every call to train()
self._initial_types = types.copy()
self.do_optimize = kwargs.get('do_optimize', True)
    def train(self, X: np.ndarray, Y: np.ndarray) -> 'SurrogateModel':
        """Trains the Model on X and Y.
        Parameters
        ----------
        X : np.ndarray [n_samples, n_features (config + instance features)]
            Input data points.
        Y : np.ndarray [n_samples, n_objectives]
            The corresponding target values. n_objectives must match the
            number of target names specified in the constructor.
        Returns
        -------
        self : AbstractModel
        """
        # Reset types: the PCA branch below may have shrunk it last time.
        self.types = self._initial_types.copy()
        if len(X.shape) != 2:
            raise ValueError('Expected 2d array, got %dd array!' %
                             len(X.shape))
        if X.shape[1] != len(self.types):
            raise ValueError(
                'Feature mismatch: X should have %d features, but has %d' %
                (X.shape[1], len(self.types)))
        if X.shape[0] != Y.shape[0]:
            raise ValueError('X.shape[0] (%s) != y.shape[0] (%s)' %
                             (X.shape[0], Y.shape[0]))
        # Leading columns are config params; trailing n_feats are instance features.
        self.n_params = X.shape[1] - self.n_feats
        # reduce dimensionality of features of larger than PCA_DIM
        # (only once there are more samples than PCA components)
        if self.pca and X.shape[0] > self.pca.n_components:
            X_feats = X[:, -self.n_feats:]
            # scale features to [0, 1] before PCA
            X_feats = self.scaler.fit_transform(X_feats)
            X_feats = np.nan_to_num(X_feats)  # if features with max == min
            # PCA
            X_feats = self.pca.fit_transform(X_feats)
            X = np.hstack((X[:, :self.n_params], X_feats))
            if hasattr(self, "types"):
                # for RF, adapt types list: PCA outputs are continuous (0)
                # if X_feats.shape[0] < self.pca, X_feats.shape[1] ==
                # X_feats.shape[0]
                self.types = np.array(
                    np.hstack((self.types[:self.n_params],
                               np.zeros((X_feats.shape[1])))),
                    dtype=np.uint,
                )
        return self._train(X, Y)
def _train(self, X: np.ndarray, Y: np.ndarray, **kwargs) -> 'SurrogateModel':
"""Trains the random forest on X and y.
Parameters
----------
X : np.ndarray [n_samples, n_features (config + instance features)]
Input data points.
Y : np.ndarray [n_samples, n_objectives]
The corresponding target values. n_objectives must match the
number of target names specified in the constructor.
Returns
-------
self
"""
raise NotImplementedError
    def predict(
        self,
        X: np.ndarray,
        cov_return_type: typing.Optional[str] = 'diagonal_cov'
    ) -> typing.Tuple[np.ndarray, np.ndarray]:
        """
        Predict means and variances for given X.
        Parameters
        ----------
        X : np.ndarray of shape = [n_samples, n_features (config + instance features)]
            Training samples
        Returns
        -------
        means : np.ndarray of shape = [n_samples, n_objectives]
            Predictive mean
        vars : np.ndarray of shape = [n_samples, n_objectives]
            Predictive variance
        """
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=UserWarning)
            # Validate against the untransformed feature count first.
            if len(X.shape) != 2:
                raise ValueError('Expected 2d array, got %dd array!' %
                                 len(X.shape))
            if X.shape[1] != len(self._initial_types):
                raise ValueError(
                    'Rows in X should have %d entries but have %d!' %
                    (len(self._initial_types), X.shape[1]))
            # Apply the same scaling + PCA used at train time, if any.
            if self.pca:
                try:
                    X_feats = X[:, -self.n_feats:]
                    X_feats = self.scaler.transform(X_feats)
                    X_feats = self.pca.transform(X_feats)
                    X = np.hstack((X[:, :self.n_params], X_feats))
                except NotFittedError:
                    pass  # PCA not fitted if only one training sample
            # Re-validate against the (possibly PCA-reduced) feature count.
            if X.shape[1] != len(self.types):
                raise ValueError(
                    'Rows in X should have %d entries but have %d!' %
                    (len(self.types), X.shape[1]))
            mean, var = self._predict(X, cov_return_type=cov_return_type)
        if cov_return_type is None:
            return mean, var
        # Normalize 1-D outputs to column vectors.
        if len(mean.shape) == 1:
            mean = mean.reshape((-1, 1))
        if len(var.shape) == 1:
            var = var.reshape((-1, 1))
        return mean, var
def _predict(
self,
X: np.ndarray,
cov_return_type: typing.Optional[str] = 'diagonal_cov'
) -> typing.Tuple[np.ndarray, np.ndarray]:
"""
Predict means and variances for given X.
Parameters
----------
X : np.ndarray
[n_samples, n_features (config + instance features)]
Returns
-------
means : np.ndarray of shape = [n_samples, n_objectives]
Predictive mean
vars : np.ndarray of shape = [n_samples, n_objectives]
Predictive variance
"""
raise NotImplementedError()
def predict_marginalized_over_instances(
self, X: np.ndarray, cov_return_type: typing.Optional[str] = 'diagonal_cov') -> typing.Tuple[np.ndarray, np.ndarray]:
"""Predict mean and variance marginalized over all instances.
Returns the predictive mean and variance marginalised over all
instances for a set of configurations.
Parameters
----------
X : np.ndarray
[n_samples, n_features (config)]
Returns
-------
means : np.ndarray of shape = [n_samples, 1]
Predictive mean
vars : np.ndarray of shape = [n_samples, 1]
Predictive variance
"""
if len(X.shape) != 2:
raise ValueError('Expected 2d array, got %dd array!' %
len(X.shape))
if X.shape[1] != len(self.types):
raise ValueError('Rows in X should have %d entries but have %d!' %
(len(self.types), X.shape[1]))
if self.instance_features is None or \
len(self.instance_features) == 0:
mean, var = self.predict(X, cov_return_type)
var[var < self.var_threshold] = self.var_threshold
var[np.isnan(var)] = self.var_threshold
return mean, var
else:
n_instances = len(self.instance_features)
mean = np.zeros(X.shape[0])
var = np.zeros(X.shape[0])
for i, x in enumerate(X):
X_ = np.hstack((np.tile(x,
(n_instances, 1)), self.instance_features))
means, vars = self.predict(X_, cov_return_type)
# VAR[1/n (X_1 + ... + X_n)] =
# 1/n^2 * ( VAR(X_1) + ... + VAR(X_n))
# for independent X_1 ... X_n
var_x = np.sum(vars) / (len(vars)**2)
if var_x < self.var_threshold:
var_x = self.var_threshold
var[i] = var_x
mean[i] = np.mean(means)
if len(mean.shape) == 1:
mean = mean.reshape((-1, 1))
if len(var.shape) == 1:
var = var.reshape((-1, 1))
return mean, var
def update_weight(self, w, rho=None):
pass
class BaseGP(SurrogateModel):
def __init__(self,
configspace: DenseConfigurationSpace,
types: List[int],
bounds: List[Tuple[float, float]],
rng: np.random.RandomState,
normalize_y: bool = True,
instance_features: Optional[np.ndarray] = None,
pca_components: Optional[int] = None,
**kwargs):
"""
Abstract base class for all Gaussian process models.
"""
super().__init__(types=types,
bounds=bounds,
instance_features=instance_features,
pca_components=pca_components,
**kwargs)
self.configspace = configspace
self.rng = rng
self.normalize_y = normalize_y
kernel = kwargs.get('kernel')
self.kernel = kernel if kernel else self._get_kernel()
self.gp = self._get_gp()
self.conditional = dict() # type: Dict[int, bool]
self.impute_values = dict() # type: Dict[int, float]
def _get_kernel(self) -> Kernel:
raise NotImplementedError()
def _get_gp(self) -> GaussianProcessRegressor:
raise NotImplementedError()
def _normalize_y(self, y: np.ndarray) -> np.ndarray:
"""Normalize data to zero mean unit standard deviation.
Parameters
----------
y : np.ndarray
Targets for the Gaussian process
Returns
-------
np.ndarray
"""
self.mean_y_ = np.mean(y)
self.std_y_ = np.std(y)
if self.std_y_ == 0:
self.std_y_ = 1
return (y - self.mean_y_) / self.std_y_
def _untransform_y(
self,
y: np.ndarray,
var: Optional[np.ndarray] = None,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""Transform zeromean unit standard deviation data into the regular space.
This function should be used after a prediction with the Gaussian process which was trained on normalized data.
Parameters
----------
y : np.ndarray
Normalized data.
var : np.ndarray (optional)
Normalized variance
Returns
-------
np.ndarray on Tuple[np.ndarray, np.ndarray]
"""
y = y | |
# Due to a rename, the Episode table is called Event in the database.
IndexedTable(EPISODE, 'ev', read_units=200, write_units=10, name_in_db="Event",
columns=[HashKeyColumn('episode_id', 'ei', 'S'),
Column('parent_ep_id', 'pa', 'S', SecondaryIndexer(), read_only=True),
Column('user_id', 'ui', 'N', SecondaryIndexer(), read_only=True),
Column('viewpoint_id', 'vi', 'S', SecondaryIndexer(), read_only=True),
Column('publish_timestamp', 'pu', 'N'),
Column('timestamp', 'cr', 'N'),
Column('title', 'ti', 'S'),
Column('description', 'de', 'S'),
LatLngColumn('location', 'lo'),
PlacemarkColumn('placemark', 'pl')]),
# Sorts all viewpoints followed by a user in order of the date of
# on which the last activity was added. Viewpoints updated on the
# same day are in undefined order. Sort is in descending order, with
# viewpoints most recently updated coming first. The query_followed
# method returns results in this ordering. Note that paging may result
# in missed followed records, as updates to a viewpoint may cause the
# corresponding record to "jump ahead" in time past the current paging
# bookmark. 'date_updated' is a timestamp truncated to a day boundary.
# 'sort_key' is a concatenation of the 'date_updated' field and the
# viewpoint id.
IndexedTable(FOLLOWED, 'fd', read_units=200, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('sort_key', 'sk', 'S'),
Column('date_updated', 'du', 'N'),
Column('viewpoint_id', 'vi', 'S', read_only=True)]),
# Key is a composite of (user-id, viewpoint-id). The 'labels' set
# specifies the features of the relation between the user and
# viewpoint: ('admin', 'contribute'). 'adding_user_id' contains the id
# of the user who added this follower, and 'timestamp' the time at which
# the follower was added. 'viewed_seq' is the sequence number of the last
# viewpoint update that has been 'read' by this follower. The last
# viewpoint update is tracked by the 'update_seq' attribute on Viewpoint.
IndexedTable(FOLLOWER, 'fo', read_units=400, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('viewpoint_id', 'vi', 'S', SecondaryIndexer()),
Column('timestamp', 'ti', 'N'),
Column('adding_user_id', 'aui', 'N'),
SetColumn('labels', 'la', 'SS'),
Column('viewed_seq', 'vs', 'N')]),
# Key is composite of user-id / friend-id. "colocated_shares" and
# "total_shares" are decaying stats that track the number of photo
# opportunities where sharing occurred. 'last_colocated' and
# 'last_share' are timestamps for computing decay. Friend status is
# one of {friend,blocked,muted}.
Table(FRIEND, 'fr', read_units=50, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('friend_id', 'fi', 'N'),
Column('name', 'na', 'S'),
Column('nickname', 'nn', 'S'),
Column('colocated_shares', 'cs', 'N'),
Column('last_colocated', 'lc', 'N'),
Column('total_shares', 'ts', 'N'),
Column('last_share', 'ls', 'N'),
Column('status', 'st', 'S')]),
# Tracks the number of incorrect attempts that have been made to guess some
# secret, such as a password or an access code. 'guess_id' is of the form
# <type>:<id>, where <type> is one of these:
#
# url:<group-id> - Limits number of attempts that can be made to guess a
# valid ShortURL within any particular 24-hour period.
#
# pw:<user-id> - Limits number of attempts that can be made to guess a
# particular user's password within any particular 24-hour
# period.
#
# em:<user-id> - Limits number of attempts that can be made to guess
# access tokens e-mailed to a particular user within any
# particular 24-hour period.
#
# ph:<user-id> - Limits number of attempts that can be made to guess
# access tokens sent in SMS messages to a user within any
# particular 24-hour period.
#
# The 'guesses' field tracks the number of incorrect guesses that have been
# made so far. The 'expires' field stores the time at which the guesses count
# can be reset to 0.
Table(GUESS, 'gu', read_units=50, write_units=10,
columns=[HashKeyColumn('guess_id', 'gi', 'S'),
Column('expires', 'ex', 'N'),
Column('guesses', 'gu', 'N')]),
# Key is a composite of (group_key, timestamp), where group_key is the
# same key used to collect machine metrics in the metrics table. The
# intention is that for each metrics group_key, a single health report
# will be generated summarizing problems across all machines in that group.
#
# Alerts and Warnings are string sets which describe any problems detected
# from the metrics information. If no problems are detected, this record
# will be sparse.
Table(HEALTH_REPORT, 'hr', read_units=10, write_units=5,
columns=[HashKeyColumn('group_key', 'gk', 'S'),
RangeKeyColumn('timestamp', 'ts', 'N'),
SetColumn('alerts', 'as', 'SS'),
SetColumn('warnings', 'ws', 'SS')]),
# Key is ID type (e.g. op-id, photo-id, user-id, episode-id).
Table(ID_ALLOCATOR, 'ia', read_units=10, write_units=10,
columns=[HashKeyColumn('id_type', 'it', 'S'),
Column('next_id', 'ni', 'N')]),
# Key is identity. User-id is indexed to provide quick queries for the
# list of identities associated with a viewfinder account. The token
# allows access to external resources associated with the identity.
# 'last_fetch' specifies the last time that the contacts were
# fetched for this identity. 'authority' is one of ('Facebook', 'Google'
# 'Viewfinder', etc.) and identifies the trusted authentication authority.
#
# The complete set of attributes (if any) returned when an
# identity was authenticated is stored as a json-encoded dict in
# 'json_attrs'. Some of these may be taken to populate the
# demographic and informational attributes of the User table.
#
# The 'access_token' and 'refresh_token' fields store any tokens used to
# access the authority, with 'expires' tracking the lifetime of the
# token.
#
# The 'auth_throttle' field limits the number of auth email/sms messages
# that can be sent within a certain period of time.
IndexedTable(IDENTITY, 'id', read_units=50, write_units=10,
columns=[HashKeyColumn('key', 'ke', 'S'),
Column('user_id', 'ui', 'N', SecondaryIndexer()),
JSONColumn('json_attrs', 'ja'),
Column('last_fetch', 'lf', 'N'),
Column('authority', 'au', 'S'),
Column('access_token', 'at', 'S'),
Column('refresh_token', 'rt', 'S'),
Column('expires', 'ex', 'N'),
JSONColumn('auth_throttle', 'th'),
# TODO(Andy): Remove these attributes, as they are now deprecated.
Column('access_code', 'ac', 'S', SecondaryIndexer()),
Column('expire_code', 'xc', 'N'),
Column('token_guesses', 'tg', 'N'),
Column('token_guesses_time', 'gt', 'N')]),
# A lock is acquired in order to control concurrent access to
# a resource. The 'lock_id' is a composite of the type of the
# resource and its unique id. The 'owner_id' is a string that
# uniquely identifies the holder of the lock. 'resource_data'
# is resource-specific information that is provided by the
# owner and stored with the lock. The 'expiration' is the time
# (UTC) at which the lock is assumed to have been abandoned by
# the owner and can be taken over by another owner.
#
# 'acquire_failures' tracks the number of times other agents
# tried to acquire the lock while it was held.
Table(LOCK, 'lo', read_units=50, write_units=10,
columns=[HashKeyColumn('lock_id', 'li', 'S'),
Column('owner_id', 'oi', 'S'),
Column('expiration', 'ex', 'N'),
Column('acquire_failures', 'af', 'N'),
Column('resource_data', 'rd', 'S')]),
# Metrics represent a timestamped payload of performance metrics
# from a single machine running viewfinder. The metrics key is a
# composite of (group_key, sort_key). The payload column is a serialized
# dictionary describing the performance metrics that were captured from
# the machine.
#
# The group_key for a metric is intended to organize metrics by the way
# they are queried. For instance, a group key might contain all
# metrics for all machines in an EC2 region, or a more specific division
# than that.
#
# The sort_key is a composite of the timestamp and machine id - the
# intention is that records will be queried by timestamp, while machine_id
# is simply included in the key to differentiate records with the same
# timestamp from different machines.
IndexedTable(METRIC, 'mt', read_units=50, write_units=10,
columns=[HashKeyColumn('group_key', 'gk', 'S'),
RangeKeyColumn('sort_key', 'sk', 'S'),
Column('machine_id', 'mi', 'S', SecondaryIndexer()),
Column('timestamp', 'ts', 'N'),
Column('payload', 'p', 'S')]),
# Notifications are messages to deliver to devices hosting the
# viewfinder client, whether mobile, desktop, web application or
# otherwise. Key is a composite of (user-id and allocated
# notification id--taken from user's uu_id sequence). Other
# fields record the name, id, and timestamp of the operation that
# resulted in the notification, as well as the user and device
# that started it. The badge attribute records the value of the
# "push badge" on client devices at the time that notification
# was recorded. The invalidate attribute is a JSON-encoded
# INVALIDATE structure, as defined in json_schema.py.
Table(NOTIFICATION, 'no', read_units=50, write_units=10,
columns=[HashKeyColumn('user_id', 'ui', 'N'),
RangeKeyColumn('notification_id', 'ni', 'N'),
Column('name', 'na', 'S'),
Column('timestamp', 'ti', 'N'),
Column('sender_id', 'si', 'N'),
Column('sender_device_id', 'sd', 'N'),
Column('badge', 'ba', 'N'),
Column('invalidate', 'in', | |
must not contain -g. This confuses gcc 5.1. (Note that it
# would seem that gcc 5.1 with "-g" does not produce debugging
# info in a format that gdb 4.7.1 can read.)
mk.definition('CFLAGS_AS', '$(patsubst -g,,$(CFLAGS))')
# the rule that transforms %.c into %.o, by compiling it to
# %.s, then applying trackgcroot to get %.lbl.s and %.gcmap, and
# finally by using the assembler ($(CC) again for now) to get %.o
mk.rule('%.o %.gcmap', '%.c', [
'$(CC) $(CFLAGS) $(CFLAGSEXTRA) -frandom-seed=$< '
'-o $*.s -S $< $(INCLUDEDIRS)',
'$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py '
'-t $*.s > $*.gctmp',
'$(CC) $(CFLAGS_AS) -o $*.o -c $*.lbl.s',
'mv $*.gctmp $*.gcmap',
'rm $*.s $*.lbl.s'])
# this is for manually written assembly files which needs to be parsed by asmgcc
mk.rule('%.o %.gcmap', '%.vmprof.s', [
'$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py '
'-t $*.vmprof.s > $*.gctmp',
'$(CC) -o $*.o -c $*.vmprof.lbl.s',
'mv $*.gctmp $*.gcmap',
'rm $*.vmprof.lbl.s'])
# the rule to compute gcmaptable.s
mk.rule('gcmaptable.s', '$(GCMAPFILES)',
[
'$(PYTHON) $(RPYDIR)/translator/c/gcc/trackgcroot.py '
'$(GCMAPFILES) > $@.tmp',
'mv $@.tmp $@'])
else:
if self.translator.platform.name == 'msvc':
mk.definition('DEBUGFLAGS', '-MD -Zi')
else:
if self.config.translation.shared:
mk.definition('DEBUGFLAGS', '-O1 -g -fPIC')
else:
mk.definition('DEBUGFLAGS', '-O1 -g')
if self.translator.platform.name == 'msvc':
mk.rule('debug_target', '$(DEFAULT_TARGET)', 'rem')
else:
mk.rule('debug_target', '$(DEFAULT_TARGET)', '#')
mk.write()
#self.translator.platform,
# ,
# self.eci, profbased=self.getprofbased()
self.executable_name = mk.exe_name
# ____________________________________________________________
# Maximum number of lines per generated .c file (used by SourceGenerator
# below); kept small enough for old MSVC linkers.
SPLIT_CRITERIA = 65535 # support VC++ 7.2
#SPLIT_CRITERIA = 32767 # enable to support VC++ 6.0
# Delimiter written between node implementations so files can be re-split.
MARKER = '/*/*/' # provide an easy way to split after generating
class SourceGenerator:
    """Plans and writes the generated C sources.

    Partitions the database's global containers into function and
    non-function nodes, groups function nodes by the Python source file
    they came from, and writes each group into one or more .c files of
    at most ``split_criteria`` lines (see ``splitnodesimpl``).
    """
    # Set to False by set_strategy(split=True).
    one_source_file = True
    def __init__(self, database):
        self.database = database
        self.extrafiles = []             # generated .c files
        self.headers_to_precompile = []  # generated .h files
        self.path = None                 # output directory; set by set_strategy
        self.namespace = NameManager()   # keeps generated file names unique
    def set_strategy(self, path, split=True):
        """Record the output path and partition nodes into func/other."""
        all_nodes = list(self.database.globalcontainers())
        # split off non-function nodes. We don't try to optimize these, yet.
        funcnodes = []
        othernodes = []
        for node in all_nodes:
            if node.nodekind == 'func':
                funcnodes.append(node)
            else:
                othernodes.append(node)
        if split:
            self.one_source_file = False
        self.funcnodes = funcnodes
        self.othernodes = othernodes
        self.path = path
    def uniquecname(self, name):
        """Return a variant of 'name' (must end in '.c') unique in this run."""
        assert name.endswith('.c')
        return self.namespace.uniquename(name[:-2]) + '.c'
    def makefile(self, name):
        """Create an output file, tracking .c sources and .h headers."""
        log.writing(name)
        filepath = self.path.join(name)
        if name.endswith('.c'):
            self.extrafiles.append(filepath)
        if name.endswith('.h'):
            self.headers_to_precompile.append(filepath)
        return filepath.open('w')
    def getextrafiles(self):
        return self.extrafiles
    def getothernodes(self):
        # Return a copy so callers cannot mutate our list.
        return self.othernodes[:]
    def getbasecfilefornode(self, node, basecname):
        """Pick the base .c filename for 'node'; 'basecname' is the fallback."""
        # For FuncNode instances, use the python source filename (relative to
        # the top directory):
        def invent_nice_name(g):
            # Lookup the filename from the function.
            # However, not all FunctionGraph objs actually have a "func":
            if hasattr(g, 'func'):
                if g.filename.endswith('.py'):
                    localpath = py.path.local(g.filename)
                    pypkgpath = localpath.pypkgpath()
                    if pypkgpath:
                        relpypath = localpath.relto(pypkgpath.dirname)
                        assert relpypath, ("%r should be relative to %r" %
                                           (localpath, pypkgpath.dirname))
                        if len(relpypath.split(os.path.sep)) > 2:
                            # pypy detail to agregate the c files by directory,
                            # since the enormous number of files was causing
                            # memory issues linking on win32
                            return os.path.split(relpypath)[0] + '.c'
                        return relpypath.replace('.py', '.c')
            return None
        if hasattr(node.obj, 'graph'):
            # Regular RPython functions
            name = invent_nice_name(node.obj.graph)
            if name is not None:
                return name
        elif node._funccodegen_owner is not None:
            # Data nodes that belong to a known function
            graph = getattr(node._funccodegen_owner, 'graph', None)
            name = invent_nice_name(graph)
            if name is not None:
                return "data_" + name
        return basecname
    def splitnodesimpl(self, basecname, nodes, nextra, nbetween,
                       split_criteria=SPLIT_CRITERIA):
        """Yield (filename, iterator of (node, impl-lines)) pairs.

        A new file is started whenever the running line count (seeded with
        'nextra', plus 'nbetween' per node) would exceed 'split_criteria'.
        """
        # Gather nodes by some criteria:
        nodes_by_base_cfile = {}
        for node in nodes:
            c_filename = self.getbasecfilefornode(node, basecname)
            if c_filename in nodes_by_base_cfile:
                nodes_by_base_cfile[c_filename].append(node)
            else:
                nodes_by_base_cfile[c_filename] = [node]
        # produce a sequence of nodes, grouped into files
        # which have no more than SPLIT_CRITERIA lines
        for basecname in sorted(nodes_by_base_cfile):
            iternodes = iter(nodes_by_base_cfile[basecname])
            done = [False]
            def subiter():
                # Generator over at most one file's worth of nodes; stops
                # via StopIteration when the budget is spent (Python 2
                # semantics), and flags 'done' on real exhaustion.
                used = nextra
                for node in iternodes:
                    impl = '\n'.join(list(node.implementation())).split('\n')
                    if not impl:
                        continue
                    cost = len(impl) + nbetween
                    yield node, impl
                    del impl
                    if used + cost > split_criteria:
                        # split if criteria met, unless we would produce nothing.
                        raise StopIteration
                    used += cost
                done[0] = True
            while not done[0]:
                yield self.uniquecname(basecname), subiter()
    @contextlib.contextmanager
    def write_on_included_file(self, f, name):
        # Create 'name', emit an #include for it into 'f', and yield the
        # freshly opened file for the caller to fill in.
        fi = self.makefile(name)
        print >> f, '#include "%s"' % name
        yield fi
        fi.close()
    @contextlib.contextmanager
    def write_on_maybe_separate_source(self, f, name):
        # Yield 'f' itself in single-file mode, otherwise a new source
        # file called 'name'.
        print >> f, '/* %s */' % name
        if self.one_source_file:
            yield f
        else:
            fi = self.makefile(name)
            yield fi
            fi.close()
    def gen_readable_parts_of_source(self, f):
        """Write the declaration headers and all implementations to disk."""
        split_criteria_big = SPLIT_CRITERIA
        if py.std.sys.platform != "win32":
            if self.database.gcpolicy.need_no_typeptr():
                pass # XXX gcc uses toooooons of memory???
            else:
                split_criteria_big = SPLIT_CRITERIA * 4
        #
        # All declarations
        #
        with self.write_on_included_file(f, 'structdef.h') as fi:
            gen_structdef(fi, self.database)
        with self.write_on_included_file(f, 'forwarddecl.h') as fi:
            gen_forwarddecl(fi, self.database)
        with self.write_on_included_file(f, 'preimpl.h') as fi:
            gen_preimpl(fi, self.database)
        #
        # Implementation of functions and global structures and arrays
        #
        print >> f
        print >> f, '/***********************************************************/'
        print >> f, '/*** Implementations ***/'
        print >> f
        print >> f, '#define PYPY_FILE_NAME "%s"' % os.path.basename(f.name)
        print >> f, '#include "src/g_include.h"'
        print >> f
        # 11 header lines + 1 marker line precede the nodes in each file.
        nextralines = 11 + 1
        for name, nodeiter in self.splitnodesimpl('nonfuncnodes.c',
                                                  self.othernodes,
                                                  nextralines, 1):
            with self.write_on_maybe_separate_source(f, name) as fc:
                if fc is not f:
                    print >> fc, '/***********************************************************/'
                    print >> fc, '/*** Non-function Implementations ***/'
                    print >> fc
                    print >> fc, '#include "common_header.h"'
                    print >> fc, '#include "structdef.h"'
                    print >> fc, '#include "forwarddecl.h"'
                    print >> fc, '#include "preimpl.h"'
                    print >> fc
                    print >> fc, '#include "src/g_include.h"'
                    print >> fc
                print >> fc, MARKER
                for node, impl in nodeiter:
                    print >> fc, '\n'.join(impl)
                    print >> fc, MARKER
                print >> fc, '/***********************************************************/'
        nextralines = 12
        for name, nodeiter in self.splitnodesimpl('implement.c',
                                                  self.funcnodes,
                                                  nextralines, 1,
                                                  split_criteria_big):
            with self.write_on_maybe_separate_source(f, name) as fc:
                if fc is not f:
                    print >> fc, '/***********************************************************/'
                    print >> fc, '/*** Implementations ***/'
                    print >> fc
                    print >> fc, '#include "common_header.h"'
                    print >> fc, '#include "structdef.h"'
                    print >> fc, '#include "forwarddecl.h"'
                    print >> fc, '#include "preimpl.h"'
                    print >> fc, '#define PYPY_FILE_NAME "%s"' % name
                    print >> fc, '#include "src/g_include.h"'
                    print >> fc
                print >> fc, MARKER
                for node, impl in nodeiter:
                    print >> fc, '\n'.join(impl)
                    print >> fc, MARKER
                print >> fc, '/***********************************************************/'
        print >> f
def gen_structdef(f, database):
    """Write structdef.h: struct tags first, then the full definitions."""
    structdeflist = database.getstructdeflist()
    print >> f, '/***********************************************************/'
    print >> f, '/*** Structure definitions ***/'
    print >> f
    print >> f, "#ifndef _PYPY_STRUCTDEF_H"
    print >> f, "#define _PYPY_STRUCTDEF_H"
    # First pass: forward declarations so definitions may reference
    # each other in any order.
    for sdef in structdeflist:
        if hasattr(sdef, 'forward_decl'):
            if sdef.forward_decl:
                print >> f, sdef.forward_decl
        elif sdef.name is not None:
            print >> f, '%s %s;' % (sdef.typetag, sdef.name)
    print >> f
    # Second pass: the definitions themselves.
    for sdef in structdeflist:
        for chunk in sdef.definition():
            print >> f, chunk
    gen_threadlocal_structdef(f, database)
    print >> f, "#endif"
def gen_threadlocal_structdef(f, database):
    """Emit RPY_TLOFS_* offset macros and 'struct pypy_threadlocal_s'."""
    from rpython.translator.c.support import cdecl
    print >> f
    bk = database.translator.annotator.bookkeeper
    # Deterministic field order: sort by field name.
    fields = sorted(bk.thread_local_fields, key=lambda fld: fld.fieldname)
    for field in fields:
        print >> f, ('#define RPY_TLOFS_%s offsetof(' % field.fieldname +
                     'struct pypy_threadlocal_s, %s)' % field.fieldname)
    print >> f, 'struct pypy_threadlocal_s {'
    print >> f, '\tint ready;'
    print >> f, '\tchar *stack_end;'
    print >> f, '\tstruct pypy_threadlocal_s *prev, *next;'
    # note: if the four fixed fields above are changed, you need
    # to adapt threadlocal.c's linkedlist_head declaration too
    for field in fields:
        typename = database.gettype(field.FIELDTYPE)
        print >> f, '\t%s;' % cdecl(typename, field.fieldname)
    print >> f, '};'
    print >> f
def gen_forwarddecl(f, database):
    """Write forwarddecl.h: one forward declaration per global container."""
    print >> f, '/***********************************************************/'
    print >> f, '/*** Forward declarations ***/'
    print >> f
    print >> f, "#ifndef _PYPY_FORWARDDECL_H"
    print >> f, "#define _PYPY_FORWARDDECL_H"
    for container in database.globalcontainers():
        for decl in container.forward_declaration():
            print >> f, decl
    print >> f, "#endif"
def gen_preimpl(f, database):
    """Write preimpl.h: pre-include code lines derived from the rtyper.

    When there is no translator or no rtyper, the header is emitted with
    an empty body.  (Bug fix: previously the early return in that case
    left the '#ifndef _PY_PREIMPL_H' guard unterminated, producing an
    invalid header file.)
    """
    f.write('#ifndef _PY_PREIMPL_H\n#define _PY_PREIMPL_H\n')
    if database.translator is not None and database.translator.rtyper is not None:
        preimplementationlines = pre_include_code_lines(
            database, database.translator.rtyper)
        for line in preimplementationlines:
            print >> f, line
    f.write('#endif /* _PY_PREIMPL_H */\n')
def gen_startupcode(f, database):
    """Emit RPython_StartupCode(), running all one-time C-level init."""
    # generate the start-up code and put it into a function
    print >> f, 'void RPython_StartupCode(void) {'
    bk = database.translator.annotator.bookkeeper
    if bk.thread_local_fields:
        print >> f, '\tRPython_ThreadLocals_ProgramInit();'
    for line in database.gcpolicy.gc_startup_code():
        print >> f,"\t" + line
    # put float infinities in global constants, we should not have so many of them for now to make
    # a table+loop preferable
    for dest, value in database.late_initializations:
        print >> f, "\t%s = %s;" % (dest, value)
    for node in database.containerlist:
        # An empty startupcode() simply contributes nothing.
        for line in node.startupcode():
            print >> f, '\t' + line
    print >> f, '}'
def commondefs(defines):
    """Add the platform word-size macros that every build needs."""
    from rpython.rlib.rarithmetic import LONG_BIT, LONGLONG_BIT
    defines.update({'PYPY_LONG_BIT': LONG_BIT,
                    'PYPY_LONGLONG_BIT': LONGLONG_BIT})
def add_extra_files(eci):
srcdir = py.path.local(__file__).join('..', 'src')
files | |
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# This file is part of 'SLAC Firmware Standard Library'.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of 'SLAC Firmware Standard Library', including this file,
# may be copied, modified, propagated, or distributed except according to
# the terms contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import rogue
import pyrogue as pr
class ClockManager(pr.Device):
def __init__( self,
name = "ClockManager",
description = "MMCM and PLL Dynamic Reconfiguration (refer to XAPP888: https://www.xilinx.com/support/documentation/application_notes/xapp888_7Series_DynamicRecon.pdf)",
type = None, # [MMCME2,PLLE2,MMCME3,PLLE3,MMCME4,PLLE4]
**kwargs):
super().__init__(name=name, description=description, **kwargs)
# Determine the number of clkout
if (type is 'PLLE3') or (type is 'PLLE4'):
numClkOut = 2
elif (type is 'PLLE2'):
numClkOut = 6
elif (type is 'MMCME2') or (type is 'MMCME3') or (type is 'MMCME4'):
numClkOut = 7
else:
raise ValueError('ClockManager: Invalid type (%s)' % (type) )
# Determine if UltraScale or not
UltraScale = False if (type is 'MMCME2') or (type is 'PLLE2') else True
##############################################################################
# ClkReg1 Bitmap for CLKOUT[6:0]
##############################################################################
# CLKOUT0 Register 1 (Address=0x08)
# CLKOUT1 Register 1 (Address=0x0A)
# CLKOUT2 Register 1 (Address=0x0C): Not available for PLLE3 or PLLE4
# CLKOUT3 Register 1 (Address=0x0E): Not available for PLLE3 or PLLE4
# CLKOUT4 Register 1 (Address=0x10): Not available for PLLE3 or PLLE4
# CLKOUT5 Register 1 (Address=0x06)
# CLKOUT6 Register 1 (Address=0x12): Not available for PLLE2, PLLE3, or PLLE4
ClkReg1 = [0x08,0x0A,0x0C,0x0E,0x10,0x06,0x12]
for i in range(numClkOut):
if (type is not 'PLLE3') and (type is not 'PLLE4'):
self.add(pr.RemoteVariable(
name = f'PHASE_MUX[{i}]',
description = """
Chooses an initial phase offset for the clock output, the
resolution is equal to 1/8 VCO period. Not available in
UltraScale PLLE3 and UltraScale+ PLLE4.
""",
offset = (ClkReg1[i] << 2),
bitSize = 3,
bitOffset = 13,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = f'HIGH_TIME[{i}]',
description = """
Sets the amount of time in VCO cycles that the clock output
remains High.
""",
offset = (ClkReg1[i] << 2),
bitSize = 6,
bitOffset = 6,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = f'LOW_TIME[{i}]',
description = """
Sets the amount of time in VCO cycles that the clock output
remains Low.
""",
offset = (ClkReg1[i] << 2),
bitSize = 6,
bitOffset = 0,
mode = "RW",
))
##############################################################################
# CLKFBOUT Register 1 (Address=0x14)
##############################################################################
if (type is not 'PLLE3') and (type is not 'PLLE4'):
self.add(pr.RemoteVariable(
name = 'PHASE_MUX_FB',
description = """
Chooses an initial phase offset for the clock output, the
resolution is equal to 1/8 VCO period. Not available in
UltraScale PLLE3 and UltraScale+ PLLE4.
""",
offset = (0x14 << 2),
bitSize = 3,
bitOffset = 13,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = 'HIGH_TIME_FB',
description = """
Sets the amount of time in VCO cycles that the clock output
remains High.
""",
offset = (0x14 << 2),
bitSize = 6,
bitOffset = 6,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = 'LOW_TIME_FB',
description = """
Sets the amount of time in VCO cycles that the clock output
remains Low.
""",
offset = (0x14 << 2),
bitSize = 6,
bitOffset = 0,
mode = "RW",
))
##############################################################################
# ClkReg2 Bitmap for CLKOUT[6:0]
##############################################################################
# CLKOUT0 Register 2 (Address=0x09)
# CLKOUT1 Register 2 (Address=0x0B)
# CLKOUT2 Register 2 (Address=0x0D): Not available for PLLE3 or PLLE4
# CLKOUT3 Register 2 (Address=0x0F): Not available for PLLE3 or PLLE4
# CLKOUT4 Register 2 (Address=0x11): Not available for PLLE3 or PLLE4
# CLKOUT5 Register 2 (Address=0x07)
# CLKOUT6 Register 2 (Address=0x13): Not available for PLLE2, PLLE3, or PLLE4
ClkReg2 = [0x09,0x0B,0x0D,0x0F,0x11,0x07,0x13]
for i in range(numClkOut):
###############################
# CLKOUT0
###############################
if (i==0):
if (type is 'MMCME2') or (type is 'MMCME3') or (type is 'MMCME4'):
self.add(pr.RemoteVariable(
name = 'FRAC[0]',
description = """
Fractional divide counter setting for CLKOUT0. Equivalent to
additional divide of 1/8.
""",
offset = (ClkReg2[i] << 2),
bitSize = 3,
bitOffset = 12,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = 'FRAC_EN[0]',
description = """
Enable fractional divider circuitry for CLKOUT0.
""",
offset = (ClkReg2[i] << 2),
bitSize = 1,
bitOffset = 11,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = 'FRAC_WF_R[0]',
description = """
Adjusts CLKOUT0 rising edge for improved duty cycle accuracy
when using fractional counter.
""",
offset = (ClkReg2[i] << 2),
bitSize = 1,
bitOffset = 10,
mode = "RW",
))
###############################
# CLKOUT1
###############################
if (i==1):
if (type is 'PLLE3') or (type is 'PLLE4'):
self.add(pr.RemoteVariable(
name = f'CLKOUTPHY_MODE[{i}]',
description = """
For the PLLE3 and PLLE4, determines CLKPHYOUT
frequency based on the VCO frequency.
""",
offset = (ClkReg2[i] << 2),
bitSize = 2,
bitOffset = 13,
mode = "RW",
))
##############################################
# CLKOUT5 register with CLKOUT0 Configurations
##############################################
if (i==5):
if (type is 'MMCME2') or (type is 'MMCME3') or (type is 'MMCME4'):
self.add(pr.RemoteVariable(
name = 'PHASE_MUX_F_CLKOUT[0]',
description = """
CLKOUT0 data required when using fractional
counter. Chooses an initial phase offset for the
falling edge of the clock output. The resolution is
equal to 1/8 VCO period. Not available in UltraScale
PLLE3 and UltraScale+ PLLE4.
""",
offset = (ClkReg2[i] << 2),
bitSize = 3,
bitOffset = 13 if UltraScale else 11,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = 'FRAC_WF_F_CLKOUT[0]',
description = """
Adjusts CLKOUT0 falling edge for improved duty
cycle accuracy when using fractional counter.
""",
offset = (ClkReg2[i] << 2),
bitSize = 1,
bitOffset = 12 if UltraScale else 10,
mode = "RW",
))
###############################################
# CLKOUT6 register with CLKFBOUT Configurations
###############################################
if (i==6):
if (type is 'MMCME2') or (type is 'MMCME3') or (type is 'MMCME4'):
self.add(pr.RemoteVariable(
name = 'PHASE_MUX_F_CLKOUT_FB',
description = """
CLKFBOUT data required when using fractional
counter. Chooses an initial phase offset for the
falling edge of the clock output. The resolution is
equal to 1/8 VCO period. Not available in UltraScale
PLLE3 and UltraScale+ PLLE4.
""",
offset = (ClkReg2[i] << 2),
bitSize = 3,
bitOffset = 13 if UltraScale else 11,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = 'FRAC_WF_F_CLKOUT_FB',
description = """
Adjusts CLKFBOUT falling edge for improved duty
cycle accuracy when using fractional counter.
""",
offset = (ClkReg2[i] << 2),
bitSize = 1,
bitOffset = 12 if UltraScale else 10,
mode = "RW",
))
##############################################################################
self.add(pr.RemoteVariable(
name = f'MX[{i}]',
description = """
Must be set to 2'b00.
""",
offset = (ClkReg2[i] << 2),
bitSize = 2,
bitOffset = 8,
mode = "WO",
))
self.add(pr.RemoteVariable(
name = f'EDGE[{i}]',
description = """
Chooses the edge that the High Time counter transitions on.
""",
offset = (ClkReg2[i] << 2),
bitSize = 1,
bitOffset = 7,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = f'NO_COUNT[{i}]',
description = """
Bypasses the High and Low Time counters.
""",
offset = (ClkReg2[i] << 2),
bitSize = 1,
bitOffset = 6,
mode = "RW",
))
self.add(pr.RemoteVariable(
name = f'DELAY_TIME[{i}]',
description = """
Phase offset with a resolution equal to the VCO period.
""",
offset = (ClkReg2[i] << 2),
bitSize = 6,
bitOffset = 0,
mode = "RW",
))
##############################################################################
if (type is 'MMCME2') or (type is 'MMCME3') or (type is 'MMCME4'):
self.add(pr.RemoteVariable(
name = 'FRAC_FB',
description = """
Fractional divide counter setting for CLKFBOUT. Equivalent to
additional divide of 1/8.
""",
offset = (0x15 << 2),
bitSize = 3,
bitOffset = 12,
mode = "RW",
))
self.add(pr.RemoteVariable(
name | |
import os
import sys
import pickle
import numpy as np
import xml.etree.ElementTree as ET
import random
import svgwrite
from IPython.display import SVG, display
import tensorflow as tf
def get_bounds(data, factor):
    """Return the bounding box (min_x, max_x, min_y, max_y) of a stroke array.

    Each row of `data` holds a relative (dx, dy) offset; offsets are scaled
    by 1/factor and accumulated into absolute coordinates. The pen's start
    point (0, 0) is always included in the bounds.
    """
    min_x = max_x = min_y = max_y = 0
    abs_x = abs_y = 0
    for row in data:
        abs_x += float(row[0]) / factor
        abs_y += float(row[1]) / factor
        min_x, max_x = min(min_x, abs_x), max(max_x, abs_x)
        min_y, max_y = min(min_y, abs_y), max(max_y, abs_y)
    return (min_x, max_x, min_y, max_y)
# version where each path is entire stroke ( smaller svg size, but have to keep same color )
def draw_strokes(data, factor=10, svg_filename='sample.svg'):
    """Render a stroke-offset array as one single-color SVG path and show it.

    Each row of `data` is (dx, dy, pen_lift); the whole drawing becomes a
    single path element, which keeps the SVG small but forces one color.
    """
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    dims = (50 + max_x - min_x, 50 + max_y - min_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))
    # Start 25px inside the margin so the drawing is centered in the canvas.
    abs_x = 25 - min_x
    abs_y = 25 - min_y
    p = "M%s, %s " % (abs_x, abs_y)
    pen_up = 1
    command = "m"
    for step in range(len(data)):
        # After a pen lift emit a relative move; the first pen-down segment
        # emits an explicit "l", later ones reuse the implicit lineto.
        if pen_up == 1:
            command = "m"
        else:
            command = "l" if command != "l" else ""
        dx = float(data[step, 0]) / factor
        dy = float(data[step, 1]) / factor
        pen_up = data[step, 2]
        p += command + str(dx) + ", " + str(dy) + " "
    dwg.add(dwg.path(p).stroke("black", 1).fill("none"))
    dwg.save()
    display(SVG(dwg.tostring()))
def draw_strokes_eos_weighted(stroke, param, factor=10, svg_filename='sample_eos.svg'):
    """Draw strokes gray-scaled by end-of-stroke probability.

    param[i][6][0] is read as the predicted end-of-stroke probability of
    step i; the gray level is (1 - p) * 225, so darker = more likely EOS.
    """
    # Fixed the garbled default filename 'sample[ A_eos.svg' (mojibake)
    # back to the intended 'sample_eos.svg'.
    c_data_eos = np.zeros((len(stroke), 3))
    for i in range(len(param)):
        # make color gray scale, darker = more likely to eos
        c_data_eos[i, :] = (1 - param[i][6][0]) * 225
    draw_strokes_custom_color(stroke, factor=factor, svg_filename=svg_filename,
                              color_data=c_data_eos, stroke_width=3)
def draw_strokes_random_color(stroke, factor=10, svg_filename='sample_random_color.svg', per_stroke_mode=True):
    """Draw strokes with random colors.

    With per_stroke_mode=True every pen-down run keeps a single color;
    otherwise each individual segment gets its own random color.
    """
    c_data = np.array(np.random.rand(len(stroke), 3) * 240, dtype=np.uint8)
    if per_stroke_mode:
        switch_color = False
        for idx in range(len(stroke)):
            if idx > 0 and not switch_color:
                # Same stroke as the previous segment: reuse its color.
                c_data[idx] = c_data[idx - 1]
            # A pen lift (third column >= 1) ends the current stroke.
            switch_color = stroke[idx, 2] >= 1
    draw_strokes_custom_color(stroke, factor=factor, svg_filename=svg_filename,
                              color_data=c_data, stroke_width=2)
def draw_strokes_custom_color(data, factor=10, svg_filename='test.svg', color_data=None, stroke_width=1):
    """Render strokes as one SVG path per segment so each segment can carry
    its own RGB color taken from `color_data` (black when None)."""
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    dims = (50 + max_x - min_x, 50 + max_y - min_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))
    # 25px margin around the drawing.
    abs_x = 25 - min_x
    abs_y = 25 - min_y
    pen_up = 1
    for idx in range(len(data)):
        dx = float(data[idx, 0]) / factor
        dy = float(data[idx, 1]) / factor
        prev_x, prev_y = abs_x, abs_y
        abs_x += dx
        abs_y += dy
        if pen_up == 1:
            # Pen was lifted: just move to the new point.
            p = "M "+str(abs_x)+", "+str(abs_y)+" "
        else:
            # Pen down: draw a segment from the previous point.
            p = "M +"+str(prev_x)+", "+str(prev_y)+" L "+str(abs_x)+", "+str(abs_y)+" "
        pen_up = data[idx, 2]
        the_color = "black"
        if color_data is not None:
            the_color = "rgb( "+str(int(color_data[idx, 0]))+", "+str(int(color_data[idx, 1]))+", "+str(int(color_data[idx, 2]))+" )"
        dwg.add(dwg.path(p).stroke(the_color, stroke_width).fill(the_color))
    dwg.save()
    display(SVG(dwg.tostring()))
def draw_strokes_pdf(data, param, factor=10, svg_filename='sample_pdf.svg'):
    """Visualize the mixture-density prediction along the drawing.

    For every step, each mixture component with weight above 1% is rendered
    as a translucent red circle centered at its predicted offset, with
    radius proportional to sqrt(s1*s2).
    """
    min_x, max_x, min_y, max_y = get_bounds(data, factor)
    dims = (50 + max_x - min_x, 50 + max_y - min_y)
    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))
    abs_x = 25 - min_x
    abs_y = 25 - min_y
    num_mixture = len(param[0][0])
    for step in range(len(data)):
        dx = float(data[step, 0]) / factor
        dy = float(data[step, 1]) / factor
        for mix in range(num_mixture):
            pi = param[step][0][mix]
            if pi > 0.01:  # optimisation: ignore components below 1% weight
                mu1 = param[step][1][mix]
                mu2 = param[step][2][mix]
                s1 = param[step][3][mix]
                s2 = param[step][4][mix]
                sigma = np.sqrt(s1 * s2)
                dwg.add(dwg.circle(center=(abs_x + mu1 * factor, abs_y + mu2 * factor),
                                   r=int(sigma * factor)).fill('red', opacity=pi / (sigma * sigma * factor)))
        abs_x += dx
        abs_y += dy
    dwg.save()
    display(SVG(dwg.tostring()))
class DataLoader( ):
def getRandValue( self ):
value = self.randvalues[ self.randvaluepointer ]
self.randvaluepointer += 1
if ( self.randvaluepointer >= self.nrrandvalues ):
self.randvaluepointer = 0
return value
def createRandValues( self ):
self.nrrandvalues = 1000
self.randvalues = np.zeros( ( self.nrrandvalues ), dtype = np.float32 )
for i in range( self.nrrandvalues ):
value = random.random( )
self.randvalues[ i ] = value
self.randvaluepointer = 0
def getClassLabels( self ):
if self.train:
fn = self.data_dir + "trainlabels.txt"
else:
fn = self.data_dir + "testlabels.txt"
classlabels = np.loadtxt( fn )
classlabels = classlabels[ :self.nrinputfiles ]
return classlabels
def findAvailableExamples( self, args ):
self.availableExamples = [ ]
findexamples = True
if findexamples:
for i in range( len( self.classlabels ) ):
if ( self.classlabels[ i ] < args.curnrdigits ):
self.availableExamples.append( i )
self.availableExamples = np.array( self.availableExamples )
def __init__(self, datadir, args, totnrfiles, curnrexamples, seqlength=0, train=1, file_label="", print_input=0, rangemin=0, rangelen=0):
    """Load (preprocessing first, if needed) stroke data for one split.

    Seeds all RNGs from args.runnr, preprocesses the raw lineStrokes data
    into a .cpkl file, loads it, reads the class labels, and prepares the
    batch pointer. `train` selects the "train" vs "test" split.

    NOTE(review): tf.set_random_seed is the TF1.x API — this module
    presumably targets TensorFlow 1.x; confirm before upgrading.
    """
    # Seed every RNG from the run number so runs are reproducible.
    random.seed(100 * args.runnr)
    np.random.seed(100 * args.runnr)
    tf.set_random_seed(100 * args.runnr)
    self.args = args
    self.data_dir = datadir
    self.train = train
    if self.train:
        self.traintest = "train"
    else:
        self.traintest = "test"
    self.rangemin = rangemin
    self.rangelen = rangelen
    self.nrinputfiles = totnrfiles
    self.curnrexamples = curnrexamples
    self.nrseq_per_batch = args.nrseq_per_batch
    self.file_label = file_label
    self.print_input = print_input
    # Determined by reading one sample line from the first input file.
    self.nrinputvars_data = self.getInputVectorLength(args)
    self.max_seq_length = args.max_seq_length
    self.nrsequenceinputs = 4  # dx dy eos eod
    self.nrauxinputvars = args.nrClassOutputVars  # either [ 0..9 dx dy eos eod ] or [ dx dy eos ]
    strokedatafile = os.path.join(self.data_dir, "strokes_" + self.traintest + "ing_data" + file_label + args.explabel + ".cpkl")
    raw_data_dir = self.data_dir + "/lineStrokes"
    print("creating data cpkl file from source data")
    self.preprocess(args, raw_data_dir, strokedatafile)
    if (seqlength > 0):  # caller provided an explicit sequence length
        self.seq_length = seqlength
    else:
        self.seq_length = min(self.max_seq_length, args.maxdigitlength_nrpoints)
    self.load_preprocessed(args, strokedatafile)
    self.classlabels = self.getClassLabels()
    self.findAvailableExamples(args)
    # At least one batch per epoch even when there are few examples.
    self.nrbatches_per_epoch = max(1, int(self.curnrexamples / self.nrseq_per_batch))
    print("curnrexamples", self.curnrexamples, "seq_length", self.seq_length, " --> nrbatches_per_epoch: ", self.nrbatches_per_epoch)
    print("loaded data")
    self.reset_batch_pointer(args)
def constructInputFileName( self, args, file_label, imgnr ):
filename = self.data_dir + self.traintest + 'img' + file_label + '-' + str( imgnr ) + '-targetdata.txt' #currently, we expect 14 inputs
return filename
def getInputVectorLength( self, args ):
result = [ ]
filename = self.constructInputFileName( args, self.file_label, imgnr = 0 )
with open( filename ) as f:
points = [ ]
line = f.readline( )
print ( "read sample line from inputdata file: ", line )
nrs = [ float( x ) for x in line.split( ) ]
length = len( | |
% (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class isInContact_result(object):
    """
    Attributes:
     - success
     - e
    """
    # NOTE(review): this looks like Thrift-compiler-generated RPC glue
    # (TType-driven field loops); changes normally belong in the .thrift
    # IDL, not in this file.

    def __init__(self, success=None, e=None):
        self.success = success  # bool returned by the isInContact call
        self.e = e  # TalkException raised by the server, if any

    def read(self, iprot):
        # Deserialize this struct from the input protocol, skipping
        # unknown or mistyped fields.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.BOOL:
                    self.success = iprot.readBool()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = TalkException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        # Serialize only the fields that are set.
        self.validate()
        oprot.writeStructBegin('isInContact_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.BOOL, 0)
            oprot.writeBool(self.success)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.success))
        value = (value * 31) ^ hash(make_hashable(self.e))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class lookupGroupMembers_args(object):
    """
    Attributes:
     - groupId
     - mids
    """
    # NOTE(review): Thrift-compiler-generated argument struct; edit the
    # .thrift IDL rather than this file.

    def __init__(self, groupId=None, mids=None):
        self.groupId = groupId  # string group identifier
        self.mids = mids  # list of member-id strings

    def read(self, iprot):
        # Deserialize fields from the input protocol; unknown or
        # mistyped fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.groupId = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.LIST:
                    self.mids = []
                    # readListBegin returns (element type, element count).
                    (_, elem990) = iprot.readListBegin()
                    for _ in range(elem990):
                        elem991 = iprot.readString()
                        self.mids.append(elem991)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        # Serialize only the fields that are set.
        self.validate()
        oprot.writeStructBegin('lookupGroupMembers_args')
        if self.groupId is not None:
            oprot.writeFieldBegin('groupId', TType.STRING, 1)
            oprot.writeString(self.groupId)
            oprot.writeFieldEnd()
        if self.mids is not None:
            oprot.writeFieldBegin('mids', TType.LIST, 2)
            oprot.writeListBegin(TType.STRING, len(self.mids))
            for elem992 in self.mids:
                oprot.writeString(elem992)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.groupId))
        value = (value * 31) ^ hash(make_hashable(self.mids))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class lookupGroupMembers_result(object):
    """
    Attributes:
     - success
     - e
    """
    # NOTE(review): Thrift-compiler-generated result struct.

    def __init__(self, success=None, e=None):
        self.success = success  # list of SimpleChannelContact on success
        self.e = e  # TalkException raised by the server, if any

    def read(self, iprot):
        # Deserialize fields from the input protocol; unknown or
        # mistyped fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    # readListBegin returns (element type, element count).
                    (_, elem993) = iprot.readListBegin()
                    for _ in range(elem993):
                        elem994 = SimpleChannelContact()
                        elem994.read(iprot)
                        self.success.append(elem994)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = TalkException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        # Serialize only the fields that are set.
        self.validate()
        oprot.writeStructBegin('lookupGroupMembers_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for elem995 in self.success:
                elem995.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.success))
        value = (value * 31) ^ hash(make_hashable(self.e))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class getRoomInformation_args(object):
    """
    Attributes:
     - roomMid
    """
    # NOTE(review): Thrift-compiler-generated argument struct.

    def __init__(self, roomMid=None):
        self.roomMid = roomMid  # string mid of the room to query

    def read(self, iprot):
        # Deserialize fields from the input protocol; unknown or
        # mistyped fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.roomMid = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        # Serialize only the fields that are set.
        self.validate()
        oprot.writeStructBegin('getRoomInformation_args')
        if self.roomMid is not None:
            oprot.writeFieldBegin('roomMid', TType.STRING, 1)
            oprot.writeString(self.roomMid)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.roomMid))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class getRoomInformation_result(object):
    """
    Attributes:
     - success
     - e
    """
    # NOTE(review): Thrift-compiler-generated result struct.

    def __init__(self, success=None, e=None):
        self.success = success  # Room struct on success
        self.e = e  # TalkException raised by the server, if any

    def read(self, iprot):
        # Deserialize fields from the input protocol; unknown or
        # mistyped fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = Room()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = TalkException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        # Serialize only the fields that are set.
        self.validate()
        oprot.writeStructBegin('getRoomInformation_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.success))
        value = (value * 31) ^ hash(make_hashable(self.e))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class getGroupCall_args(object):
    """
    Attributes:
     - chatMid
    """
    # NOTE(review): Thrift-compiler-generated argument struct. chatMid is
    # declared with field id 2 (not 1) in the IDL, as reflected below.

    def __init__(self, chatMid=None):
        self.chatMid = chatMid  # string mid of the chat to query

    def read(self, iprot):
        # Deserialize fields from the input protocol; unknown or
        # mistyped fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 2:
                if ftype == TType.STRING:
                    self.chatMid = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        # Serialize only the fields that are set.
        self.validate()
        oprot.writeStructBegin('getGroupCall_args')
        if self.chatMid is not None:
            oprot.writeFieldBegin('chatMid', TType.STRING, 2)
            oprot.writeString(self.chatMid)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.chatMid))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class getGroupCall_result(object):
    """
    Attributes:
     - success
     - e
    """
    # NOTE(review): Thrift-compiler-generated result struct.

    def __init__(self, success=None, e=None):
        self.success = success  # GroupCall struct on success
        self.e = e  # TalkException raised by the server, if any

    def read(self, iprot):
        # Deserialize fields from the input protocol; unknown or
        # mistyped fields are skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = GroupCall()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.e = TalkException()
                    self.e.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        # Serialize only the fields that are set.
        self.validate()
        oprot.writeStructBegin('getGroupCall_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __hash__(self):
        value = 17
        value = (value * 31) ^ hash(make_hashable(self.success))
        value = (value * 31) ^ hash(make_hashable(self.e))
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class isAllowSecondaryDeviceLogin_args(object):
    # NOTE(review): Thrift-compiler-generated argument struct for a
    # parameterless RPC — no fields, read() only consumes the STOP marker.

    def read(self, iprot):
        # Consume the (empty) struct from the input protocol.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()
        self.validate()

    def write(self, oprot):
        # Emit an empty struct (just the field-stop marker).
        self.validate()
        oprot.writeStructBegin('isAllowSecondaryDeviceLogin_args')
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated code performs no validation for this struct.
        return

    def __hash__(self):
        value = 17
        return value

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class isAllowSecondaryDeviceLogin_result(object):
"""
Attributes:
- success
- e
"""
def __init__(self, success=None, e=None):
self.success = success
self.e = e
def read(self, iprot):
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.BOOL:
self.success = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.e = TalkException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
self.validate()
def write(self, oprot):
self.validate()
oprot.writeStructBegin('isAllowSecondaryDeviceLogin_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.BOOL, 0)
oprot.writeBool(self.success)
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 1)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(make_hashable(self.success))
value = (value * 31) ^ hash(make_hashable(self.e))
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for | |
from os import system # É importado da biblioteca os a função system que será usado para limpara a tela
from time import sleep # É importado da biblioteca time a função sleep, que serve para p pc "dormir"
from random import randint, choice # É importado da biblioteca random a função randint, que serve para a jogada do pc
# A função choice serve para escolher dentre algo
'''
Desenvolvido por: <NAME>
Ultima atualização: 25/03/2020
A função limpa_tela não tenho certeza se funcionará no terminal de uma máquina linux ou IOS
'''
# Global game state (module level, shared by the functions below).
pontos_jogador1 = pontos_jogador2 = empates = pontos_jogador_contra_pc = pontos_pc = 0  # score counters
numero_do_jogo = 1  # current match number
escolha = ''  # last menu choice typed by the user
continuar = ''  # play-again answer
jogou = None  # flag so the PC does not play twice in the same turn
vitoria = None  # guards against a bug where the user wins on the final round
combinacao_vitoria = ([0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6])
# Every winning line of tic-tac-toe, as board indices (0-based).
def limpa_tela():
    """Clear the terminal screen on both Windows and POSIX systems.

    The original only ran 'cls' (Windows); the file's own comment noted it
    was untested on Linux/macOS. 'clear' is used there instead.
    """
    import os  # local import keeps the module's top-level imports untouched
    os.system('cls' if os.name == 'nt' else 'clear')
def menu():
    """Show the start menu and loop until the player picks option 2.

    Option 1 prints the how-to-play help; anything else re-prompts.
    The last typed option is stored in the global ``escolha``.
    """
    global escolha
    while True:
        print('=' * 25)
        print(
            '1 - Como jogar\n'
            '2 - Iniciar o jogo'
        )
        print('=' * 25)
        escolha = input('Digite a sua escolha: ')
        if escolha == '2':
            return  # start the game
        if escolha == '1':
            print(
                '''
--- COMO JOGAR ---
Digite o número correspondente à posição no tabuleiro para fazer sua jogada nela.
Por exemplo, caso sua opção seja o canto superior direito, seria digitado o valor 3.
     |     |
  1  |  2  |  3
_____|_____|_____
     |     |
  4  |  5  |  6
_____|_____|_____
     |     |
  7  |  8  |  9
     |     |
'''
            )
        else:
            print(
                '\nA escolha deve ser válida.\n'
            )
def redefinindo_tabuleiro():
    """Print the scoreboard header and the current board state.

    Reads the module globals (match number, player names/marks, scores and
    the user-facing board list) and renders them as one f-string.
    """
    print(f'''
O PLACAR SÓ É ATUALIZADO NO JOGO SEGUINTE
Partida: {numero_do_jogo}
Vitórias {nome_jogador1}({tipo_jogador1}): {pontos_jogador1}
Vitórias {nome_jogador2}({tipo_jogador2}): {pontos_jogador2}
Empates(velha): {empates}
     |     |
  {tabuleiro_mostrado_usuario[0]}  |  {tabuleiro_mostrado_usuario[1]}  |  {tabuleiro_mostrado_usuario[2]}
_____|_____|_____
     |     |
  {tabuleiro_mostrado_usuario[3]}  |  {tabuleiro_mostrado_usuario[4]}  |  {tabuleiro_mostrado_usuario[5]}
_____|_____|_____
     |     |
  {tabuleiro_mostrado_usuario[6]}  |  {tabuleiro_mostrado_usuario[7]}  |  {tabuleiro_mostrado_usuario[8]}
     |     |
'''
          )
def jogada_multiplayer():
    """Read and apply one move in two-player mode.

    On odd turns (cont % 2 != 0) player 1 moves; on even turns player 2.
    Loops until a free cell number is entered.
    """
    while True:
        jogada = input(f'''
Vez de {nome_jogador1 if cont % 2 != 0 else nome_jogador2}({tipo_jogador1 if cont % 2 != 0 else tipo_jogador2}), digite a jogada: '''
                       )
        # Odd turn -> player 1 plays, even turn -> player 2 plays.
        if jogada in tabuleiro and jogada not in ('X', 'O'):
            # The move is one of the board's free cells ('1'..'9') and not an
            # already-played 'X'/'O' mark.
            tabuleiro[int(jogada) - 1] = tipo_jogador1 if cont % 2 != 0 else tipo_jogador2
            # -1 because the list index starts at 0; stores the player's mark
            # in the chosen cell.
            tabuleiro_mostrado_usuario[int(jogada) - 1] = tipo_jogador1 if cont % 2 != 0 else tipo_jogador2
            break  # leave the input loop
        else:
            print(f'''
Poxa {nome_jogador1 if cont % 2 != 0 else nome_jogador2} por favor, uma jogada válida, vai'''
                  )
def placar():
    """Print the two-player scoreboard: wins per player and draws."""
    print(
        f'\nO placar ficou da seguinte forma:\n'
        f'{nome_jogador1}({tipo_jogador1}) está com {pontos_jogador1} ponto em {numero_do_jogo} partidas\n'
        f'{nome_jogador2}({tipo_jogador2}) está com {pontos_jogador2} ponto em {numero_do_jogo} partidas\n'
        f'Empates(Velha): {empates}'
    )
def checar_se_ha_vitoria():
    """Check the board for a completed winning line and award the point.

    Returns True when either player has three in a line (and prints the
    winner plus the scoreboard), False otherwise. Booleans keep the
    caller's check a simple ``if``.
    """
    global pontos_jogador1, pontos_jogador2
    for combinacao in combinacao_vitoria:  # walk every winning-line combination
        if (tabuleiro[combinacao[0]] == tabuleiro[combinacao[1]]) and \
                (tabuleiro[combinacao[1]] == tabuleiro[combinacao[2]]):
            # All three cells of this line hold the same mark.
            if tabuleiro[combinacao[1]] == tipo_jogador1:
                pontos_jogador1 += 1
                print(
                    f'O jogador(a) {nome_jogador1} ganhou, mais um ponto para ele(a). :-)'
                )
                sleep(2)  # two-second pause so the message can be read
                placar()
                return True
            elif tabuleiro[combinacao[1]] == tipo_jogador2:
                pontos_jogador2 += 1
                print(
                    f'O jogador(a) {nome_jogador2} ganhou, mais um ponto para ele(a). :-)'
                )
                sleep(2)  # two-second pause so the message can be read
                placar()
                return True
    return False
def jogada_jogador_contra_pc():
    """Read and apply the human player's move in player-vs-PC mode.

    Loops until a free cell number is entered, then stores the player's
    mark in both board lists.
    """
    while True:
        jogada = input(f'''
Vez de {nome_jogador_contra_pc}({tipo_jogador}), digite a jogada: '''
                       )
        # Valid when the cell is still free ('1'..'9') and not an 'X'/'O' mark.
        if jogada in tabuleiro and jogada not in ('X', 'O'):
            # -1 because list indices start at 0; the chosen cell receives
            # the player's mark.
            tabuleiro[int(jogada) - 1] = tipo_jogador
            tabuleiro_mostrado_usuario[int(jogada) - 1] = tipo_jogador
            break
        else:
            print(f'''
Poxa {nome_jogador_contra_pc} por favor, uma jogada válida, vai'''
                  )
def jogada_padrao_pc():
    """Default PC move: draw random board cells until a free one is hit."""
    while True:
        # Board cells are stored as the strings '1'..'9' while free.
        jogada = str(randint(1, 9))
        if jogada in tabuleiro:  # still free (occupied cells hold 'X'/'O')
            pos = int(jogada) - 1  # list indices are 0-based
            tabuleiro[pos] = tipo_pc
            tabuleiro_mostrado_usuario[pos] = tipo_pc
            break
def jogada_pc_facil():
    """Easy PC mode: pretend to think for two seconds, then play randomly."""
    print('''
Estou pensando...
'''
          )
    sleep(2)  # fake "thinking" pause for a more natural feel
    jogada_padrao_pc()
def jogada_dificil_extremo():
global jogou
for combinacao in combinacao_vitoria:
if tabuleiro[combinacao[0]] == tabuleiro[combinacao[1]]: # Verifica o indice 0 da combinação, que está sendo
# percorrida nas combinacao_vitoria, na lista tabuleiro, é igual a o indice 1
if tabuleiro[combinacao[1]] == tipo_pc: # Verifica se é do pc mesmo a sequencia
if tabuleiro[combinacao[2]] != tipo_jogador: # Verifica se o jogador já não jogou naquela posição
tabuleiro[combinacao[2]] = tipo_pc # Recebe a jogada do PC no lugar onde ele ganha a partida
tabuleiro_mostrado_usuario[combinacao[2]] = tipo_pc # Recebe a jogada do PC no lugar onde ele
# ganha a partida
jogou = True
break # Quebra o For
if tabuleiro[combinacao[1]] == tipo_jogador: # Verifica se é do jogador mesmo a sequencia
if tabuleiro_mostrado_usuario[combinacao[2]] != tipo_pc: # Verifica se pc já não jogou naquela posição
tabuleiro[combinacao[2]] = tipo_pc # Recebe a jogada do PC no lugar onde invalida a chance do
# jogador ganhar
tabuleiro_mostrado_usuario[combinacao[2]] = tipo_pc
jogou = True
break # Quebra o For
elif tabuleiro[combinacao[1]] == tabuleiro[combinacao[2]]: # Verifica o indice 1 da combinação, que está sendo
# percorrida nas combinacao_vitoria, na lista tabuleiro, é igual a o indice 2
if tabuleiro[combinacao[1]] == tipo_pc: # Verifica se é do pc mesmo a sequencia
if tabuleiro[combinacao[0]] != tipo_jogador: # Verifica se o jogador já não jogou naquela posição
tabuleiro[combinacao[0]] = tipo_pc # Recebe a jogada do PC no lugar onde ele ganha a partida
tabuleiro_mostrado_usuario[combinacao[0]] = tipo_pc # Recebe a jogada do PC no lugar onde ele
# ganha a partida
jogou = True
break # Quebra o For
if tabuleiro[combinacao[1]] == tipo_jogador: # Verifica se é do jogador mesmo a sequencia
if tabuleiro[combinacao[0]] != tipo_pc: # Verifica se pc já não jogou naquela posição
tabuleiro[combinacao[0]] = tipo_pc # Recebe a jogada do PC no lugar onde invalida a chance do
# jogador ganhar
tabuleiro_mostrado_usuario[combinacao[0]] = tipo_pc
jogou = True
break # Quebra o For
elif tabuleiro[combinacao[0]] == tabuleiro[combinacao[2]]: # Verifica o indice 0 da combinação, que está sendo
# percorrida nas combinacao_vitoria, na lista tabuleiro, é igual a o indice 2
if tabuleiro[combinacao[2]] == tipo_pc: # Verifica se é do pc mesmo a sequencia
if tabuleiro[combinacao[1]] != tipo_jogador: # Verifica se o jogador já não jogou naquela posição
tabuleiro[combinacao[1]] = tipo_pc # Recebe a jogada do PC no lugar onde ele ganha a partida
tabuleiro_mostrado_usuario[combinacao[1]] = tipo_pc # Recebe a jogada do PC no lugar onde ele
# ganha a partida
jogou = True
break # Quebra o For
if tabuleiro[combinacao[2]] == tipo_jogador: # Verifica se é do jogador mesmo a sequencia
if tabuleiro[combinacao[1]] != tipo_pc: # Verifica se pc já não jogou naquela posição
tabuleiro[combinacao[1]] = tipo_pc # Recebe a jogada do PC no lugar onde invalida | |
import asyncio
import pytest
from aioasuswrt.asuswrt import (AsusWrt, _LEASES_CMD, _WL_CMD, _IP_NEIGH_CMD,
_ARP_CMD, Device, _RX_COMMAND, _TX_COMMAND,
_TEMP_CMD, _LOADAVG_CMD, _MEMINFO_CMD,
_NETDEV_CMD)
# Canned output returned by the mocked _RX_COMMAND / _TX_COMMAND for eth0
# (transferred-bytes counters), plus the integer values expected after parsing.
RX_DATA = ["2703926881", ""]
TX_DATA = ["648110137", ""]
RX = 2703926881
TX = 648110137
# Canned output for _TEMP_CMD: two radio temperature lines plus a CPU line.
TEMP_DATA = [
    '59 (0x3b)\r',
    '69 (0x45)\r',
    'CPU temperature : 77'
]
# Canned /proc/net/dev-style dump returned for _NETDEV_CMD: two header lines
# followed by one row of 16 counters per interface.
NETDEV_DATA = [
    'nter-| Receive | Transmit',
    ' face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed',
    ' lo: 129406077 639166 0 0 0 0 0 0 129406077 639166 0 0 0 0 0 0',
    ' ifb0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
    ' ifb1: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
    ' fwd0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
    ' fwd1: 0 32991574 0 0 0 0 0 0 2758131447 21323444 0 0 0 0 0 0',
    ' agg: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
    ' eth0: 1376394855 180111514 0 0 0 0 0 0 896208608 161258260 0 0 0 0 0 0',
    ' dpsta: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
    ' eth1: 240050447 1451957 0 0 0 0 0 47377 2112087504 43036729 0 26277918 0 0 0 0',
    ' eth2: 0 0 0 0 0 0 0 0 3283428721 33007901 0 2 0 0 0 0',
    ' vlan1: 35966691832 80394316 0 0 0 0 0 91875 29563557562 53006688 0 0 0 0 0 0',
    ' vlan2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
    ' br0: 4643330713 15198823 0 0 0 0 0 0 5699827990 13109400 0 0 0 0 0 0',
    ' wl0.1: 72308780 385338 0 0 0 0 0 7706 311596615 4150488 0 199907 0 0 0 0',
    'ds0.1: 0 0 0 0 0 0 0 0 102404809 805208 0 0 0 0 0 0',
    ' tun21: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
]
# Expected parse of NETDEV_DATA: one dict of 16 counters per interface.
# NOTE(review): the first (Receive) column block is labelled tx_* here and the
# Transmit block rx_* (compare the fwd1 row, where only the second column is
# 32991574, against tx_packets below) — presumably mirrors the library's
# parser naming; confirm this is intentional upstream.
INTERFACES_COUNT = {
    'lo': {'tx_bytes': 129406077, 'tx_packets': 639166, 'tx_errs': 0,
           'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
           'tx_multicast': 0, 'rx_bytes': 129406077, 'rx_packets': 639166,
           'rx_errs': 0, 'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0,
           'rx_carrier': 0, 'rx_compressed': 0},
    'ifb0': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
             'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
             'tx_multicast': 0,
             'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
             'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
             'rx_compressed': 0},
    'ifb1': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
             'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
             'tx_multicast': 0,
             'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
             'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
             'rx_compressed': 0},
    'fwd0': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
             'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
             'tx_multicast': 0,
             'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
             'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
             'rx_compressed': 0},
    'fwd1': {'tx_bytes': 0, 'tx_packets': 32991574, 'tx_errs': 0,
             'tx_drop': 0,
             'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
             'tx_multicast': 0,
             'rx_bytes': 2758131447, 'rx_packets': 21323444, 'rx_errs': 0,
             'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
             'rx_compressed': 0},
    'agg': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
            'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
            'tx_multicast': 0,
            'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
            'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
            'rx_compressed': 0},
    'eth0': {'tx_bytes': 1376394855, 'tx_packets': 180111514, 'tx_errs': 0,
             'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
             'tx_multicast': 0, 'rx_bytes': 896208608,
             'rx_packets': 161258260,
             'rx_errs': 0, 'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0,
             'rx_carrier': 0, 'rx_compressed': 0},
    'dpsta': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
              'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
              'tx_multicast': 0,
              'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
              'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
              'rx_compressed': 0},
    'eth1': {'tx_bytes': 240050447, 'tx_packets': 1451957, 'tx_errs': 0,
             'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
             'tx_multicast': 47377, 'rx_bytes': 2112087504,
             'rx_packets': 43036729, 'rx_errs': 0, 'rx_drop': 26277918,
             'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
             'rx_compressed': 0},
    'eth2': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
             'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
             'tx_multicast': 0,
             'rx_bytes': 3283428721, 'rx_packets': 33007901, 'rx_errs': 0,
             'rx_drop': 2, 'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
             'rx_compressed': 0},
    'vlan1': {'tx_bytes': 35966691832, 'tx_packets': 80394316,
              'tx_errs': 0,
              'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0,
              'tx_compressed': 0,
              'tx_multicast': 91875, 'rx_bytes': 29563557562,
              'rx_packets': 53006688, 'rx_errs': 0, 'rx_drop': 0,
              'rx_fifo': 0,
              'rx_colls': 0, 'rx_carrier': 0, 'rx_compressed': 0},
    'vlan2': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
              'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
              'tx_multicast': 0,
              'rx_bytes': 0, 'rx_packets': 0, 'rx_errs': 0, 'rx_drop': 0,
              'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
              'rx_compressed': 0},
    'br0': {'tx_bytes': 4643330713, 'tx_packets': 15198823, 'tx_errs': 0,
            'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
            'tx_multicast': 0, 'rx_bytes': 5699827990,
            'rx_packets': 13109400,
            'rx_errs': 0, 'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0,
            'rx_carrier': 0, 'rx_compressed': 0},
    'wl0.1': {'tx_bytes': 72308780, 'tx_packets': 385338, 'tx_errs': 0,
              'tx_drop': 0, 'tx_fifo': 0, 'tx_frame': 0,
              'tx_compressed': 0,
              'tx_multicast': 7706, 'rx_bytes': 311596615,
              'rx_packets': 4150488,
              'rx_errs': 0, 'rx_drop': 199907, 'rx_fifo': 0, 'rx_colls': 0,
              'rx_carrier': 0, 'rx_compressed': 0},
    'ds0.1': {'tx_bytes': 0, 'tx_packets': 0, 'tx_errs': 0, 'tx_drop': 0,
              'tx_fifo': 0, 'tx_frame': 0, 'tx_compressed': 0,
              'tx_multicast': 0,
              'rx_bytes': 102404809, 'rx_packets': 805208, 'rx_errs': 0,
              'rx_drop': 0, 'rx_fifo': 0, 'rx_colls': 0, 'rx_carrier': 0,
              'rx_compressed': 0}
}
# Canned output for _LOADAVG_CMD (load averages, runnable/total, last pid).
LOADAVG_DATA = [
    '0.23 0.50 0.68 2/167 13095'
]
# Canned output for _MEMINFO_CMD.
# NOTE(review): the content looks like /proc/loadavg output rather than
# meminfo — confirm this fixture matches what _MEMINFO_CMD really returns.
MEMINFO_DATA = [
    '0.46 0.75 0.77 1/165 2609'
]
# Canned `wl assoclist` output and the devices expected from parsing it
# (MAC address only — no IP or hostname at this stage).
WL_DATA = [
    'assoclist 01:02:03:04:06:08\r',
    'assoclist 08:09:10:11:12:14\r',
    'assoclist 08:09:10:11:12:15\r',
    'assoclist AB:CD:DE:AB:CD:EF\r'
]
WL_DEVICES = {
    '01:02:03:04:06:08': Device(
        mac='01:02:03:04:06:08', ip=None, name=None),
    '08:09:10:11:12:14': Device(
        mac='08:09:10:11:12:14', ip=None, name=None),
    '08:09:10:11:12:15': Device(
        mac='08:09:10:11:12:15', ip=None, name=None),
    'AB:CD:DE:AB:CD:EF': Device(
        mac='AB:CD:DE:AB:CD:EF', ip=None, name=None)
}
# Canned arp output and the expected parse: the <incomplete> entry carries
# no MAC and is absent from the result.
ARP_DATA = [
    '? (172.16.17.32) at 01:02:03:04:06:08 [ether] on eth0\r',
    '? (172.16.58.3) at 08:09:10:11:12:14 [ether] on br0\r',
    '? (192.168.127.12) at AB:CD:DE:AB:CD:EF [ether] on br0\r',
    '? (172.16.31.10) at <incomplete> on br0\r',
    '? (172.16.10.2) at 00:25:90:12:2D:90 [ether] on br0\r',
]
ARP_DEVICES = {
    '01:02:03:04:06:08': Device(
        mac='01:02:03:04:06:08', ip='172.16.17.32', name=None),
    '08:09:10:11:12:14': Device(
        mac='08:09:10:11:12:14', ip='172.16.58.3', name=None),
    'AB:CD:DE:AB:CD:EF': Device(
        mac='AB:CD:DE:AB:CD:EF', ip='192.168.127.12', name=None),
    '00:25:90:12:2D:90': Device(
        mac='00:25:90:12:2D:90', ip='172.16.10.2', name=None)
}
# Canned `ip neigh` output and the expected parse: only REACHABLE entries
# are kept (FAILED/DELAY/STALE dropped) and lower-case MACs are upper-cased.
NEIGH_DATA = [
    '172.16.17.32 dev eth0 lladdr 01:02:03:04:06:08 REACHABLE\r',
    '172.16.58.3 dev br0 lladdr 08:09:10:11:12:14 REACHABLE\r',
    '192.168.127.12 dev br0 lladdr ab:cd:de:ab:cd:ef REACHABLE\r',
    '172.16.31.10 dev br0 FAILED\r',
    '172.16.17.32 dev br0 lladdr 08:09:15:15:15:15 DELAY\r',
    'fe80::feff:a6ff:feff:12ff dev br0 lladdr fc:ff:a6:ff:12:ff STALE\r',
]
NEIGH_DEVICES = {
    '01:02:03:04:06:08': Device(
        mac='01:02:03:04:06:08', ip='172.16.17.32', name=None),
    '08:09:10:11:12:14': Device(
        mac='08:09:10:11:12:14', ip='172.16.58.3', name=None),
    'AB:CD:DE:AB:CD:EF': Device(
        mac='AB:CD:DE:AB:CD:EF', ip='192.168.127.12', name=None)
}
# Canned dnsmasq lease lines and the expected parse when merged with the
# NEIGH_DEVICES above: the '*' hostname becomes an empty string, and the
# lease for a MAC not present in the neighbour list is dropped.
LEASES_DATA = [
    '51910 01:02:03:04:06:08 172.16.17.32 TV 01:02:03:04:06:08\r',
    '79986 01:02:03:04:06:10 172.16.31.10 android 01:02:03:04:06:15\r',
    '23523 08:09:10:11:12:14 172.16.58.3 * 08:09:10:11:12:14\r',
]
LEASES_DEVICES = {
    '01:02:03:04:06:08': Device(
        mac='01:02:03:04:06:08', ip='172.16.17.32', name='TV'),
    '08:09:10:11:12:14': Device(
        mac='08:09:10:11:12:14', ip='172.16.58.3', name='')
}
# Expected combined device maps for the connected-devices tests (lease,
# arp and wireless data merged; exact combination method not visible in
# this chunk — the three variants cover router, access-point and no-IP modes).
WAKE_DEVICES = {
    '01:02:03:04:06:08': Device(
        mac='01:02:03:04:06:08', ip='172.16.17.32', name='TV'),
    '08:09:10:11:12:14': Device(
        mac='08:09:10:11:12:14', ip='172.16.58.3', name=''),
    '00:25:90:12:2D:90': Device(
        mac='00:25:90:12:2D:90', ip='172.16.10.2', name=None)
}
WAKE_DEVICES_AP = {
    '01:02:03:04:06:08': Device(
        mac='01:02:03:04:06:08', ip='172.16.17.32', name=None),
    '08:09:10:11:12:14': Device(
        mac='08:09:10:11:12:14', ip='172.16.58.3', name=None),
    'AB:CD:DE:AB:CD:EF': Device(
        mac='AB:CD:DE:AB:CD:EF', ip='192.168.127.12', name=None),
    '00:25:90:12:2D:90': Device(
        mac='00:25:90:12:2D:90', ip='172.16.10.2', name=None)
}
WAKE_DEVICES_NO_IP = {
    '01:02:03:04:06:08': Device(
        mac='01:02:03:04:06:08', ip='172.16.17.32', name=None),
    '08:09:10:11:12:14': Device(
        mac='08:09:10:11:12:14', ip='172.16.58.3', name=None),
    '08:09:10:11:12:15': Device(
        mac='08:09:10:11:12:15', ip=None, name=None),
    'AB:CD:DE:AB:CD:EF': Device(
        mac='AB:CD:DE:AB:CD:EF', ip='192.168.127.12', name=None),
    '00:25:90:12:2D:90': Device(
        mac='00:25:90:12:2D:90', ip='172.16.10.2', name=None)
}
def RunCommandMock(command, *args, **kwargs):
    """Fake SshConnection.async_run_command.

    Returns an already-resolved asyncio.Future holding the canned fixture
    output for *command*; raises for any command without a fixture so that
    tests exercising an unexpected command fail loudly.
    """
    # Echo the call for test debugging.  args/kwargs are printed as plain
    # values: the original forwarded them via print(command, *args, **kwargs),
    # which raises TypeError as soon as any keyword argument is supplied.
    print(command, args, kwargs)
    f = asyncio.Future()
    if command == _WL_CMD:
        f.set_result(WL_DATA)
        return f
    if command == _LEASES_CMD.format('/var/lib/misc'):
        f.set_result(LEASES_DATA)
        return f
    if command == _IP_NEIGH_CMD:
        f.set_result(NEIGH_DATA)
        return f
    if command == _ARP_CMD:
        f.set_result(ARP_DATA)
        return f
    if command == _RX_COMMAND.format('eth0'):
        f.set_result(RX_DATA)
        return f
    if command == _TX_COMMAND.format('eth0'):
        f.set_result(TX_DATA)
        return f
    if command == _TEMP_CMD:
        f.set_result(TEMP_DATA)
        return f
    if command == _LOADAVG_CMD:
        f.set_result(LOADAVG_DATA)
        return f
    if command == _MEMINFO_CMD:
        f.set_result(MEMINFO_DATA)
        return f
    if command == _NETDEV_CMD:
        f.set_result(NETDEV_DATA)
        return f
    raise Exception("Unhandled command: %s" % command)
def RunCommandEmptyMock(command, *args, **kwargs):
    """Fake async_run_command that always resolves to an empty string."""
    future = asyncio.Future()
    future.set_result("")
    return future
@pytest.mark.asyncio
async def test_get_wl(event_loop, mocker):
    """async_get_wl() parses wl assoclist output into MAC-only devices."""
    mocker.patch(
        'aioasuswrt.connection.SshConnection.async_run_command',
        side_effect=RunCommandMock)
    wrt = AsusWrt(host="localhost", port=22)
    result = await wrt.async_get_wl()
    assert result == WL_DEVICES
@pytest.mark.asyncio
async def test_get_wl_empty(event_loop, mocker):
    """async_get_wl() returns no devices when the router output is empty."""
    mocker.patch(
        'aioasuswrt.connection.SshConnection.async_run_command',
        side_effect=RunCommandEmptyMock)
    wrt = AsusWrt(host="localhost", port=22)
    result = await wrt.async_get_wl()
    assert result == {}
@pytest.mark.asyncio
async def test_async_get_leases(event_loop, mocker):
    """async_get_leases() merges dnsmasq lease data into known devices."""
    mocker.patch(
        'aioasuswrt.connection.SshConnection.async_run_command',
        side_effect=RunCommandMock)
    wrt = AsusWrt(host="localhost", port=22)
    result = await wrt.async_get_leases(NEIGH_DEVICES.copy())
    assert result == LEASES_DEVICES
@pytest.mark.asyncio
async def test_get_arp(event_loop, mocker):
    """async_get_arp() parses arp output, skipping <incomplete> entries."""
    mocker.patch(
        'aioasuswrt.connection.SshConnection.async_run_command',
        side_effect=RunCommandMock)
    wrt = AsusWrt(host="localhost", port=22)
    result = await wrt.async_get_arp()
    assert result == ARP_DEVICES
@pytest.mark.asyncio
async def test_get_neigh(event_loop, mocker):
"""Testing neigh."""
mocker.patch(
'aioasuswrt.connection.SshConnection.async_run_command',
side_effect=RunCommandMock)
scanner = AsusWrt(host="localhost", | |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Author: <NAME> (ccbogel)
https://github.com/ccbogel/QualCoder
"""
from PyQt5 import QtCore, QtGui
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
import csv
from datetime import datetime
import logging
import os
import sqlite3
import sys
import traceback
from .GUI.base64_helper import *
from .GUI.ui_dialog_SQL import Ui_Dialog_sql
from .save_sql_query import DialogSaveSql
from .helpers import ExportDirectoryPathDialog, Message
from .highlighter import Highlighter
path = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
def exception_handler(exception_type, value, tb_obj):
    """Global exception hook for the GUI: print, log and show the traceback.

    tb_obj: exception.__traceback__
    """
    frames = '\n'.join(traceback.format_tb(tb_obj))
    text_ = ('Traceback (most recent call last):\n' + frames + '\n'
             + exception_type.__name__ + ': ' + str(value))
    print(text_)
    logger.error(_("Uncaught exception: ") + text_)
    QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text_)
class DialogSQL(QtWidgets.QDialog):
""" Uses single inheritance, sub-class QDialog and set up the user interface in
the __init__() method.
A gui to allow the user to enter sql queries and return results.
Data outputs are as tab (or other) separated files.
EXTRA_SQL is listed at end of module for additional complex queries. """
app = None
schema = None
parent_textEdit = None
sql = ""
stored_sqls = [] # a list of dictionaries of user created sql, as {index, sql}
default_sqls = [] # a list of dictionaries of default sql, as {index, sql}
file_data = [] # for file exports
results = None # SQL results
queryTime = "" # for label tooltip
queryFilters = "" # for label tooltip
cell_value = ""
row = -1
col = -1
    def __init__(self, app_, parent_textedit):
        """Set up the SQL query dialog.

        Wires widget signals, applies the user's font settings, builds the
        schema tree and restores previously saved splitter geometry.
        param: app_ : App object (settings and project db connection)
        param: parent_textedit : main window text area used for logging
        """
        sys.excepthook = exception_handler
        QtWidgets.QDialog.__init__(self)
        self.app = app_
        self.parent_textEdit = parent_textedit
        self.queryTime = ""
        self.queryFilters = ""
        # Set up the user interface from Designer.
        self.ui = Ui_Dialog_sql()
        self.ui.setupUi(self)
        self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
        # Apply the user's font preferences to the dialog and the results table
        font = 'font: ' + str(self.app.settings['fontsize']) + 'pt '
        font += '"' + self.app.settings['font'] + '";'
        self.setStyleSheet(font)
        doc_font = 'font: ' + str(self.app.settings['docfontsize']) + 'pt '
        doc_font += '"' + self.app.settings['font'] + '";'
        self.ui.tableWidget_results.setStyleSheet(doc_font)
        self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)
        # Syntax highlighting for the SQL editor, with dark rules if needed
        highlighter = Highlighter(self.ui.textEdit_sql)
        if self.app.settings['stylesheet'] == "dark":
            highlighter.create_rules(dark=True)
        self.ui.textEdit_sql.setContextMenuPolicy(Qt.CustomContextMenu)
        self.ui.textEdit_sql.customContextMenuRequested.connect(self.sql_menu)
        self.ui.tableWidget_results.setContextMenuPolicy(Qt.CustomContextMenu)
        self.ui.tableWidget_results.customContextMenuRequested.connect(self.table_menu)
        # Add tables and fields to treeWidget
        self.get_schema_update_tree_widget()
        self.ui.treeWidget.itemClicked.connect(self.get_item)
        self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)
        self.ui.treeWidget.customContextMenuRequested.connect(self.tree_menu)
        self.ui.pushButton_runSQL.clicked.connect(self.run_sql)
        pm = QtGui.QPixmap()
        pm.loadFromData(QtCore.QByteArray.fromBase64(cogs_icon), "png")
        self.ui.pushButton_runSQL.setIcon(QtGui.QIcon(pm))
        self.ui.pushButton_export.clicked.connect(self.export_file)
        pm = QtGui.QPixmap()
        pm.loadFromData(QtCore.QByteArray.fromBase64(doc_export_csv_icon), "png")
        self.ui.pushButton_export.setIcon(QtGui.QIcon(pm))
        # Restore saved splitter geometry; fall back to defaults when the
        # settings keys are absent or the saved sizes are implausibly small
        self.ui.splitter.setSizes([20, 180])
        try:
            s0 = int(self.app.settings['dialogsql_splitter_h0'])
            s1 = int(self.app.settings['dialogsql_splitter_h1'])
            if s0 > 10 and s1 > 10:
                self.ui.splitter.setSizes([s0, s1])
        except KeyError:
            pass
        self.ui.splitter_2.setSizes([10, 290])
        try:
            s0 = int(self.app.settings['dialogsql_splitter_v0'])
            s1 = int(self.app.settings['dialogsql_splitter_v1'])
            if s0 > 10 and s1 > 10:
                self.ui.splitter_2.setSizes([s0, s1])
        except KeyError:
            pass
        self.ui.splitter.splitterMoved.connect(self.update_sizes)
        self.ui.splitter_2.splitterMoved.connect(self.update_sizes)
def update_sizes(self):
""" Called by splitter resized """
sizes = self.ui.splitter.sizes()
self.app.settings['dialogsql_splitter_h0'] = sizes[0]
self.app.settings['dialogsql_splitter_h1'] = sizes[1]
sizes = self.ui.splitter_2.sizes()
self.app.settings['dialogsql_splitter_v0'] = sizes[0]
self.app.settings['dialogsql_splitter_v1'] = sizes[1]
def export_file(self):
""" Load result set and export results to a delimited .csv file
using \r\n as line separators. """
cur = self.app.conn.cursor()
sql = self.ui.textEdit_sql.toPlainText()
try:
cur.execute(sql)
except Exception as e:
Message(self.app, _("SQL error"), str(e), "warning").exec_()
return
results = cur.fetchall()
header = []
if cur.description is not None:
header = list(map(lambda x: x[0], cur.description)) # gets column names
filename = "sql_report.csv"
export_dir = ExportDirectoryPathDialog(self.app, filename)
filepath = export_dir.filepath
if filepath is None:
return
print("FP", filepath)
quote_option = csv.QUOTE_MINIMAL
if self.ui.checkBox_quote.isChecked():
quote_option = csv.QUOTE_ALL
delimiter_ = str(self.ui.comboBox_delimiter.currentText())
if delimiter_ == "tab":
delimiter_ = "\t"
with open(filepath, 'wt', encoding='utf-8-sig') as export_file:
csv_writer = csv.writer(export_file, delimiter=delimiter_, quoting=quote_option)
csv_writer.writerow(header)
for row in results:
csv_writer.writerow(row)
msg = _("SQL Results exported to: ") + filepath
self.parent_textEdit.append(msg)
self.parent_textEdit.append(_("Query:") + "\n" + sql)
Message(self.app, _("CSV file export"), msg, "information").exec_()
def get_item(self):
""" Get the selected table name or tablename.fieldname and add to the sql text
at the current cursor position.
Get a default query and replace sql text in text edit.
Get a stored query and replace sql text in text edit """
item_text = self.ui.treeWidget.currentItem().text(0)
index = self.ui.treeWidget.currentIndex()
# Check use stored sql to fill corect text for sql
for s in self.stored_sqls:
if index == s['index']:
self.ui.textEdit_sql.clear()
self.ui.textEdit_sql.setText(s['sql'])
return
for d in self.default_sqls:
if index == d['index']:
self.ui.textEdit_sql.clear()
self.ui.textEdit_sql.setText(d['sql'])
return
if index.parent().row() != -1: # there is a parent if not -1
item_parent = self.ui.treeWidget.itemFromIndex(index.parent())
item_parent_text = item_parent.text(0)
'''if item_parent_text == "Default Queries":
self.ui.textEdit_sql.clear()
self.ui.textEdit_sql.setText(item_text)
return
if item_parent_text != "Default Queries":'''
item_text = item_parent_text + "." + item_text
cursor = self.ui.textEdit_sql.textCursor()
cursor.insertText(" " + item_text + " ")
    def run_sql(self):
        """Execute the SQL from the editor and show the results.

        Fills tableWidget_results with the result rows (and file_data for a
        later export), updates the status label with row counts or DDL/DML
        feedback, and rebuilds the schema tree after CREATE/DROP/ALTER.
        """
        # Clear tableWidget and file data
        num_rows = self.ui.tableWidget_results.rowCount()
        for row in range(0, num_rows):
            self.ui.tableWidget_results.removeRow(0)
        self.ui.tableWidget_results.setHorizontalHeaderLabels([""])
        self.file_data = []
        self.ui.label.setText(_("Running query. Please wait."))
        QtWidgets.QApplication.processEvents()  # stops gui freeze
        self.sql = self.ui.textEdit_sql.toPlainText()
        cur = self.app.conn.cursor()
        self.sql = str(self.sql)
        QtWidgets.QApplication.processEvents()  # stops gui freeze
        try:
            # Time the query for the label tooltip
            time0 = datetime.now()
            cur.execute(self.sql)
            self.ui.label.setToolTip("")
            self.results = cur.fetchall()
            time1 = datetime.now()
            timediff = time1 - time0
            self.queryTime = "Time:" + str(timediff)
            self.ui.label.setToolTip(self.queryTime)
            self.ui.label.setText(str(len(self.results)) + _(" rows"))
            # Extra messaging where rows will be zero.  Schema/data-changing
            # statements also disable the project backup deletion flag.
            # NOTE(review): the CREATE TABLE branch does not commit while the
            # CREATE INDEX/DELETE/UPDATE branches do — confirm intentional
            # (the unconditional commit below covers it in any case).
            if self.sql[0:12].upper() == "CREATE TABLE":
                self.ui.label.setText(_("Table created"))
                self.app.delete_backup = False
            if self.sql[0:12].upper() == "CREATE INDEX":
                self.ui.label.setText(_("Index created"))
                self.app.delete_backup = False
                self.app.conn.commit()
            if self.sql[0:6].upper() == "DELETE":
                self.ui.label.setText(str(cur.rowcount) + _(" rows deleted"))
                self.app.delete_backup = False
                self.app.conn.commit()
            if self.sql[0:6].upper() == "UPDATE":
                self.ui.label.setText(str(cur.rowcount) + _(" rows updated"))
                self.app.delete_backup = False
                self.app.conn.commit()
            col_names = []
            if cur.description is not None:
                col_names = list(map(lambda x: x[0], cur.description))  # gets column names
            self.ui.tableWidget_results.setColumnCount(len(col_names))
            self.ui.tableWidget_results.setHorizontalHeaderLabels(col_names)
            self.file_data.append(col_names)
            for row, row_results in enumerate(self.results):
                self.file_data.append(row_results)
                self.ui.tableWidget_results.insertRow(row)
                for col, value in enumerate(row_results):
                    if value is None:
                        value = ""
                    cell = QtWidgets.QTableWidgetItem(str(value))
                    cell.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled)
                    self.ui.tableWidget_results.setItem(row, col, cell)
            self.ui.tableWidget_results.resizeColumnsToContents()
            # Keep column widths reasonable, 500 pixels max
            for i in range(self.ui.tableWidget_results.columnCount()):
                if self.ui.tableWidget_results.columnWidth(i) > 500:
                    self.ui.tableWidget_results.setColumnWidth(i, 500)
            self.ui.tableWidget_results.resizeRowsToContents()
            # Schema-changing statements require the tree to be rebuilt
            sql_string = str(self.sql).upper()
            if sql_string.find("CREATE ") == 0 or sql_string.find("DROP ") == 0 or sql_string.find("ALTER ") == 0:
                self.get_schema_update_tree_widget()
        except (sqlite3.OperationalError, sqlite3.IntegrityError) as e:
            Message(self.app, _("Error"), str(e), "warning").exec_()
            self.ui.label.setText(_("SQL Error"))
            self.ui.label.setToolTip(str(e))
            self.results = None
        self.app.conn.commit()
def get_schema_update_tree_widget(self):
""" Get table schema from database, and update the tables_an_views tree widget.
The schema needs to be updated when drop table or create queries are run. """
self.stored_sqls = []
self.schema = []
table_dict = {}
cur = self.app.conn.cursor()
cur.execute("SELECT sql, type, name FROM sqlite_master WHERE type IN ('table', 'view') ")
result = cur.fetchall()
for row in result:
table_name = row[2]
fields = []
field_results = cur.execute("PRAGMA table_info(" + table_name + ")")
# each field is a tuple of cid, name, type (integer, text, ), notNull (1=notNull),
# defaultValue(None usually), primaryKey(as integers 1 up, or 0)
for field in field_results:
fields.append(field)
table_dict[table_name] = fields
self.schema = table_dict
# Fill tree widget with tables and views
tables_and_views = []
for k in self.schema.keys():
tables_and_views.append(k)
tables_and_views.sort()
self.ui.treeWidget.clear()
for table_name in tables_and_views:
top_item = QtWidgets.QTreeWidgetItem()
top_item.setText(0, table_name)
result = cur.execute("SELECT type FROM sqlite_master WHERE name='" + table_name + "' ")
table_or_view = result.fetchone()[0]
if table_or_view == "view":
top_item.setBackground(0, QtGui.QBrush(Qt.yellow, Qt.Dense6Pattern))
self.ui.treeWidget.addTopLevelItem(top_item)
for field in self.schema[table_name]:
field_item = QtWidgets.QTreeWidgetItem()
if table_or_view == "view":
field_item.setBackground(0, QtGui.QBrush(Qt.yellow, Qt.Dense6Pattern))
if field[5] > 0:
field_item.setForeground(0, QtGui.QBrush(Qt.red))
field_item.setText(0, field[1])
top_item.addChild(field_item)
# Add default sqls
default_item = QtWidgets.QTreeWidgetItem()
default_item.setText(0, _("Default Queries"))
self.ui.treeWidget.addTopLevelItem(default_item)
for query in EXTRA_SQL:
item = QtWidgets.QTreeWidgetItem()
title = query.split('\n')[0]
item.setText(0, title)
default_item.addChild(item)
self.default_sqls.append({'index': self.ui.treeWidget.indexFromItem(item), 'sql': query})
# Add user stored queries
sql = "select title, description, grouper, ssql from stored_sql | |
== len(desc):
return
std = "expected %d warnings, found %d: wlist=%s desc=%r" % \
(len(desc), len(wlist), self._formatWarningList(wlist), desc)
raise self.failureException(self._formatMessage(msg, std))
def consumeWarningList(self, wlist, desc=None, *args, **kwds):
"""[deprecated] assertWarningList() variant that clears list afterwards"""
if desc is None:
desc = []
self.assertWarningList(wlist, desc, *args, **kwds)
del wlist[:]
def _formatWarning(self, entry):
tail = ""
if hasattr(entry, "message"):
# WarningMessage instance.
tail = " filename=%r lineno=%r" % (entry.filename, entry.lineno)
if entry.line:
tail += " line=%r" % (entry.line,)
entry = entry.message
cls = type(entry)
return "<%s.%s message=%r%s>" % (cls.__module__, cls.__name__,
str(entry), tail)
def _formatWarningList(self, wlist):
return "[%s]" % ", ".join(self._formatWarning(entry) for entry in wlist)
#===================================================================
# capability tests
#===================================================================
def require_stringprep(self):
"helper to skip test if stringprep is missing"
from passlib.utils import stringprep
if not stringprep:
from passlib.utils import _stringprep_missing_reason
raise self.skipTest("not available - stringprep module is " +
_stringprep_missing_reason)
def require_TEST_MODE(self, level):
"skip test for all PASSLIB_TEST_MODE values below <level>"
if not TEST_MODE(level):
raise self.skipTest("requires >= %r test mode" % level)
def require_writeable_filesystem(self):
"skip test if writeable FS not available"
if GAE:
return self.skipTest("GAE doesn't offer read/write filesystem access")
#===================================================================
# other
#===================================================================
_mktemp_queue = None
def mktemp(self, *args, **kwds):
"create temp file that's cleaned up at end of test"
self.require_writeable_filesystem()
fd, path = tempfile.mkstemp(*args, **kwds)
os.close(fd)
queue = self._mktemp_queue
if queue is None:
queue = self._mktemp_queue = []
def cleaner():
for path in queue:
if os.path.exists(path):
os.remove(path)
del queue[:]
self.addCleanup(cleaner)
queue.append(path)
return path
#===================================================================
# eoc
#===================================================================
#=============================================================================
# other unittest helpers
#=============================================================================
# backend names reserved as special keywords, never valid as concrete backend
# names (presumably consumed by get_backend()/set_backend() -- the usage is
# not visible in this chunk)
RESERVED_BACKEND_NAMES = ["any", "default"]
class HandlerCase(TestCase):
"""base class for testing password hash handlers (esp passlib.utils.handlers subclasses)
In order to use this to test a handler,
create a subclass will all the appropriate attributes
filled as listed in the example below,
and run the subclass via unittest.
.. todo::
Document all of the options HandlerCase offers.
.. note::
This is subclass of :class:`unittest.TestCase`
(or :class:`unittest2.TestCase` if available).
"""
    #===================================================================
    # class attrs - should be filled in by subclass
    #===================================================================
    #---------------------------------------------------------------
    # handler setup
    #---------------------------------------------------------------
    # handler class to test [required]
    handler = None
    # if set, run tests against specified backend
    backend = None
    #---------------------------------------------------------------
    # test vectors
    #---------------------------------------------------------------
    # list of (secret, hash) tuples which are known to be correct
    known_correct_hashes = []
    # list of (config, secret, hash) tuples are known to be correct
    known_correct_configs = []
    # list of (alt_hash, secret, hash) tuples, where alt_hash is a hash
    # using an alternate representation that should be recognized and verify
    # correctly, but should be corrected to match hash when passed through
    # genhash()
    known_alternate_hashes = []
    # hashes so malformed they aren't even identified properly
    known_unidentified_hashes = []
    # hashes which are identifiable but malformed - they should identify()
    # as True, but cause an error when passed to genhash/verify.
    known_malformed_hashes = []
    # list of (handler name, hash) pairs for other algorithm's hashes that
    # handler shouldn't identify as belonging to it. this list should generally
    # be sufficient (if handler name in list, that entry will be skipped)
    known_other_hashes = [
        ('des_crypt', '6f8c114b58f2c'),
        ('md5_crypt', '$1$dOHYPKoP$tnxS1T8Q6VVn3kpV8cN6o.'),
        ('sha512_crypt', "$6$rounds=123456$asaltof16chars..$BtCwjqMJGx5hrJhZywW"
         "vt0RLE8uZ4oPwcelCjmw2kSYu.Ec6ycULevoBK25fs2xXgMNrCzIMVcgEJAstJeonj1"),
    ]
    # passwords used to test basic encrypt behavior - generally
    # don't need to be overridden.
    # NOTE(review): u()/b() are presumably py2/py3 compat helpers defined
    # earlier in this file -- not visible in this chunk.
    stock_passwords = [
        u("test"),
        u("\u20AC\u00A5$"),
        b('\xe2\x82\xac\xc2\xa5$')
    ]
    #---------------------------------------------------------------
    # option flags
    #---------------------------------------------------------------
    # maximum number of chars which hash will include in digest.
    # ``None`` (the default) indicates the hash uses ALL of the password.
    secret_size = None
    # whether hash is case insensitive
    # True, False, or special value "verify-only" (which indicates
    # hash contains case-sensitive portion, but verifies is case-insensitive)
    secret_case_insensitive = False
    # flag if scheme accepts ALL hash strings (e.g. plaintext)
    accepts_all_hashes = False
    # flag indicating "disabled account" handler (e.g. unix_disabled)
    is_disabled_handler = False
    # flag/hack to filter PasslibHashWarning issued by test_72_configs()
    filter_config_warnings = False
    # forbid certain characters in passwords
    @classproperty
    def forbidden_characters(cls):
        # anything that supports crypt() interface should forbid null chars,
        # since crypt() uses null-terminated strings.
        if 'os_crypt' in getattr(cls.handler, "backends", ()):
            return b("\x00")
        return None
#===================================================================
# internal class attrs
#===================================================================
__unittest_skip = True
@property
def descriptionPrefix(self):
handler = self.handler
name = handler.name
if hasattr(handler, "get_backend"):
name += " (%s backend)" % (handler.get_backend(),)
return name
#===================================================================
# internal instance attrs
#===================================================================
# indicates safe_crypt() has been patched to use another backend of handler.
using_patched_crypt = False
#===================================================================
# support methods
#===================================================================
#---------------------------------------------------------------
# configuration helpers
#---------------------------------------------------------------
    @property
    def supports_config_string(self):
        """True when the handler's genconfig() emits a config string (vs None)."""
        return self.do_genconfig() is not None
@classmethod
def iter_known_hashes(cls):
"iterate through known (secret, hash) pairs"
for secret, hash in cls.known_correct_hashes:
yield secret, hash
for config, secret, hash in cls.known_correct_configs:
yield secret, hash
for alt, secret, hash in cls.known_alternate_hashes:
yield secret, hash
def get_sample_hash(self):
"test random sample secret/hash pair"
known = list(self.iter_known_hashes())
return rng.choice(known)
#---------------------------------------------------------------
# test helpers
#---------------------------------------------------------------
def check_verify(self, secret, hash, msg=None, negate=False):
    "helper to check verify() outcome, honoring is_disabled_handler"
    result = self.do_verify(secret, hash)
    self.assertTrue(result is True or result is False,
                    "verify() returned non-boolean value: %r" % (result,))
    # disabled handlers must reject everything; negate flips expectations too
    expect_reject = negate or self.is_disabled_handler
    if expect_reject:
        if result:
            raise self.failureException(msg or
                ("verify incorrectly returned True: secret=%r, hash=%r" %
                 (secret, hash)))
    elif not result:
        raise self.failureException(msg or
            ("verify failed: secret=%r, hash=%r" % (secret, hash)))
def check_returned_native_str(self, result, func_name):
    "assert *result* is a native str instance"
    err = "%s() failed to return native string: %r" % (func_name, result,)
    self.assertIsInstance(result, str, err)
#---------------------------------------------------------------
# PasswordHash helpers - wraps all calls to PasswordHash api,
# so that subclasses can fill in defaults and account for other specialized behavior
#---------------------------------------------------------------
def populate_settings(self, kwds):
    "subclassable method to populate default settings"
    # speed up the test suite by lowering rounds, unless caller chose a value
    handler = self.handler
    if 'rounds' not in handler.setting_kwds or 'rounds' in kwds:
        return
    floor = max(3, handler.min_rounds)
    if TEST_MODE(max="quick"):
        # quick mode: drop (nearly) to the handler's minimum
        kwds['rounds'] = floor
    else:
        # otherwise scale the default down by a factor of 2**3 == 8
        shift = 3
        default = handler.default_rounds
        if getattr(handler, "rounds_cost", None) == "log2":
            default -= shift
        else:
            default >>= shift
        kwds['rounds'] = max(floor, default)
def populate_context(self, secret, kwds):
    "subclassable hook allowing context kwds to be derived from the secret"
    return secret
def do_encrypt(self, secret, **kwds):
    "call handler's encrypt method, filling in default context & settings"
    secret = self.populate_context(secret, kwds)
    self.populate_settings(kwds)
    return self.handler.encrypt(secret, **kwds)
def do_verify(self, secret, hash, **kwds):
    "call handler's verify method, filling in default context kwds"
    secret = self.populate_context(secret, kwds)
    return self.handler.verify(secret, hash, **kwds)
def do_identify(self, hash):
    "call handler's identify method (no defaults to fill in)"
    return self.handler.identify(hash)
def do_genconfig(self, **kwds):
    "call handler's genconfig method, filling in default settings"
    self.populate_settings(kwds)
    return self.handler.genconfig(**kwds)
def do_genhash(self, secret, config, **kwds):
    "call handler's genhash method, filling in default context kwds"
    secret = self.populate_context(secret, kwds)
    return self.handler.genhash(secret, config, **kwds)
#---------------------------------------------------------------
# automatically generate subclasses for testing specific backends,
# and other backend helpers
#---------------------------------------------------------------
@classmethod
def _enable_backend_case(cls, backend):
    "helper for create_backend_cases(); returns reason to skip backend, or None"
    handler = cls.handler
    # non-default backends are only exercised under TEST_MODE 'full'
    if not is_default_backend(handler, backend) and not TEST_MODE("full"):
        return "only default backend is being tested"
    if handler.has_backend(backend):
        return None
    # bcrypt's builtin backend gets auto-enabled under TEST_MODE 'full'
    if handler.name == "bcrypt" and backend == "builtin" and TEST_MODE("full"):
        return None
    from passlib.utils import has_crypt
    if backend == "os_crypt" and has_crypt:
        if TEST_MODE("full") and cls.find_crypt_replacement():
            # HandlerCase will monkeypatch os_crypt to use another backend,
            # just so os_crypt can be tested fully.
            return None
        return "hash not supported by os crypt()"
    return "backend not available"
@classmethod
def create_backend_cases(cls, backends, module=None):
    """generate one test-case subclass per backend name.

    Each yielded class derives from *cls*, pinned to a single backend;
    classes whose backend can't (or shouldn't) be tested in the current
    TEST_MODE are wrapped with skip().
    """
    handler = cls.handler
    name = handler.name
    assert hasattr(handler, "backends"), "handler must support uh.HasManyBackends protocol"
    for backend in backends:
        assert backend in handler.backends, "unknown backend: %r" % (backend,)
        bases = (cls,)
        if backend == "os_crypt":
            # mixin adds the crypt()-specific setup for os_crypt testing
            bases += (OsCryptMixin,)
        subcls = type(
            "%s_%s_test" % (name, backend),
            bases,
            dict(
                # NOTE: this class attr intentionally shadows the
                # descriptionPrefix property defined on the base class
                descriptionPrefix = "%s (%s backend)" % (name, backend),
                backend = backend,
                __module__= module or cls.__module__,
            )
        )
        skip_reason = cls._enable_backend_case(backend)
        if skip_reason:
            subcls = skip(skip_reason)(subcls)
        yield subcls
@classmethod
def find_crypt_replacement(cls):
"find other backend which can be used to mock the os_crypt backend"
handler = cls.handler
for name in handler.backends:
if name != "os_crypt" and handler.has_backend(name):
| |
import sqlite3
from sqlite3 import Error
import datetime
import csv
import urllib.request as urllib2
import os
# directory containing this module; the log/ folder and CSV exports live here
dir_path = os.path.dirname(os.path.realpath(__file__))
class BancoDeDados():
def __init__(self, arquivo):
    """Open (creating if needed) the SQLite database *arquivo*.

    Also ensures the log/ directory exists and records the session start
    time, prefixed with a marker when the machine appears to be offline.
    """
    # idiomatic replacement for try/makedirs/except FileExistsError
    os.makedirs(os.path.join(dir_path, 'log'), exist_ok=True)
    self.conn = self.create_connection(arquivo)
    # str(datetime) minus the ".%f" microseconds suffix
    agora = str(datetime.datetime.now())[:-7]
    if self.rpi_online():
        self.hora_inicio = agora
    else:
        self.hora_inicio = '[RPI OFFLINE]' + agora
def rpi_online(self):
    """Return True when the internet is reachable (1-second Google probe)."""
    try:
        urllib2.urlopen('http://www.google.com', timeout=1)
        return True
    except Exception:
        # best-effort connectivity check: any failure (URLError, timeout,
        # DNS error...) means "offline".  Catch Exception rather than a bare
        # except so KeyboardInterrupt/SystemExit still propagate.
        return False
def fechar_conn(self):
    """Close the underlying SQLite connection."""
    self.conn.close()
def create_table_admin(self):
    """Create the ``admin`` table if it does not exist yet."""
    ddl = ("CREATE TABLE IF NOT EXISTS admin(id INTEGER PRIMARY "
           "KEY, tag TEXT, login TEXT, nome TEXT UNIQUE, email TEXT, "
           "senha TEXT)")
    try:
        self.conn.cursor().execute(ddl)
    except Error as e:
        print(e)
def create_table_variaveis(self):
    """Create the key/value ``variaveis`` table if it does not exist yet."""
    ddl = ("CREATE TABLE IF NOT EXISTS variaveis(id INTEGER PRIMARY "
           "KEY, variavel TEXT UNIQUE, valor TEXT)")
    try:
        self.conn.cursor().execute(ddl)
    except Error as e:
        print(e)
def create_table_usuarios(self):
    """Create the ``usuarios`` table if it does not exist yet."""
    ddl = ("CREATE TABLE IF NOT EXISTS usuarios(id INTEGER PRIMARY "
           "KEY, tag TEXT UNIQUE, login TEXT UNIQUE, nome TEXT UNIQUE, email TEXT, adicionado_por TEXT, "
           "permissao TEXT, senha TEXT, grupo TEXT)")
    try:
        self.conn.cursor().execute(ddl)
    except Error as e:
        print(e)
def create_table_usuarios_antigos(self):
    """Create the ``usuarios_antigos`` archive table if it does not exist."""
    ddl = ("CREATE TABLE IF NOT EXISTS usuarios_antigos(id INTEGER PRIMARY "
           "KEY, tag TEXT UNIQUE, login TEXT UNIQUE, nome TEXT UNIQUE, email TEXT, adicionado_por TEXT, "
           "permissao TEXT, senha TEXT, grupo TEXT)")
    try:
        self.conn.cursor().execute(ddl)
    except Error as e:
        print(e)
def create_table_autorizacao_equip(self):
    """Create the per-equipment authorization table if it does not exist."""
    ddl = '''CREATE TABLE IF NOT EXISTS autorizacao_equip(id INTEGER
PRIMARY KEY, equipamento TEXT, login TEXT, nome TEXT, super TEXT,
UNIQUE(login, equipamento))'''
    try:
        self.conn.cursor().execute(ddl)
    except Error as e:
        print(e)
def create_table_presenca(self):
    """Create the attendance (``presenca``) table if it does not exist."""
    ddl = '''CREATE TABLE IF NOT EXISTS presenca(id INTEGER
PRIMARY KEY, login TEXT, nome TEXT, tag TEXT, hora_entrada TEXT,
hora_saida TEXT)'''
    try:
        self.conn.cursor().execute(ddl)
    except Error as e:
        print(e)
def create_table_uso_equip(self):
    """Create the equipment-usage (``uso_equip``) table if it does not exist."""
    ddl = '''CREATE TABLE IF NOT EXISTS uso_equip(id INTEGER PRIMARY
KEY, login TEXT, nome TEXT, equipamento TEXT, hora_inicio TEXT, hora_fim TEXT,
tempo_total TEXT, comentario TEXT, situacao Text)'''
    try:
        self.conn.cursor().execute(ddl)
    except Error as e:
        print(e)
def create_connection(self, arquivo):
    """ create a database connection to a SQLite database

    Bootstraps the full schema and the CSV export files on success.
    Returns the connection, or None when sqlite3.connect() fails.
    NOTE(review): also assigns self.conn as a side effect, duplicating
    the assignment done by __init__ -- confirm before relying on either.
    """
    try:
        self.conn = sqlite3.connect(arquivo)
        if self.conn is not None:
            # (re)create every table; all use CREATE TABLE IF NOT EXISTS
            self.create_table_usuarios() # criando tabela usuarios
            self.create_table_usuarios_antigos() # criando tabela usuarios antigos
            self.create_table_admin()
            self.create_table_variaveis()
            self.create_table_autorizacao_equip() # tabela de autorizacoes
            # criando controle de uso de equip.
            self.create_table_uso_equip()
            self.create_table_presenca()
            self.create_csv_files()
            # disabled admin seeding kept for reference:
            # password = <PASSWORD>(password)
            # self.add_novo_admin("sem tag", "admin", "Administrador-Germano", "<EMAIL>", password)
        else:
            print("Error! cannot create the database connection.")
        return self.conn
    except Error as e:
        print(e)
        return None
def create_csv_files(self):
    """Create the log/*.csv export files with their headers, if missing.

    Existing files are left untouched.  Fixes the original's leaked file
    handles (bare ``open(..., 'r')`` probes that were never closed) by
    testing existence with os.path.exists instead.
    """
    # (filename, header row) pairs, in the original creation order
    arquivos = [
        ('tabela_uso_equip.csv',
         ["id", "login", "nome", "equipamento", "hora_inicio", "hora_fim",
          "tempo_total", "comentário", "situação"]),
        ('tabela_presenca.csv',
         ["id", "login", "nome", "tag", "hora_entrada", "hora_saida"]),
        ('tabela_autorizacao_equip.csv',
         ["id", "equipamento", "login", "nome", "super"]),
        ('tabela_usuarios.csv',
         ["id", "tag", "login", "nome", "email", "adicionado_por",
          "permissao", "senha", "grupo/orientador"]),
        ('tabela_usuarios_antigos.csv',
         ["id", "tag", "login", "nome", "email", "adicionado_por",
          "permissao", "senha", "grupo/orientador"]),
    ]
    for nome_arquivo, cabecalho in arquivos:
        caminho = os.path.join(dir_path, 'log', nome_arquivo)
        if os.path.exists(caminho):
            continue
        with open(caminho, 'w+') as csv_file:
            csv.writer(csv_file, delimiter=';').writerow(cabecalho)
def check_tabela_presenca(self):
    """Return all rows of ``presenca`` (None on DB error)."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM presenca")
        return cursor.fetchall()
    except Error as e:
        print(e)
def check_tabela_variaveis(self):
    """Return all rows of ``variaveis`` (None on DB error)."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM variaveis")
        return cursor.fetchall()
    except Error as e:
        print(e)
def check_tabela_usuarios(self):
    """Return all rows of ``usuarios`` (None on DB error)."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM usuarios")
        return cursor.fetchall()
    except Error as e:
        print(e)
def check_tabela_usuarios_antigos(self):
    """Return all rows of ``usuarios_antigos`` (None on DB error)."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM usuarios_antigos")
        return cursor.fetchall()
    except Error as e:
        print(e)
# def check_lista_usuarios(self):
def check_lista_nomes_usuarios(self):
    """Return a flat list of every user name (None on DB error)."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT nome FROM usuarios")
        return [nome for (nome,) in cursor.fetchall()]
    except Error as e:
        print(e)
def check_lista_logins_usuarios(self):
    """Return a flat list of every user login (None on DB error)."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT login FROM usuarios")
        return [login for (login,) in cursor.fetchall()]
    except Error as e:
        print(e)
def check_tabela_autorizacao_equip(self):
    """Return all rows of ``autorizacao_equip`` (None on DB error)."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM autorizacao_equip")
        return cursor.fetchall()
    except Error as e:
        print(e)
def check_tabela_uso_equip(self):
    """Return all rows of ``uso_equip`` (None on DB error)."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM uso_equip")
        return cursor.fetchall()
    except Error as e:
        print(e)
def export_all_db_to_csv(self):
    """Dump the main tables to their CSV files under log/, overwriting them.

    Tables are fetched and written in the same order as before:
    uso_equip, autorizacao_equip, usuarios, usuarios_antigos, presenca.
    """
    exportacoes = [
        ('tabela_uso_equip.csv',
         ["id", "login", "nome", "equipamento", "hora_inicio", "hora_fim",
          "tempo_total", "comentario", "situacao"],
         self.check_tabela_uso_equip()),
        ('tabela_autorizacao_equip.csv',
         ["id", "equipamento", "login", "nome", "super"],
         self.check_tabela_autorizacao_equip()),
        ('tabela_usuarios.csv',
         ["id", "tag", "login", "nome", "email", "adicionado_por", "permissao",
          "senha", "grupo/orientador"],
         self.check_tabela_usuarios()),
        ('tabela_usuarios_antigos.csv',
         ["id", "tag", "login", "nome", "email", "adicionado_por", "permissao",
          "senha", "grupo/orientador"],
         self.check_tabela_usuarios_antigos()),
        ('tabela_presenca.csv',
         ["id", "login", "nome", "tag", "hora_entrada", "hora_saida"],
         self.check_tabela_presenca()),
    ]
    for nome_arquivo, cabecalho, linhas in exportacoes:
        with open(os.path.join(dir_path, 'log', nome_arquivo),
                  'w+') as csv_file:
            escritor = csv.writer(csv_file, delimiter=';')
            escritor.writerow(cabecalho)
            escritor.writerows(linhas)
def add_novo_admin(self, nome, email, password):
    """Insert the default admin account; True on success, False on error."""
    comando = "INSERT INTO admin(tag, login, nome, email, senha) VALUES(?,?,?,?,?)"
    try:
        self.conn.cursor().execute(
            comando, ("sem tag", "admin", nome, email, password))
        self.conn.commit()
        return True
    except Error as e:
        print(e)
        return False
def add_variavel(self, variavel_txt, valor):
    """Insert a key/value pair into ``variaveis``; True on success."""
    comando = "INSERT INTO variaveis(variavel, valor) VALUES(?,?)"
    try:
        self.conn.cursor().execute(comando, (variavel_txt, valor))
        self.conn.commit()
        return True
    except Error as e:
        print(e)
        return False
def add_novo_usuario(self,
                     tag_novo,
                     login,
                     nome,
                     email,
                     password,
                     tag_autorizacao,
                     grupo,
                     permissao='apenas uso'):
    """Register a new user; True on success, False on DB error."""
    comando = ("INSERT INTO usuarios(tag, login, nome, email, senha "
               ",adicionado_por, permissao, grupo) VALUES(?,?,?,?,?,?,?,?)")
    try:
        self.conn.cursor().execute(
            comando,
            (tag_novo, login, nome, email, password, tag_autorizacao,
             permissao, grupo))
        self.conn.commit()
        return True
    except Error as e:
        print(e)
        return False
def usuario_aposentado(self,
                       tag_novo,
                       login,
                       nome,
                       email,
                       password,
                       tag_autorizacao,
                       grupo,
                       permissao):
    """Archive a user into ``usuarios_antigos``; True on success."""
    comando = ("INSERT INTO usuarios_antigos(tag, login, nome, email, senha "
               ",adicionado_por, permissao, grupo) VALUES(?,?,?,?,?,?,?,?)")
    try:
        self.conn.cursor().execute(
            comando,
            (tag_novo, login, nome, email, password, tag_autorizacao,
             permissao, grupo))
        self.conn.commit()
        return True
    except Error as e:
        print(e)
        return False
def remove_usuario_para_recadastro(self, login):
    """Delete *login* from usuarios plus its equipment authorizations."""
    try:
        cursor = self.conn.cursor()
        cursor.execute('DELETE FROM usuarios WHERE login=?', (login, ))
        cursor.execute('DELETE FROM autorizacao_equip WHERE login=?', (login, ))
        self.conn.commit()
    except Error as e:
        print(e)
def remove_usuario_por_login(self, login):
    """Archive *login* into usuarios_antigos, then delete the user.

    Fix: check_usuario() returns [] for an unknown login (or None on DB
    error), which previously crashed the tuple unpacking below; now we
    bail out early instead.
    """
    dados = self.check_usuario(login)
    if not dados:
        print("usuario nao encontrado: {0}".format(login))
        return
    idx, tag, login, nome, email, add_por, permissao, senha, grupo = dados
    try:
        self.usuario_aposentado(tag, login, nome, email, senha, add_por,
                                grupo, permissao)
        cur = self.conn.cursor()
        cur.execute('DELETE FROM usuarios WHERE login=?', (login, ))
        cur.execute('DELETE FROM autorizacao_equip WHERE login=?', (login, ))
        self.conn.commit()
    except Error as e:
        print(e)
#
# def check_usuario(self, tag_ou_nome):
# try:
# cur = self.conn.cursor()
# cur.execute("SELECT * FROM usuarios WHERE nome=?", (tag_ou_nome, ))
# row = cur.fetchall()
# if row == []:
# cur = self.conn.cursor()
# cur.execute("SELECT * FROM usuarios WHERE tag=?",
# (tag_ou_nome, ))
# row = cur.fetchall()
# return row
# except Error as e:
# print(e)
def check_variavel(self, variavel_txt):
    """Return the stored value for *variavel_txt*; [] if absent, None on error."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT valor FROM variaveis WHERE variavel=?",
                       (variavel_txt, ))
        resultado = cursor.fetchall()
        return resultado[0][0] if resultado else resultado
    except Error as e:
        print(e)
def check_admin(self):
    """Return the admin row as a list; [] when no admin exists, None on error."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM admin WHERE login='admin'")
        linhas = cursor.fetchall()
        return list(linhas[0]) if linhas else linhas
    except Error as e:
        print(e)
def check_usuario(self, login):
    """Return the user row for *login* as a list; [] if unknown, None on error."""
    try:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM usuarios WHERE login=?", (login, ))
        linhas = cursor.fetchall()
        return list(linhas[0]) if linhas else linhas
    except Error as e:
        print(e)
def check_senha(self, login):
    """Return the stored password hash for *login*, or None when unknown.

    Fix: index the first row directly instead of materializing a list of
    every matching row's first column just to take element 0.
    """
    try:
        cur = self.conn.cursor()
        cur.execute("SELECT senha FROM usuarios WHERE login=?", (login, ))
        rows = cur.fetchall()
        if rows:
            return rows[0][0]
    except Error as e:
        print(e)
def set_senha(self, login, senha):
try:
sql = "UPDATE usuarios SET senha = | |
42)
@parameterized.parameters([
    [42, r'\'int\' object is not iterable'],
    [[47.11], r'location is not integer-like \(found type: float\)']
])
def test_initializer_locations_type_error(self, locations, message):
    # valid two-operation focus with empty context circuits; the
    # `locations` argument is the only invalid input under test
    focus = (
        _random_operation(1, 2),
        _random_operation(1)
    )
    context = transform.TransformationContext(
        circuit.Circuit(5, None),
        circuit.Circuit(5, None),
        circuit.Circuit(5, None)
    )
    with self.assertRaisesRegex(TypeError, message):
        transform.AttentionCircuit(focus, context, locations=locations)
def test_initializer_locations_length_error(self):
    # two focus operations but only one location must be rejected
    focus = (
        _random_operation(1, 2),
        _random_operation(1)
    )
    context = transform.TransformationContext(
        circuit.Circuit(5, [
            _random_operation(2),
            _random_operation(3),
            _random_operation(4)
        ]),
        circuit.Circuit(5, [
            _random_operation(0)
        ]),
        circuit.Circuit(5, [
            _random_operation(0),
            _random_operation(1, 2)
        ])
    )
    with self.assertRaisesRegex(
        ValueError,
        r'inconsistent lengths for focus and locations: 2 vs. 1'):
        transform.AttentionCircuit(focus, context, locations=(3,))
class TransformationContextTest(parameterized.TestCase):
    """Tests for transform.TransformationContext."""

    def test_initializer_and_getters(self):
        # preparation work: create three circuits before, between and after
        num_qubits = 5
        before = circuit.Circuit(num_qubits, [
            _random_operation(0, 2),
            _random_operation(4),
            _random_operation(1)
        ])
        between = circuit.Circuit(num_qubits, [
            _random_operation(0),
            _random_operation(4)
        ])
        after = circuit.Circuit(num_qubits, [
            _random_operation(0, 1),
            _random_operation(1, 2),
            _random_operation(2, 3, 4)
        ])
        # construct the TransformationContext
        context = transform.TransformationContext(before, between, after)
        # check before, between and after (identity, not mere equality)
        self.assertIs(context.before(), before)
        self.assertIs(context.between(), between)
        self.assertIs(context.after(), after)

    @parameterized.parameters([
        [
            42,
            circuit.Circuit(5, None),
            circuit.Circuit(5, None),
            'int, Circuit, Circuit'
        ],
        [
            circuit.Circuit(6, None),
            47.11,
            circuit.Circuit(6, None),
            'Circuit, float, Circuit'
        ],
        [
            circuit.Circuit(7, None),
            circuit.Circuit(7, None),
            'hello',
            'Circuit, Circuit, str'
        ],
    ])
    def test_initializer_type_error(self, before, between, after, type_string):
        # each case replaces exactly one argument with a non-Circuit value
        with self.assertRaisesRegex(
            TypeError,
            r'before, between and after must be Circuits \(found types: %s\)'
            %type_string):
            transform.TransformationContext(before, between, after)

    @parameterized.parameters([
        [7, 5, 5],
        [8, 4, 8],
        [3, 3, 6],
        [2, 3, 4]
    ])
    def test_initializer_inconsistent_num_qubits_error(self,
                                                       num_before,
                                                       num_between,
                                                       num_after):
        # the three partial circuits must agree on their qubit count
        before = circuit.Circuit(num_before, None)
        between = circuit.Circuit(num_between, None)
        after = circuit.Circuit(num_after, None)
        with self.assertRaisesRegex(
            ValueError,
            r'inconsistent number of qubits for before, between and after:'
            r' \(%d, %d, %d\)'%(num_before, num_between, num_after)):
            transform.TransformationContext(before, between, after)

    def test_inject(self):
        # preparation work: create operations (letters follow circuit order)
        num_qubits = 5
        operation_a = _random_operation(0)
        operation_b = _random_operation(0, 1)
        operation_c1 = _random_operation(1)
        operation_c2 = _random_operation(1, 2)
        operation_c3 = _random_operation(2)
        operation_d1 = _random_operation(2, 3)
        operation_d2 = _random_operation(3)
        operation_e1 = _random_operation(3, 4)
        operation_e2 = _random_operation(4)
        # preparation work: construct the TransformationContext
        context = transform.TransformationContext(
            circuit.Circuit(num_qubits, [operation_a]),
            circuit.Circuit(num_qubits, [operation_c1, operation_c2, operation_c3]),
            circuit.Circuit(num_qubits, [operation_e1, operation_e2])
        )
        # call the method to be tested
        circ_full = context.inject([operation_b], [operation_d1, operation_d2])
        # check type for circ_full
        self.assertIs(type(circ_full), circuit.Circuit)
        # check value for circ_full:
        # before + first injected + between + second injected + after
        self.assertTrue(_elementwise_is(
            circ_full.get_operation_sequence(),
            [
                operation_a,
                operation_b,
                operation_c1,
                operation_c2,
                operation_c3,
                operation_d1,
                operation_d2,
                operation_e1,
                operation_e2
            ]
        ))

    @parameterized.parameters([
        [[42], [_random_operation(1, 2)]],
        [[_random_operation(1, 2)], [42]]
    ])
    def test_inject_type_error(self, operations_first, operations_second):
        # a non-Operation element in either argument must be rejected
        num_qubits = 4
        context = transform.TransformationContext(
            circuit.Circuit(num_qubits, None),
            circuit.Circuit(num_qubits, None),
            circuit.Circuit(num_qubits, None)
        )
        with self.assertRaisesRegex(
            TypeError,
            r'found illegal type\(s\) in operation_sequence: int \(expected:'
            r' Operation\)'):
            context.inject(operations_first, operations_second)
class FocusSingleOperationTest(parameterized.TestCase):
    """Tests for transform.focus_single_operation."""

    @parameterized.parameters([3, -2]) # both locations are equivalent
    def test_successful(self, location):
        # preparation work: a 5-operation circuit; operation3 is the target
        operation0 = _random_operation(0)
        operation1 = _random_operation(0, 1)
        operation2 = _random_operation(1)
        operation3 = _random_operation(1, 2)
        operation4 = _random_operation(2)
        circ = circuit.Circuit(5, [
            operation0,
            operation1,
            operation2,
            operation3,
            operation4
        ])
        # call the function to be tested
        attention_circ = transform.focus_single_operation(circ, location)
        # check type of attention_circ
        self.assertIs(type(attention_circ), transform.AttentionCircuit)
        # check the focus of attention_circ
        self.assertLen(attention_circ, 1)
        self.assertTrue(_elementwise_is(attention_circ.focus(), [operation3]))
        # check the context of attention_circ: everything before the target
        # ends up in before(), everything behind it in after()
        context = attention_circ.context()
        self.assertTrue(_elementwise_is(
            context.before().get_operation_sequence(),
            [operation0, operation1, operation2]
        ))
        self.assertEmpty(context.between())
        self.assertTrue(_elementwise_is(
            context.after().get_operation_sequence(),
            [operation4]
        ))
        # check the locations of attention_circ (always non-negative)
        self.assertTupleEqual(attention_circ.locations(), (3,))

    def test_circ_type_error(self):
        with self.assertRaisesRegex(
            TypeError,
            r'circ is not a Circuit \(found type: range\)'):
            transform.focus_single_operation(range(10), 3)

    def test_location_type_error(self):
        circ = circuit.Circuit(5, None)
        with self.assertRaisesRegex(
            TypeError,
            r'location is not integer-like \(found type: float\)'):
            transform.focus_single_operation(circ, 47.11)

    @parameterized.parameters([5, -6])
    def test_location_out_of_bounds_error(self, location):
        # both positive and negative out-of-range indices must be rejected
        circ = circuit.Circuit(3, [
            _random_operation(0),
            _random_operation(0, 1),
            _random_operation(1),
            _random_operation(1, 2),
            _random_operation(2),
        ])
        with self.assertRaisesRegex(
            IndexError,
            r'location %d out of bounds for a Circuit of length 5'%location):
            transform.focus_single_operation(circ, location)
def _positive_example_circuit(*segments_and_operations):
    """Build (circuit, expected AttentionCircuit) from tagged operations.

    Each positional argument is a pair ``(segment_tag, operation)`` where
    segment_tag is one of 'focus', 'before', 'between', 'after'; exactly two
    operations must be tagged 'focus'.  The assert block below verifies
    that the example is actually positive, i.e. that the two focus
    operations can be brought together via trivial commutations.
    """
    operations = []
    segments = {
        'focus': [],
        'before': [],
        'between': [],
        'after': []
    }
    max_qubit = 0
    for location, (segment_tag, operation) in enumerate(segments_and_operations):
        operations.append(operation)
        segments[segment_tag].append(location)
        max_qubit = np.maximum(max_qubit, max(operation.get_qubits()))
    circ = circuit.Circuit(max_qubit + 1, operations)
    # <checking that the example circuit makes sense>
    assert len(segments['focus']) == 2
    location_first, location_second = segments['focus'] # length checked in previous line, so pylint: disable=unbalanced-tuple-unpacking
    # 'before' operations must precede the second focus operation
    assert all(
        location_before < location_second
        for location_before in segments['before']
    )
    # 'between' operations must sit strictly between the two focus ops
    assert all(
        location_first < location_between < location_second
        for location_between in segments['between']
    )
    # 'after' operations must come behind the first focus operation
    assert all(
        location_after > location_first
        for location_after in segments['after']
    )
    # operations that need to be commuted past a focus operation
    pool_to_the_left = [
        location_before
        for location_before in segments['before']
        if location_before > location_first
    ]
    pool_to_the_right = [
        location_after
        for location_after in segments['after']
        if location_after < location_second
    ]
    # every required commutation must be a trivial one
    assert all(
        circ[location_second].commutes_trivially_with(circ[location])
        for location in segments['between'] + pool_to_the_right
    )
    assert all(
        circ[location_first].commutes_trivially_with(circ[location])
        for location in pool_to_the_left + segments['between']
    )
    # relative order within the context segments must also be achievable
    assert all(
        loc0 < loc1 or circ[loc0].commutes_trivially_with(circ[loc1])
        for loc0, loc1 in itertools.product(pool_to_the_left, segments['between'])
    )
    assert all(
        loc0 < loc1 or circ[loc0].commutes_trivially_with(circ[loc1])
        for loc0, loc1 in itertools.product(pool_to_the_left, pool_to_the_right)
    )
    assert all(
        loc0 < loc1 or circ[loc0].commutes_trivially_with(circ[loc1])
        for loc0, loc1 in itertools.product(segments['between'],
                                            pool_to_the_right)
    )
    # </checking that the example circuit makes sense>
    return circ, transform.AttentionCircuit(
        focus=circ[segments['focus']].get_operation_sequence(),
        context=transform.TransformationContext(
            before=circ[segments['before']],
            between=circ[segments['between']],
            after=circ[segments['after']]
        ),
        locations=segments['focus']
    )
def _positive_focus_operation_pair_examples():
    """Yield (circ, expected AttentionCircuit) pairs that
    focus_operation_pair must accept; built via _positive_example_circuit."""
    # adjacent focus pairs, no context at all
    yield _positive_example_circuit(
        ['focus', _random_operation(0, 1)],
        ['focus', _random_operation(0, 1)]
    )
    yield _positive_example_circuit(
        ['focus', _random_operation(0, 1)],
        ['focus', _random_operation(1, 0)]
    )
    # focus pair surrounded by before/after context only
    yield _positive_example_circuit(
        ['before', _random_operation(0, 1)],
        ['before', _random_operation(0)],
        ['focus', _random_operation(0, 1)],
        ['focus', _random_operation(0, 1)],
        ['after', _random_operation(1)],
        ['after', _random_operation(0)]
    )
    # operations between the focus pair on disjoint qubits
    yield _positive_example_circuit(
        ['focus', _random_operation(1, 2)],
        ['between', _random_operation(0)],
        ['between', _random_operation(3)],
        ['focus', _random_operation(1, 2)]
    )
    yield _positive_example_circuit(
        ['before', _random_operation(0, 1)],
        ['before', _random_operation(1, 2)],
        ['before', _random_operation(0)],
        ['focus', _random_operation(1, 2)],
        ['between', _random_operation(0)],
        ['between', _random_operation(3)],
        ['focus', _random_operation(1, 2)],
        ['after', _random_operation(1)],
        ['after', _random_operation(2)]
    )
    # context operations that must be commuted past a focus operation
    yield _positive_example_circuit(
        ['focus', _random_operation(0, 1)],
        ['before', _random_operation(2, 3)],
        ['between', _random_operation(3, 4)],
        ['focus', _random_operation(1, 2)]
    )
    yield _positive_example_circuit(
        ['focus', _random_operation(2, 3)],
        ['between', _random_operation(0, 1)],
        ['after', _random_operation(1, 2)],
        ['focus', _random_operation(3, 4)]
    )
    yield _positive_example_circuit(
        ['focus', _random_operation(0, 1)],
        ['before', _random_operation(3, 4)],
        ['before', _random_operation(2, 3)],
        ['focus', _random_operation(1, 2)]
    )
    yield _positive_example_circuit(
        ['focus', _random_operation(2, 3)],
        ['after', _random_operation(1, 2)],
        ['after', _random_operation(0, 1)],
        ['focus', _random_operation(3, 4)]
    )
    yield _positive_example_circuit(
        ['focus', _random_operation(0, 1)],
        ['before', _random_operation(2, 3)],
        ['after', _random_operation(0, 3)],
        ['focus', _random_operation(1, 2)]
    )
    # all orderings of enclosed single-qubit context operations
    for enclosed_operations in itertools.permutations([
        ['before', _random_operation(2)],
        ['between', _random_operation(3)],
        ['after', _random_operation(0)]]):
        yield _positive_example_circuit(*(
            [['focus', _random_operation(0, 1)]] +
            list(enclosed_operations) +
            [['focus', _random_operation(1, 2)]]
        ))
    for enclosed_operations in itertools.permutations([
        ['before', _random_operation(3)],
        ['between', _random_operation(4)],
        ['after', _random_operation(0, 1)]]):
        yield _positive_example_circuit(*(
            [['focus', _random_operation(1, 2)]] +
            list(enclosed_operations) +
            [['focus', _random_operation(2, 3)]]
        ))
    for enclosed_operations in itertools.permutations([
        ['before', _random_operation(2, 3)],
        ['between', _random_operation(4)],
        ['after', _random_operation(0)]]):
        yield _positive_example_circuit(*(
            [['focus', _random_operation(0, 1)]] +
            list(enclosed_operations) +
            [['focus', _random_operation(1, 2)]]
        ))
    for enclosed_operations in itertools.permutations([
        ['before', _random_operation(3, 4)],
        ['between', _random_operation(5)],
        ['after', _random_operation(0, 1)]]):
        yield _positive_example_circuit(*(
            [['focus', _random_operation(1, 2)]] +
            list(enclosed_operations) +
            [['focus', _random_operation(2, 3)]]
        ))
class FocusOperationPairTest(parameterized.TestCase):
@parameterized.parameters(_positive_focus_operation_pair_examples())
def test_positive(self, circ, att_circ_expected):
    # sanity-check the pre-built expectation before using it
    assert len(att_circ_expected) == 2
    location_first, location_second = att_circ_expected.locations()
    # call the function to be tested
    att_circ = transform.focus_operation_pair(
        circ,
        location_first,
        location_second
    )
    # check the type for att_circ
    self.assertIsInstance(att_circ, transform.AttentionCircuit)
    # check the focus for att_circ
    self.assertLen(att_circ, 2)
    self.assertTrue(_elementwise_is(
        att_circ.focus(),
        att_circ_expected.focus()
    ))
    # check the locations for att_circ
    self.assertTupleEqual(
        att_circ.locations(),
        (location_first, location_second)
    )
    # check the context for att_circ (operations compared by identity)
    self.assertTrue(_elementwise_is(
        att_circ.context().before().get_operation_sequence(),
        att_circ_expected.context().before().get_operation_sequence()
    ))
    self.assertTrue(_elementwise_is(
        att_circ.context().between().get_operation_sequence(),
        att_circ_expected.context().between().get_operation_sequence()
    ))
    self.assertTrue(_elementwise_is(
        att_circ.context().after().get_operation_sequence(),
        att_circ_expected.context().after().get_operation_sequence()
    ))
@parameterized.parameters([
    [
        circuit.Circuit(1, [
            _random_operation(0),
            _random_operation(0),
            _random_operation(0)
        ]),
        0, 2
    ],
    [
        circuit.Circuit(2, [
            _random_operation(0, 1),
            _random_operation(0),
            _random_operation(0, 1)
        ]),
        0, 2
    ],
    [
        circuit.Circuit(3, [
            _random_operation(0, 1),
            _random_operation(1),
            _random_operation(1, 2)
        ]),
        0, 2
    ],
    [
        circuit.Circuit(3, [
            _random_operation(0, 1),
            _random_operation(0, 2),
            _random_operation(1, 2)
        ]),
        0, 2
    ],
    [
        circuit.Circuit(3, [
            _random_operation(0, 1),
            _random_operation(0, 2),
            _random_operation(1),
            _random_operation(1, 2)
        ]),
        0, 3
    ],
    [
        circuit.Circuit(3, [
            _random_operation(0, 1),
            _random_operation(2),
            _random_operation(0, 2),
            _random_operation(1, 2)
        ]),
        0, 3
    ],
    [
        circuit.Circuit(4, [
            _random_operation(0, 1),
            _random_operation(0, 3),
            _random_operation(2, 3),
            _random_operation(1, 2)
        ]),
        0, 3
    ]
])
def test_negative(self, circ, location_first, location_second):
    # each case pins two operations separated by operations on overlapping
    # qubits, so they cannot be aligned and focusing the pair must fail
    with self.assertRaises(transform.OperationsNotAlignedError):
        transform.focus_operation_pair(circ, location_first, location_second)
def test_circ_type_error(self):
    # a non-Circuit first argument must be rejected with a TypeError
    with self.assertRaisesRegex(
        TypeError,
        r'circ is not a Circuit \(found type: range\)'):
        transform.focus_operation_pair(range(10), 3, 5)
def test_location_first_type_error(self):
    # a float location_first must be rejected with a TypeError
    circ = circuit.Circuit(3, [
        _random_operation(0),
        _random_operation(0, 1),
        _random_operation(1),
        _random_operation(1, 2),
        _random_operation(2)
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'location_first is not integer-like \(found type: float\)'):
        transform.focus_operation_pair(circ, 47.11, 3)
def test_location_second_type_error(self):
    # a float location_second must be rejected with a TypeError
    circ = circuit.Circuit(3, [
        _random_operation(0),
        _random_operation(0, 1),
        _random_operation(1),
        _random_operation(1, 2),
        _random_operation(2)
    ])
    with self.assertRaisesRegex(
        TypeError,
        r'location_second is not integer-like \(found type: float\)'):
        transform.focus_operation_pair(circ, 3, 47.11)
@parameterized.parameters([5, -6])
def test_location_first_out_of_bounds_error(self, location_first):
circ = circuit.Circuit(3, [
_random_operation(0),
_random_operation(0, 1),
_random_operation(1),
_random_operation(1, 2),
_random_operation(2),
])
with self.assertRaisesRegex(
IndexError,
r'location_first %d out of bounds for a Circuit | |
= id_ordine,codice_ordine = codice_ordine, n_riga = n_riga,codice_articolo=codice_articolo,descrizione=descrizione,riferimento_ordine=row["riferimento_ordine"],u_m=row["u_m"],quantita=quantita_prodotta,prezzo=prezzo,evasione=evasione,user_id = auth.user_id,codice_iva=row["codice_iva"])
else:
descrizione =row.descrizione
pa.add_row(row.codice_articolo,descrizione,"","","")
# print descrizione
"""
if tutte_le_righe_completate:
ordine = db(db.ordine_cliente.id == id_ordine).select().first()
ordine.update_record(ddt_completato = True)
"""
if tutte_le_righe_completate():
ordine = db(db.ordine_cliente.id == id_ordine).select().first()
ordine.update_record(ddt_completato = True)
else:
ordine = db(db.ordine_cliente.id == id_ordine).select().first()
ordine.update_record(ddt_completato = False)
except Exception,e:
response.flash="Errore inserimento ddt {0}".format(e)
return locals()
# print row
# p.insert_rows()
pa.insert_rows()
# print pa.rows
pa.create_pdf()
# print request.folder
redirect(URL('ddt_clienti'))
return "ok"
@service.jsonrpc
@service.jsonrpc2
def insert_mod_ddt_preview(*args):
    # Render a PREVIEW PDF of a customer DDT (transport document) without
    # persisting DDT header changes or saved-row bookkeeping (the
    # corresponding update/insert calls are commented out below).
    # Positional JSON-RPC args: 0=ddt id, 1=delivery address (CSV string),
    # 2=transport mode, 3=carrier company, 4=carrier address, 5=appearance,
    # 6=package count, 7=carriage terms, 8=notes, 9=weight, 10=reason.
    id_ddt=args[0]
    consegna = args[1]
    trasporto = args[2]
    ditta = args[3]
    domicilio = args[4]
    aspetto = args[5]
    colli = args[6]
    porto = args[7]
    annotazioni = args[8]
    peso = args[9]
    causale = args[10]
    ddt_id = db(db.ddt_cliente.id == id_ddt).select().first()
    # NOTE(review): header persistence intentionally disabled in the preview.
    # ddt_id.update_record(porto=porto,aspetto=aspetto,peso=peso,annotazioni=annotazioni,trasporto_a_mezzo=trasporto,causale_del_trasporto=causale,inizio_del_trasporto="",ditta_vettore=ditta,domicilio_vettore=domicilio,data_e_ora_del_ritiro="",user_id = auth.user_id)
    # print "CIAOOOO ",ddt_id
    id_cliente = ddt_id.id_cliente
    nome_cliente = ddt_id.nome_cliente
    row = db(db.clienti.id==id_cliente).select().first()
    try:
        # The delivery address arrives as one comma-separated string.
        consegna = consegna.split(",")
    except:
        # Fallback placeholder: "same as header" plus empty fields.
        consegna = "Come intestazione ,,,,,,".split(",")
    """
    Insert into saved ddt table
    """
    numero_ddt_corrente = ddt_id.numero_ddt
    # print numero_ddt_corrente
    # db.saved_ddt.insert(numero_ddt = numero_ddt_corrente,saved_ddt_id = ddt_id.id, data_inserimento = datetime.datetime.now(), user_id = auth.user_id)
    # data_scelta is hard-coded empty, so the document date is always today.
    data_scelta =""
    if len(data_scelta)>0:
        d = data_scelta
    else:
        d = datetime.datetime.now().date().strftime("%d/%m/%Y")
    # anteprima=True puts the DDT generator in preview mode.
    pa = DDT(d,numero_ddt_corrente,"Cliente",anteprima=True)
    # print "DDT CORRENTE : ",numero_ddt_corrente
    pa.rows=[]
    # p.intestazione("<NAME>", "ROMA","PIAZZA MONTE GRAPPA 4", "00195", "RM", "IT", "123456", "00881841001")
    pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
    # p.consegna("<NAME>", "<NAME>", "<NAME>EIN 35", "50013", "FI")
    try:
        pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
    except:
        # Fewer than 5 CSV fields: render placeholder delivery data.
        pa.consegna("null","null","null","null","null")
    # p.info_trasporto("Vettore", "TNT GLOBAL EXPRESS SPA", "VENDITA","29/11/16", "LODI", "28/11/16")
    pa.info_trasporto(trasporto, ditta, causale,"", domicilio, "")
    # p.footer("scatola su bancale","100","ASSEGNATO","NOTE","123")
    pa.footer(aspetto,colli,porto,annotazioni,peso)
    # print "ciao ",ddt_id
    """
    1) salvare le righe del ddt in una tabella per creare UNDO
    2) cancellare i riferimenti a saved_righe_in_ddt_cliente
    3) inserire le righe ddt as usual
    """
    # return ""
    # tutte_le_righe_completate = True
    # Staged DDT rows for the current user are the preview's line items.
    rows = db(db.righe_in_ddt_cliente.user_id == auth.user_id).select()
    try:
        for row in rows:
            id_ordine = row["id_ordine"]
            codice_articolo = row["codice_articolo"]
            codice_ordine = row["codice_ordine"]
            # Quantity bookkeeping only applies to real article rows,
            # not "commento" (comment) rows.
            if "commento" not in codice_articolo:
                quantita = row['quantita_prodotta']
                prezzo = row['prezzo']
                riferimento_ordine = row["riferimento_ordine"]+" - POS."+row["n_riga"]
                n_riga = row["n_riga"]
                codice_iva = row["codice_iva"]
                evasione = row["evasione"]
                id_riga_ordine = row["id_riga_ordine"]
                # print row
                # q is the quantity already produced for this order row,
                # if any was recorded before.
                q = db(db.produzione_righe_per_ddt.id_riga_ordine == id_riga_ordine).select().first()
                # print "Quantita trovata già prodotta : ",q
                if q is not None:
                    try:
                        quantita_richiesta = int(row["quantita_richiesta"])
                        quantita_prodotta = int(row["quantita_prodotta"])
                        quantita_prodotta_fino_ad_ora = 0
                        quantita_prodotta_fino_ad_ora = int(q.quantita_prodotta) + quantita_prodotta
                        # r = db(db.produzione_righe_per_ddt.id_riga_ordine == str(id_riga_ordine)).select().first()
                        # r.update_record(quantita_prodotta=str(quantita_prodotta_fino_ad_ora))
                    except Exception,e:
                        # Non-numeric quantities: warn the user and bail out.
                        response.flash="Controlla le quantità"
                        # print e
                        return "ok"
                else:
                    """
                    E' la prima volta che inserisco la riga della quantità
                    """
                    # print "E' la prima volta che inserisco la riga della quantita"
                    quantita_prodotta_fino_ad_ora = int(row["quantita_prodotta"])
                    quantita_prodotta = int(row["quantita_prodotta"])
                    quantita_richiesta = int(row["quantita_richiesta"])
                    # NOTE(review): this insert DOES persist even in preview
                    # mode — TODO confirm that is intended.
                    db.produzione_righe_per_ddt.insert(id_riga_ordine = id_riga_ordine,quantita_prodotta = quantita_prodotta)
                if quantita_prodotta_fino_ad_ora >= int(quantita_richiesta):
                    # print "Chiudo la riga"
                    # to_update = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
                    # to_update.update_record(riga_emessa_in_ddt = True)
                    pass
                else:
                    # tutte_le_righe_completate = Fals
                    pass
                # print "SON<NAME>"
                # print "{0}".format(tutte_le_righe_completate)
                quantita_totale_prodotta = int(quantita_prodotta) + int(quantita_prodotta_fino_ad_ora)
            # print "CODICE ARTICOLO : ",codice_articolo
            if len(codice_articolo)>0:
                if "commento" not in codice_articolo:
                    descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
                else:
                    # Comment rows: description comes from the order row and
                    # all other columns are blanked for rendering.
                    d = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine).select().first()["commento"]
                    # print "COMMENTO {0}, RIGA ORDINE {1}".format(d,row.id_riga_ordine)
                    descrizione = d
                    row.codice_articolo=" "
                    n_riga=" "
                    riferimento_ordine=" "
                    quantita_prodotta=0
                    prezzo=" "
                    evasione=" "
                    row["u_m"]=" "
                pa.add_row(row.codice_articolo,descrizione,riferimento_ordine,row["u_m"],str(row.quantita_prodotta))
                # db.saved_righe_in_ddt_cliente.insert(saved_ddt_id = ddt_id.id,id_ordine = id_ordine,codice_ordine = codice_ordine, n_riga = n_riga,codice_articolo=codice_articolo,descrizione=descrizione,riferimento_ordine=row["riferimento_ordine"],u_m=row["u_m"],quantita=quantita_prodotta,prezzo=prezzo,evasione=evasione,user_id = auth.user_id,codice_iva=row["codice_iva"])
            else:
                # Empty article code: render description only.
                descrizione =row.descrizione
                pa.add_row(row.codice_articolo,descrizione,"","","")
            # print descrizione
        """
        if tutte_le_righe_completate:
        ordine = db(db.ordine_cliente.id == id_ordine).select().first()
        ordine.update_record(ddt_completato = True)
        """
    except Exception,e:
        response.flash="Errore inserimento ddt {0}".format(e)
        return locals()
    # print row
    # p.insert_rows()
    pa.insert_rows()
    # print pa.rows
    pa.create_pdf()
    # print request.folder
    redirect(URL('ddt_clienti'))
    return "ok"
def tutte_le_righe_completate():
    """Return True when every non-comment row staged in righe_in_ddt_cliente
    for the logged-in user has already been emitted in a DDT (i.e. the
    matching righe_in_ordine_cliente row has riga_emessa_in_ddt set).

    Best-effort: any lookup failure is swallowed and the verdict computed so
    far is returned, matching the original permissive behavior.
    """
    rows = db(db.righe_in_ddt_cliente.user_id == auth.user_id).select()
    righe_completate = True
    try:
        for row in rows:
            # Resolve the order-row id; older staged rows may lack it, in
            # which case fall back to an (order id, line number) lookup.
            if row.id_riga_ordine is None or len(row.id_riga_ordine) < 1:
                id_riga_ordine = db(
                    (db.righe_in_ordine_cliente.id_ordine_cliente == row.id_ordine) &
                    (db.righe_in_ordine_cliente.n_riga == row.n_riga)).select().first()["id"]
            else:
                id_riga_ordine = row.id_riga_ordine
            codice_articolo = row["codice_articolo"]
            # Comment rows never block completion.
            if "commento" not in codice_articolo:
                riga = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
                if not riga.riga_emessa_in_ddt:
                    righe_completate = False
                    break  # one incomplete row decides the outcome
    except Exception:
        # Swallow lookup failures, as the original code did (`e` was unused;
        # the old `except Exception,e:` form was also Python-2-only).
        pass
    return righe_completate
def riga_completata(id_riga_ordine):
    """Return the riga_emessa_in_ddt flag of the given customer order row."""
    riga = db(db.righe_in_ordine_cliente.id == id_riga_ordine).select().first()
    return riga.riga_emessa_in_ddt
def tutte_le_righe_completate_in_ordine_id(id_ordine):
    """Return True when every non-comment row of the given customer order
    (ordine_cliente id) has been emitted in a DDT.

    Best-effort: any failure is swallowed and the verdict computed so far is
    returned, matching the original permissive behavior.
    """
    rows = db(db.righe_in_ordine_cliente.id_ordine_cliente == id_ordine).select()
    righe_completate = True
    try:
        for row in rows:
            codice_articolo = row["codice_articolo"]
            # Comment rows never block completion.
            if "commento" not in codice_articolo:
                if not row.riga_emessa_in_ddt:
                    righe_completate = False
                    break  # one incomplete row decides the outcome
    except Exception:
        # Swallow failures, as the original code did (`e` was unused; the old
        # `except Exception,e:` form was also Python-2-only).
        pass
    return righe_completate
@service.jsonrpc
@service.jsonrpc2
def insert_ddt_fornitori(*args):
    # Create and PERSIST a supplier ("fornitore") DDT: updates the DDT
    # header, advances the global DDT counter, archives header and rows in
    # the saved_* tables, renders the PDF and redirects to the listing page.
    # Positional JSON-RPC args: 0=ddt id, 1=delivery address (CSV string),
    # 2=transport mode, 3=carrier company, 4=carrier address, 5=appearance,
    # 6=package count, 7=carriage terms, 8=notes, 9=weight, 10=reason,
    # 11=chosen date (dd/mm/yyyy, empty means "today").
    id_ddt=args[0]
    consegna = args[1]
    trasporto = args[2]
    ditta = args[3]
    domicilio = args[4]
    aspetto = args[5]
    colli = args[6]
    porto = args[7]
    annotazioni = args[8]
    peso = args[9]
    causale = args[10]
    data_scelta = args[11]
    if len(data_scelta)>0:
        d = data_scelta
    else:
        d = datetime.datetime.now().date().strftime("%d/%m/%Y")
    # print args
    ddt_id = db(db.ddt_fornitore.id == id_ddt).select().first()
    ddt_id.update_record(porto=porto,aspetto=aspetto,peso=peso,annotazioni=annotazioni,trasporto_a_mezzo=trasporto,causale_del_trasporto=causale,inizio_del_trasporto="",ditta_vettore=ditta,domicilio_vettore=domicilio,data_e_ora_del_ritiro="",user_id = auth.user_id)
    id_fornitore = ddt_id.id_fornitore
    nome_fornitore = ddt_id.nome_fornitore
    row = db(db.fornitori.id==id_fornitore).select().first()
    # The delivery address arrives as one comma-separated string.
    consegna = consegna.split(",")
    """
    Insert into saved ddt table
    """
    # Advance the global DDT counter, formatted as "number/year".
    numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
    n = numero_ddt_salvato.split("/")[0]
    a = numero_ddt_salvato.split("/")[1]
    new_n = str(int(n) + 1)
    numero_ddt_corrente = new_n + "/" + a
    ddt_id.update_record(numero_ddt=numero_ddt_corrente)
    db.saved_ddt_fornitori.insert(numero_ddt = numero_ddt_corrente,saved_ddt_id = ddt_id.id, data_inserimento = d, user_id = auth.user_id)
    row2 = db(db.ddt).select().first()
    row2.update_record(numero_ddt = numero_ddt_corrente)
    pa = DDT(d,numero_ddt_corrente,"Fornitore")
    # print "DDT CORRENTE : ",numero_ddt_corrente
    pa.rows=[]
    # p.intestazione("LEONARDO SPA", "ROMA","PIAZZA MONTE GRAPPA 4", "00195", "RM", "IT", "123456", "00881841001")
    pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
    # p.consegna("<NAME>", "<NAME>", "<NAME> 35", "50013", "FI")
    try:
        pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
    except:
        # Fewer than 5 CSV fields: render placeholder delivery data.
        pa.consegna("null","null","null","null","null")
    # p.info_trasporto("Vettore", "TNT GLOBAL EXPRESS SPA", "VENDITA","29/11/16", "LODI", "28/11/16")
    pa.info_trasporto(trasporto, ditta, causale,"", domicilio, "")
    # p.footer("scatola su bancale","100","ASSEGNATO","NOTE","123")
    pa.footer(aspetto,colli,porto,annotazioni,peso)
    # Staged supplier DDT rows for the current user become the line items.
    rows = db(db.righe_in_ddt_fornitore.user_id == auth.user_id).select()
    for row in rows:
        quantita = row['quantita']
        prezzo = row['prezzo']
        codice_articolo = row["codice_articolo"]
        riferimento_ordine = row["codice_ordine"]+" - POS."+row["n_riga"]
        id_ordine = row["id_ordine"]
        codice_ordine = row["codice_ordine"]
        n_riga = row["n_riga"]
        codice_iva = row["codice_iva"]
        evasione = row["evasione"]
        # Mark the whole supplier order as completed as soon as any of its
        # rows is emitted. NOTE(review): this flags the order even when other
        # rows remain — TODO confirm this is intended.
        ordine=db(db.ordine_fornitore.id == id_ordine).select().first()
        ordine.update_record(ddt_completato = True)
        # print "CODICE ARTICOLO : ",codice_articolo
        if len(codice_articolo)>0:
            # descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
            if "commento" not in codice_articolo:
                descrizione = db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().descrizione
            else:
                # Comment rows: blank out the article code and line number.
                descrizione = row.descrizione
                row.codice_articolo=""
                n_riga=""
            pa.add_row(row.codice_articolo,descrizione,riferimento_ordine,row["u_m"],row["quantita"])
            # Archive the emitted row for later reprint/undo.
            db.saved_righe_in_ddt_fornitore.insert(saved_ddt_id = ddt_id.id,id_ordine = id_ordine,codice_ordine = codice_ordine, n_riga = n_riga,codice_articolo=codice_articolo,descrizione=descrizione,riferimento_ordine=row["riferimento_ordine"],u_m=row["u_m"],quantita=quantita,prezzo=prezzo,evasione=evasione,user_id = auth.user_id,codice_iva=row["codice_iva"])
        else:
            # Empty article code: render description only.
            descrizione =row.descrizione
            pa.add_row(row.codice_articolo,descrizione,"","","")
        # print descrizione
        # print row
    # p.insert_rows()
    pa.insert_rows()
    pa.create_pdf()
    # print request.folder
    redirect(URL('ddt_fornitori'))
    return "ok"
@service.jsonrpc
@service.jsonrpc2
def insert_ddt_fornitori_preview(*args):
id_ddt=args[0]
consegna = args[1]
trasporto = args[2]
ditta = args[3]
domicilio = args[4]
aspetto = args[5]
colli = args[6]
porto = args[7]
annotazioni = args[8]
peso = args[9]
causale = args[10]
data_scelta = args[11]
if len(data_scelta)>0:
d = data_scelta
else:
d = datetime.datetime.now().date().strftime("%d/%m/%Y")
# print args
ddt_id = db(db.ddt_fornitore.id == id_ddt).select().first()
id_fornitore = ddt_id.id_fornitore
nome_fornitore = ddt_id.nome_fornitore
row = db(db.fornitori.id==id_fornitore).select().first()
consegna = consegna.split(",")
"""
Insert into saved ddt table
"""
numero_ddt_salvato = db(db.ddt).select().first()["numero_ddt"]
n = numero_ddt_salvato.split("/")[0]
a = numero_ddt_salvato.split("/")[1]
new_n = str(int(n) + 1)
numero_ddt_corrente = new_n + "/" + a
# ddt_id.update_record(numero_ddt=numero_ddt_corrente)
# db.saved_ddt_fornitori.insert(numero_ddt = numero_ddt_corrente,saved_ddt_id = ddt_id.id, data_inserimento = datetime.datetime.now(), user_id = auth.user_id)
row2 = db(db.ddt).select().first()
# row2.update_record(numero_ddt = numero_ddt_corrente)
pa = DDT(d,numero_ddt_corrente,"Fornitore",anteprima=True)
# print "DDT CORRENTE : ",numero_ddt_corrente
pa.rows=[]
# p.intestazione("LEONARDO SPA", "ROMA","PIAZZA MONTE GRAPPA 4", "00195", "RM", "IT", "123456", "00881841001")
pa.intestazione(row.nome, row.citta,row.indirizzo, row.cap, row.provincia, row.partita_iva, row.nazione,row.codice_fiscale)
# p.consegna("<NAME>", "<NAME>", "VIA ALBERT EINSTEIN 35", "50013", "FI")
try:
pa.consegna(consegna[0],consegna[1],consegna[2],consegna[3],consegna[4])
except:
pa.consegna("null","null","null","null","null")
# | |
# -*- coding: utf-8 -*-
"""This class maintains the internal dfTimewolf state.
Use it to track errors, abort on global failures, clean up after modules, etc.
"""
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, Future
import importlib
import logging
import threading
import traceback
from typing import TYPE_CHECKING, Callable, Dict, List, Sequence, Type, Any, TypeVar, cast # pylint: disable=line-too-long
from dftimewolf.cli import curses_display_manager as cdm
from dftimewolf.config import Config
from dftimewolf.lib import errors, utils
from dftimewolf.lib.containers.interface import AttributeContainer
from dftimewolf.lib.errors import DFTimewolfError
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.module import ThreadAwareModule, BaseModule
if TYPE_CHECKING:
from dftimewolf.lib import module as dftw_module
from dftimewolf.lib.containers import interface
T = TypeVar("T", bound="interface.AttributeContainer") # pylint: disable=invalid-name,line-too-long
# TODO(tomchop): Consider changing this to `dftimewolf.state` if we ever need
# more granularity.
logger = logging.getLogger('dftimewolf')
NEW_ISSUE_URL = 'https://github.com/log2timeline/dftimewolf/issues/new'
@dataclass
class StatsEntry:
  """A simple dataclass to store module-related statistics.

  Entries are appended to DFTimewolfState.stats_store via StoreStats().

  Attributes:
    module_type: Type of the module that generated the stats.
    module_name: Name of the module that generated the stats. This has the
        same value as module_type when no runtime_name has been specified for
        the module.
    stats: Dictionary of stats to store. Contents are arbitrary, but
        keys must be strings.
  """
  module_type: str
  module_name: str
  stats: Dict[str, Any]
class DFTimewolfState(object):
"""The main State class.
Attributes:
command_line_options (dict[str, Any]): Command line options passed to
dftimewolf.
config (dftimewolf.config.Config): Class to be used throughout execution.
errors (list[tuple[str, bool]]): errors generated by a module. These
should be cleaned up after each module run using the CleanUp() method.
global_errors (list[tuple[str, bool]]): the CleanUp() method moves non
critical errors to this attribute for later reporting.
input (list[str]): data that the current module will use as input.
output (list[str]): data that the current module generates.
recipe: (dict[str, str]): recipe declaring modules to load.
store (dict[str, object]): arbitrary data for modules.
stats_store: store for statistics generated by modules.
"""
def __init__(self, config: Type[Config]) -> None:
  """Initializes a state.

  Args:
    config: dfTimewolf configuration class, kept for use by modules.
  """
  super(DFTimewolfState, self).__init__()
  self.command_line_options = {} # type: Dict[str, Any]
  # Arbitrary shared key/value cache (see AddToCache / GetFromCache).
  self._cache = {} # type: Dict[str, str]
  # Module instances built from the recipe, keyed by runtime name.
  self._module_pool = {} # type: Dict[str, BaseModule]
  # _state_lock guards _cache and store; _stats_lock guards stats_store.
  self._state_lock = threading.Lock()
  self._stats_lock = threading.Lock()
  # One event per module, created after its SetUp() (see _SetupModuleThread).
  self._threading_event_per_module = {} # type: Dict[str, threading.Event]
  self.config = config
  self.errors = [] # type: List[DFTimewolfError]
  self.global_errors = [] # type: List[DFTimewolfError]
  self.recipe = {} # type: Dict[str, Any]
  # Containers bucketed by CONTAINER_TYPE (see StoreContainer/GetContainers).
  self.store = {} # type: Dict[str, List[interface.AttributeContainer]]
  self.stats_store = [] # type: List[StatsEntry]
  # Presumably invoked when modules stream containers — registration code is
  # not visible in this chunk; confirm against the rest of the class.
  self.streaming_callbacks = {} # type: Dict[Type[interface.AttributeContainer], List[Callable[[Any], Any]]] # pylint: disable=line-too-long
  self._abort_execution = False
  self.stdout_log = True
def _InvokeModulesInThreads(self, callback: Callable[[Any], Any]) -> None:
  """Runs `callback` once per recipe module, each in its own thread.

  Blocks until every thread has finished, then checks for global errors.

  Args:
    callback: function invoked with each module definition.
  """
  workers = [
      threading.Thread(target=callback, args=(definition,))
      for definition in self.recipe['modules']
  ]
  for worker in workers:
    worker.start()
  for worker in workers:
    worker.join()
  self.CheckErrors(is_global=True)
def ImportRecipeModules(self, module_locations: Dict[str, str]) -> None:
  """Dynamically loads the modules declared in a recipe.

  Args:
    module_locations (dict[str, str]): A dfTimewolf module name - Python
        module mapping. e.g.:
        {'GRRArtifactCollector': 'dftimewolf.lib.collectors.grr_hosts'}

  Raises:
    errors.RecipeParseError: if a module requested in a recipe does not
        exist in the mapping, or its Python module cannot be imported.
  """
  for module in self.recipe['modules'] + self.recipe.get('preflights', []):
    name = module['name']
    if name not in module_locations:
      msg = (f'In {self.recipe["name"]}: module {name} cannot be found. '
             'It may not have been declared.')
      raise errors.RecipeParseError(msg)

    logger.debug('Loading module {0:s} from {1:s}'.format(
        name, module_locations[name]))

    location = module_locations[name]
    try:
      importlib.import_module(location)
    except ModuleNotFoundError as exception:
      msg = f'Cannot find Python module for {name} ({location}): {exception}'
      # Chain the import failure so the original traceback is preserved
      # (PEP 3134); the bare `raise` previously discarded it.
      raise errors.RecipeParseError(msg) from exception
def LoadRecipe(self,
               recipe: Dict[str, Any],
               module_locations: Dict[str, str]) -> None:
  """Populates the internal module pool with modules declared in a recipe.

  Args:
    recipe: recipe declaring modules to load.
    module_locations: dfTimewolf module name -> Python module path mapping.

  Raises:
    RecipeParseError: if a module in the recipe has not been declared.
  """
  self.recipe = recipe
  all_definitions = recipe.get('modules', []) + recipe.get('preflights', [])
  self.ImportRecipeModules(module_locations)

  for definition in all_definitions:
    # Combine CLI args with args from the recipe description
    module_name = definition['name']
    module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
    # Fall back to the module name when no runtime_name was given.
    runtime_name = definition.get('runtime_name') or module_name
    # pytype: disable=wrong-arg-types
    self._module_pool[runtime_name] = module_class(self, name=runtime_name)
    # pytype: enable=wrong-arg-types
def FormatExecutionPlan(self) -> str:
  """Formats execution plan.

  Returns information about loaded modules and their corresponding arguments
  to stdout.

  Returns:
    str: String representation of loaded modules and their parameters.
  """
  modules = self.recipe.get('preflights', []) + self.recipe.get('modules', [])
  # Width of the longest argument name across all modules, for alignment.
  maxlen = max(
      (len(key) for module in modules if module['args']
       for key in module['args'].keys()),
      default=0)

  plan = ""
  for module in modules:
    runtime_name = module.get('runtime_name')
    if runtime_name:
      plan += '{0:s} ({1:s}):\n'.format(runtime_name, module['name'])
    else:
      plan += '{0:s}:\n'.format(module['name'])

    new_args = utils.ImportArgsFromDict(
        module['args'], self.command_line_options, self.config)
    if not new_args:
      plan += ' *No params*\n'
    for key, value in new_args.items():
      plan += ' {0:s}{1:s}\n'.format(key.ljust(maxlen + 3), repr(value))
  return plan
def LogExecutionPlan(self) -> None:
  """Logs the result of FormatExecutionPlan() using the base logger."""
  plan_lines = self.FormatExecutionPlan().split('\n')
  for plan_line in plan_lines:
    logger.debug(plan_line)
def AddToCache(self, name: str, value: Any) -> None:
  """Thread-safe write of `value` under key `name` in the state's cache.

  An existing entry with the same name is overwritten.

  Args:
    name: cache key.
    value: object to store in the cache.
  """
  with self._state_lock:
    self._cache[name] = value
def GetFromCache(self, name: str, default_value: Any=None) -> Any:
  """Thread-safe read from the state's cache.

  Args:
    name: cache key.
    default_value: value returned when `name` is absent; defaults to None.

  Returns:
    The cached object for `name`, or `default_value` when missing.
  """
  with self._state_lock:
    if name in self._cache:
      return self._cache[name]
    return default_value
def StoreContainer(self, container: "interface.AttributeContainer") -> None:
  """Thread-safe append of a container to the state's store.

  Containers are bucketed by their CONTAINER_TYPE attribute.

  Args:
    container: data to store.
  """
  with self._state_lock:
    bucket = self.store.get(container.CONTAINER_TYPE)
    if bucket is None:
      bucket = self.store[container.CONTAINER_TYPE] = []
    bucket.append(container)
def StoreStats(self, stats_entry: StatsEntry) -> None:
  """Thread-safe method to store stats in the state's stats store.

  Args:
    stats_entry: The StatsEntry object to store.
  """
  with self._stats_lock:
    self.stats_store.append(stats_entry)
def GetStats(self) -> List[StatsEntry]:
  """Get stats entries that have been stored in the state.

  Returns:
    A snapshot copy of the state's stats store. A copy is returned (rather
    than the internal list) so callers cannot iterate or mutate it while
    StoreStats() appends concurrently, which would defeat the locking.
  """
  with self._stats_lock:
    return list(self.stats_store)
def GetContainers(self,
                  container_class: Type[T],
                  pop: bool=False) -> Sequence[T]:
  """Thread-safe retrieval of containers from the state's store.

  Args:
    container_class: AttributeContainer class used to filter data.
    pop: whether to also remove the returned containers from the store.

  Returns:
    An immutable tuple of the attribute container objects stored under the
    class's CONTAINER_TYPE.
  """
  with self._state_lock:
    matching = cast(
        List[T], self.store.get(container_class.CONTAINER_TYPE, []))
    if pop:
      self.store[container_class.CONTAINER_TYPE] = []
    return tuple(matching)
def DedupeContainers(self, container_class: Type[T]) -> None:
  """Thread safe deduping of containers of the given type.

  This requires the container being deduped to override `__eq__()`; order of
  first occurrence is preserved.

  Args:
    container_class: AttributeContainer class to dedupe.
  """
  with self._state_lock:
    unique = []
    for candidate in self.store.get(container_class.CONTAINER_TYPE, []):
      if candidate not in unique:
        unique.append(candidate)
    self.store[container_class.CONTAINER_TYPE] = unique
def _SetupModuleThread(self, module_definition: Dict[str, str]) -> None:
  """Calls the module's SetUp() function and sets a threading event for it.

  Callback for _InvokeModulesInThreads.

  Args:
    module_definition (dict[str, str]): recipe module definition.
  """
  module_name = module_definition['name']
  runtime_name = module_definition.get('runtime_name', module_name)
  logger.info('Setting up module: {0:s}'.format(runtime_name))
  # Resolve recipe/CLI parameter placeholders into concrete values.
  new_args = utils.ImportArgsFromDict(
      module_definition['args'], self.command_line_options, self.config)
  module = self._module_pool[runtime_name]
  try:
    self._RunModuleSetUp(module, **new_args)
  except errors.DFTimewolfError:
    # Presumably the module already registered its error via AddError —
    # TODO confirm; here we only log the abort.
    msg = "A critical error occurred in module {0:s}, aborting execution."
    logger.critical(msg.format(module.name))
  except Exception as exception:  # pylint: disable=broad-except
    msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
        module.name, exception)
    logger.critical(msg)
    # We're catching any exception that is not a DFTimewolfError, so we want
    # to generate an error for further reporting.
    error = errors.DFTimewolfError(
        message=msg, name='dftimewolf', stacktrace=traceback.format_exc(),
        critical=True, unexpected=True)
    self.AddError(error)
  # The event is created even when SetUp failed, so waiters are not left
  # hanging; CleanUp() always runs afterwards.
  self._threading_event_per_module[runtime_name] = threading.Event()
  self.CleanUp()
def _RunModuleSetUp(self,
                    module: BaseModule,
                    **new_args: Any) -> None:
  """Runs SetUp of a single module.

  Designed to be wrapped by an output handling subclass.

  Args:
    module: The module that will have SetUp called.
    new_args: kwargs to pass to SetUp."""
  module.SetUp(**new_args)
def _RunModuleProcess(self, module: BaseModule) -> None:
"""Runs Process of a single module.
Designed to be wrapped | |
options for keys:
id - traffic type id
kvmnetworklabel - The network name label of the physical device
dedicated to this traffic on a KVM host
vmwarenetworklabel - The network name label of the physical device
dedicated to this traffic on a VMware host
xennetworklabel - The network name label of the physical device
dedicated to this traffic on a XenServer host
'''
if 'id' not in args:
raise RuntimeError("Missing required argument 'id'")
return self.request('updateTrafficType', args)
def listTrafficTypeImplementors(self, args=None):
    '''
    Lists implementors of implementor of a network traffic type or implementors of
    all network traffic types

    args - A dictionary. The following are options for keys:
        keyword - List by keyword
        page -
        pagesize -
        traffictype - Optional. The network traffic type, if specified, return its
        implementor. Otherwise, return all traffic types with their implementor
        page - Pagination
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    return self.request('listTrafficTypeImplementors', args)
def generateUsageRecords(self, args=None):
    '''
    Generates usage records. This will generate records only if there any records to
    be generated, i.e if the scheduled usage job was not run or failed

    args - A dictionary. The following are options for keys:
        enddate - End date range for usage record query. Use yyyy-MM-dd as the date
        format, e.g. startDate=2009-06-03. (required)
        startdate - Start date range for usage record query. Use yyyy-MM-dd as the
        date format, e.g. startDate=2009-06-01. (required)
        domainid - List events for the specified domain.
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'enddate' not in args:
        raise RuntimeError("Missing required argument 'enddate'")
    if 'startdate' not in args:
        raise RuntimeError("Missing required argument 'startdate'")
    return self.request('generateUsageRecords', args)
def listUsageRecords(self, args=None):
    '''
    Lists usage records for accounts

    args - A dictionary. The following are options for keys:
        enddate - End date range for usage record query. Use yyyy-MM-dd as the date
        format, e.g. startDate=2009-06-03. (required)
        startdate - Start date range for usage record query. Use yyyy-MM-dd as the
        date format, e.g. startDate=2009-06-01. (required)
        account - List usage records for the specified user.
        accountid - List usage records for the specified account
        domainid - List usage records for the specified domain.
        keyword - List by keyword
        page -
        pagesize -
        projectid - List usage records for specified project
        type - List usage records for the specified usage type
        page - Pagination
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'enddate' not in args:
        raise RuntimeError("Missing required argument 'enddate'")
    if 'startdate' not in args:
        raise RuntimeError("Missing required argument 'startdate'")
    return self.request('listUsageRecords', args)
def listUsageTypes(self, args=None):
    '''
    List Usage Types

    args - A dictionary. The following are options for keys:
        page - Pagination
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    return self.request('listUsageTypes', args)
def addTrafficMonitor(self, args=None):
    '''
    Adds Traffic Monitor Host for Direct Network Usage

    args - A dictionary. The following are options for keys:
        url - URL of the traffic monitor Host (required)
        zoneid - Zone in which to add the external firewall appliance. (required)
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'url' not in args:
        raise RuntimeError("Missing required argument 'url'")
    if 'zoneid' not in args:
        raise RuntimeError("Missing required argument 'zoneid'")
    return self.request('addTrafficMonitor', args)
def deleteTrafficMonitor(self, args=None):
    '''
    Deletes an traffic monitor host.

    args - A dictionary. The following are options for keys:
        id - Id of the Traffic Monitor Host. (required)
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'id' not in args:
        raise RuntimeError("Missing required argument 'id'")
    return self.request('deleteTrafficMonitor', args)
def listTrafficMonitors(self, args=None):
    '''
    List traffic monitor Hosts.

    args - A dictionary. The following are options for keys:
        zoneid - zone Id (required)
        keyword - List by keyword
        page -
        pagesize -
        page - Pagination
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'zoneid' not in args:
        raise RuntimeError("Missing required argument 'zoneid'")
    return self.request('listTrafficMonitors', args)
def attachVolume(self, args=None):
    '''
    Attaches a disk volume to a virtual machine.

    args - A dictionary. The following are options for keys:
        id - the ID of the disk volume (required)
        virtualmachineid - the ID of the virtual machine (required)
        deviceid - the ID of the device to map the volume to within the guest OS. If
        no deviceId is passed in, the next available deviceId will be chosen. Possible
        values for a Linux OS are:* 1 - /dev/xvdb* 2 - /dev/xvdc* 4 - /dev/xvde* 5 -
        /dev/xvdf* 6 - /dev/xvdg* 7 - /dev/xvdh* 8 - /dev/xvdi* 9 - /dev/xvdj
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'id' not in args:
        raise RuntimeError("Missing required argument 'id'")
    if 'virtualmachineid' not in args:
        raise RuntimeError("Missing required argument 'virtualmachineid'")
    return self.request('attachVolume', args)
def detachVolume(self, args=None):
    '''
    Detaches a disk volume from a virtual machine.

    args - A dictionary. The following are options for keys:
        deviceid - the device ID on the virtual machine where volume is detached
        from
        id - the ID of the disk volume
        virtualmachineid - the ID of the virtual machine where the volume is
        detached from
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    return self.request('detachVolume', args)
def createVolume(self, args=None):
    '''
    Creates a disk volume from a disk offering. This disk volume must still be
    attached to a virtual machine to make use of it.

    args - A dictionary. The following are options for keys:
        name - the name of the disk volume (required)
        account - the account associated with the disk volume. Must be used with the
        domainId parameter.
        diskofferingid - the ID of the disk offering. Either diskOfferingId or
        snapshotId must be passed in.
        domainid - the domain ID associated with the disk offering. If used with the
        account parameter returns the disk volume associated with the account for the
        specified domain.
        projectid - the project associated with the volume. Mutually exclusive with
        account parameter
        size - Arbitrary volume size
        snapshotid - the snapshot ID for the disk volume. Either diskOfferingId or
        snapshotId must be passed in.
        zoneid - the ID of the availability zone
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'name' not in args:
        raise RuntimeError("Missing required argument 'name'")
    return self.request('createVolume', args)
def deleteVolume(self, args=None):
    '''
    Deletes a detached disk volume.

    args - A dictionary. The following are options for keys:
        id - The ID of the disk volume (required)
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'id' not in args:
        raise RuntimeError("Missing required argument 'id'")
    return self.request('deleteVolume', args)
def listVolumes(self, args=None):
    '''
    Lists all volumes.

    args - A dictionary. The following are options for keys:
        account - List resources by account. Must be used with the domainId
        parameter.
        domainid - list only resources belonging to the domain specified
        hostid - list volumes on specified host
        id - the ID of the disk volume
        isrecursive - defaults to false, but if true, lists all resources from the
        parent specified by the domainId till leaves.
        keyword - List by keyword
        listall - If set to false, list only resources belonging to the command's
        caller; if set to true - list resources that the caller is authorized to see.
        Default value is false
        name - the name of the disk volume
        page -
        pagesize -
        podid - the pod id the disk volume belongs to
        projectid - list firewall rules by project
        type - the type of disk volume
        virtualmachineid - the ID of the virtual machine
        zoneid - the ID of the availability zone
        page - Pagination
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    return self.request('listVolumes', args)
def extractVolume(self, args=None):
    '''
    Extracts volume

    args - A dictionary. The following are options for keys:
        id - the ID of the volume (required)
        mode - the mode of extraction - HTTP_DOWNLOAD or FTP_UPLOAD (required)
        zoneid - the ID of the zone where the volume is located (required)
        url - the url to which the volume would be extracted
    '''
    # A fresh dict per call avoids the shared mutable-default pitfall.
    if args is None:
        args = {}
    if 'id' not in args:
        raise RuntimeError("Missing required argument 'id'")
    if 'mode' not in args:
        raise RuntimeError("Missing required argument 'mode'")
    if 'zoneid' not in args:
        raise RuntimeError("Missing required argument 'zoneid'")
    return self.request('extractVolume', args)
def migrateVolume(self, args=None):
    '''
    Migrate volume

    args - A dictionary. The following are options for keys:
        storageid - destination storage pool ID to migrate the volume to (required)
        volumeid - the ID of the volume (required)
    '''
    # Use None instead of a mutable default dict shared across calls.
    if args is None:
        args = {}
    for required in ('storageid', 'volumeid'):
        if required not in args:
            raise RuntimeError("Missing required argument '%s'" % required)
    return self.request('migrateVolume', args)
def createVolumeOnFiler(self, args={}):
'''
Create a volume
args - A dictionary. The following | |
# reponame: anki-code/python-hunter
from __future__ import absolute_import
import linecache
import tokenize
from functools import partial
from os.path import basename
from os.path import exists
from os.path import splitext
from threading import current_thread
from .const import SITE_PACKAGES_PATHS
from .const import SYS_PREFIX_PATHS
from .util import CYTHON_SUFFIX_RE
from .util import LEADING_WHITESPACE_RE
from .util import MISSING
from .util import PY2
from .util import cached_property
from .util import get_func_in_mro
from .util import get_main_thread
from .util import if_same_code
__all__ = 'Event',
class Event(object):
    """
    A wrapper object for Frame objects. Instances of this are passed to your custom functions or predicates.
    Provides few convenience properties.

    Args:
        frame (Frame): A python `Frame <https://docs.python.org/3/reference/datamodel.html#frame-objects>`_ object.
        kind (str): A string like ``'call'``, ``'line'``, ``'return'`` or ``'exception'``.
        arg: A value that depends on ``kind``. Usually is ``None`` but for ``'return'`` or ``'exception'`` other values
            may be expected.
        tracer (:class:`hunter.tracer.Tracer`): The :class:`~hunter.tracer.Tracer` instance that created the event.
            Needed for the ``calls`` and ``depth`` fields.
    """
    # Class-level defaults; __init__ assigns all of these on every instance.
    frame = None
    kind = None
    arg = None
    depth = None
    calls = None
    builtin = None
def __init__(self, frame, kind, arg, tracer=None, depth=None, calls=None, threading_support=MISSING):
    """Record a tracing event; bookkeeping values come from ``tracer`` or must be passed explicitly."""
    if tracer is None:
        # No tracer: every bookkeeping value must be supplied by the caller.
        if depth is None:
            raise TypeError('Missing argument: depth (required because tracer was not given).')
        if calls is None:
            raise TypeError('Missing argument: calls (required because tracer was not given).')
        if threading_support is MISSING:
            raise TypeError('Missing argument: threading_support (required because tracer was not given).')
    else:
        depth = tracer.depth
        calls = tracer.calls
        threading_support = tracer.threading_support

    #: The original Frame object.
    #:
    #: .. note::
    #:
    #:    Not allowed in the builtin predicates (it's the actual Frame object).
    #:    You may access it from your custom predicate though.
    self.frame = frame

    # Kinds like 'c_call'/'c_return'/'c_exception' come from builtin (C) calls;
    # strip the prefix and remember the origin in ``builtin``.
    is_builtin = kind.startswith('c_')
    if is_builtin:
        kind = kind[2:]

    #: True for events that originated from ``'c_call'``, ``'c_return'`` or ``'c_exception'``.
    #:
    #: :type: bool
    self.builtin = is_builtin
    #: The kind of the event: ``'call'``, ``'line'``, ``'return'`` or ``'exception'``.
    #:
    #: :type: str
    self.kind = kind
    #: A value that depends on ``kind``.
    self.arg = arg
    #: Tracing depth (increases on calls, decreases on returns).
    #:
    #: :type: int
    self.depth = depth
    #: A counter for total number of calls up to this Event.
    #:
    #: :type: int
    self.calls = calls
    #: A copy of the :attr:`hunter.tracer.Tracer.threading_support` flag.
    #:
    #: .. note::
    #:
    #:    Not allowed in the builtin predicates. You may access it from your custom predicate though.
    #:
    #: :type: bool or None
    self.threading_support = threading_support
    #: True only for copies produced by :meth:`~hunter.event.Event.detach`.
    #:
    #: :type: bool
    self.detached = False
def __repr__(self):
    """Terse one-line description for debugging output."""
    details = (self.kind, self.function, self.module, self.filename, self.lineno)
    return '<Event kind=%r function=%r module=%r filename=%r lineno=%s>' % details
def __eq__(self, other):
    """Equality on kind, depth and code location; the frame itself is ignored."""
    if type(self) != type(other):
        return False
    return (self.kind == other.kind
            and self.depth == other.depth
            and self.function == other.function
            and self.module == other.module
            and self.filename == other.filename)
def detach(self, value_filter=None):
    """
    Return a copy of this event with references to live objects (most
    importantly the frame) dropped, so it can be stored or used outside the
    handler without memory leaks or side-effects from later frame mutation.

    Args:
        value_filter:
            Optional callable taking one argument (a value).  It is applied
            to ``arg`` and to every value in ``globals`` and ``locals``.
            When omitted, ``arg`` becomes ``None`` and both dicts are empty.
    """
    event = Event.__new__(Event)
    # Force-evaluate the lazy properties into the copy's __dict__ so the
    # detached event never needs to touch the original frame again.
    for field in ('code', 'filename', 'fullsource', 'function', 'lineno',
                  'module', 'source', 'stdlib', 'threadid', 'threadname',
                  'instruction'):
        event.__dict__[field] = getattr(self, field)
    if value_filter:
        event.__dict__['arg'] = value_filter(self.arg)
        event.__dict__['globals'] = {key: value_filter(value) for key, value in self.globals.items()}
        event.__dict__['locals'] = {key: value_filter(value) for key, value in self.locals.items()}
    else:
        event.__dict__['arg'] = None
        event.__dict__['globals'] = {}
        event.__dict__['locals'] = {}
    event.threading_support = self.threading_support
    event.calls = self.calls
    event.depth = self.depth
    event.kind = self.kind
    event.builtin = self.builtin
    event.detached = True
    return event
def clone(self):
    """Shallow copy: shares cached values but is a distinct Event object."""
    duplicate = Event.__new__(Event)
    duplicate.__dict__ = self.__dict__.copy()
    return duplicate
@cached_property
def instruction(self):
    """
    Last byte instruction; ``None`` when there is no bytecode (Cython code).
    Depending on Python version it is an int or a single-char string.

    :type: int or single char string or None
    """
    code = self.frame.f_code
    lasti = self.frame.f_lasti
    if lasti >= 0 and code.co_code:
        return code.co_code[lasti]
    return None
@cached_property
def threadid(self):
    """
    Ident of the current thread, or ``None`` when it is the main thread.

    :type: int or None
    """
    ident = self._thread.ident
    main = get_main_thread()
    # When the main thread cannot be determined, report the raw ident.
    if main is not None and ident == main.ident:
        return None
    return ident
@cached_property
def threadname(self):
    """
    Name of the thread the event was recorded on.

    :type: str
    """
    thread = self._thread
    return thread.name
@cached_property
def _thread(self):
    # Cached once so every thread-derived field refers to the same thread.
    return current_thread()
@cached_property
def locals(self):
    """
    Local variables of the frame; empty dict for builtin (C) events.

    :type: dict
    """
    return {} if self.builtin else self.frame.f_locals
@cached_property
def globals(self):
    """
    Global variables of the frame; empty dict for builtin (C) events.

    :type: dict
    """
    return {} if self.builtin else self.frame.f_globals
@cached_property
def function(self):
    """
    Function name: taken from the builtin itself for C events, otherwise
    from the code object.

    :type: str
    """
    return self.arg.__name__ if self.builtin else self.code.co_name
@cached_property
def function_object(self):
    """
    The function instance.

    .. warning:: Use with prudence.

        * Will be ``None`` for decorated functions on Python 2 (methods may still work tho).
        * May be ``None`` if tracing functions or classes not defined at module level.
        * May be very slow if tracing modules with lots of variables.

    :type: function or None
    """
    # Based on MonkeyType's get_func
    if self.builtin:
        # NOTE(review): this returns the boolean ``builtin`` flag (True), not
        # the builtin function object itself -- confirm whether ``self.arg``
        # was intended here.
        return self.builtin
    code = self.code
    if code.co_name is None:
        return None
    # First, try to find the function in globals
    candidate = self.globals.get(code.co_name, None)
    func = if_same_code(candidate, code)
    # If that failed, as will be the case with class and instance methods, try
    # to look up the function from the first argument. In the case of class/instance
    # methods, this should be the class (or an instance of the class) on which our
    # method is defined.
    if func is None and code.co_argcount >= 1:
        first_arg = self.locals.get(code.co_varnames[0])
        func = get_func_in_mro(first_arg, code)
    # If we still can't find the function, as will be the case with static methods,
    # try looking at classes in global scope.
    if func is None:
        for v in self.globals.values():
            if not isinstance(v, type):
                continue
            func = get_func_in_mro(v, code)
            if func is not None:
                break
    return func
@cached_property
def module(self):
    """
    Dotted module name (like ``'foo.bar'``); ``'?'`` when unknown.

    :type: str
    """
    if self.builtin:
        name = self.arg.__module__
    else:
        name = self.frame.f_globals.get('__name__', '')
    if name is None:
        name = '?'
    if PY2:
        # Keep a byte string on Python 2 for consistent rendering.
        name = name.encode('ascii', 'replace')
    return name
@cached_property
def filename(self):
    """
    A string with the path to the module's file. May be empty if ``__file__``
    attribute is missing. May be relative if running scripts.

    :type: str
    """
    filename = self.frame.f_code.co_filename
    if not filename:
        filename = self.frame.f_globals.get('__file__')
    if not filename:
        filename = '?'
    if filename.endswith(('.pyc', '.pyo')):
        # Point at the source file rather than the compiled artifact.
        filename = filename[:-1]
    elif filename.endswith('$py.class'):  # Jython
        filename = filename[:-9] + '.py'
    elif filename.endswith(('.so', '.pyd')):
        # Compiled extension: try to locate the matching Cython/Python source.
        # Local renamed from ``basename`` to stop shadowing os.path.basename
        # imported at the top of the file.
        stem = CYTHON_SUFFIX_RE.sub('', filename)
        for ext in ('.pyx', '.py'):
            cyfilename = stem + ext
            if exists(cyfilename):
                filename = cyfilename
                break
    return filename
@cached_property
def lineno(self):
    """
    Current line number in the source file.

    :type: int
    """
    return self.frame.f_lineno
@cached_property
def code(self):
    """The frame's code object (not a string)."""
    return self.frame.f_code
@cached_property
def stdlib(self):
    """
    ``True`` when the frame appears to come from the standard library.

    :type: bool
    """
    module_parts = self.module.split('.')
    if 'pkg_resources' in module_parts:
        # skip this over-vendored module
        return True
    if self.filename == '<string>' and (self.module.startswith('namedtuple_') or self.module == 'site'):
        # skip namedtuple exec garbage
        return True
    if self.filename.startswith(SITE_PACKAGES_PATHS):
        # site-packages is definitely not stdlib, even though it lives
        # under sys.prefix
        return False
    return self.filename.startswith(SYS_PREFIX_PATHS)
@cached_property
def fullsource(self):
"""
A string with the sourcecode for the current statement (from ``linecache`` - failures are ignored).
May include multiple lines if it's a class/function definition (will include | |
All figures in a previous session were dumped to the file, and
all these figures are by this method reloaded and added to the
current set of figures.
"""
# in savefig, self._figs was pickled as one object
handle = open(filename, 'r')
filefigs = pickle.load(handle)
handle.close()
# check that filefigs is a dict of Figure instances:
fail = True
if isinstance(filefigs, dict):
fail = False
for item in filefigs:
if not isinstance(item, Figure):
fail = True
if fail:
raise Exception("Import error. Cannot retrieve figures from filename %s ." % filename)
self._figs.update(filefigs)
def dumpfig(self, filename='figspickle.txt'):
    """
    Save all current figures to a file (with the given filename).
    The file has standard Python pickle format (dict of Figure
    instances). The figures can later be reloaded by the loadfig
    method.
    """
    # Pickle data is binary, so open in 'wb' (text mode corrupts the stream
    # on Windows); ``with`` guarantees the handle is closed even if
    # pickling raises.
    with open(filename, 'wb') as handle:
        pickle.dump(self._figs, handle)
def hardcopy(self, filename, **kwargs):
    """
    Save a hardcopy of the current figure to file (with the given
    filename). The image format is determined from the filename's
    extension. If changes were made directly on the backend instance,
    pass replot=False to keep Easyviz from replotting the current figure
    and destroying those changes.
    """
    # Rendering lives in the backends; the base class only defines the API.
    raise NotImplementedError('hardcopy not implemented in class %s'
                              % self.__class__.__name__)
def hold(self, *args):
    """Change the hold state of the current axis.

    Calling::

        hold('on')

    makes every subsequent plotting command be added to the current plot.

    Calling::

        hold('off')

    clears the previous plot before the new plot is drawn. This is the
    default behavior.

    Calling::

        hold()

    toggles the hold state in the current axis.

    Calling::

        hold(ax, ...)

    affects the Axis object ax instead of the current axis.

    Note that one can use hold(True) and hold(False) instead of
    hold('on') and hold('off'), respectively.
    """
    ax, args, nargs = self._check_args(*args)
    if nargs == 1:
        ax.setp(hold=args[0])
    elif nargs == 0:
        ax.toggle('hold')
        # Parenthesized single-argument form prints identically under
        # Python 2 and Python 3 (the old ``print "..."`` statement was
        # Python-2-only syntax).
        print("hold state is %s" % ax.getp('hold'))
    else:
        raise TypeError('hold: wrong number of arguments')
def ishold(self):
    """
    Return the hold state (True if hold is on, and False if it is off).
    """
    current_axis = self.gca()
    return current_axis.getp('hold')
def figure(self, num=None, **kwargs):
    """
    Create a new figure or switch between figures and return Figure object.
    num is the figure number of the new or existing figure.
    """
    try:
        num = int(num)
    except (TypeError, ValueError):
        # num was None or not a number: pick the next free figure number.
        # (Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.)
        if len(self._figs) == 0:  # no figures yet
            num = 1
        else:
            num = max(self._figs) + 1
    if num not in self._figs:
        # Points to class Figure or other convenient function.
        # In the gnuplot backend this should instantiate a new pipe instead.
        kwargs['number'] = num
        self._figs[num] = Figure(**kwargs)
    self._attrs['curfig'] = num
    return self._figs[num]
def clf(self):
    """Clear the current figure by discarding and recreating it."""
    current = self._attrs['curfig']
    del self._figs[current]
    self.figure(current)
def cla(self, *args):
    """Clear the current axis.

    Calling::

        cla()

    clears the current axis.

    Calling::

        cla(ax)

    clears the Axis object ax instead of the current axis.
    """
    target_axis, args, nargs = self._check_args(*args)
    target_axis.reset()
def axis(self, *args, **kwargs):
    """Choose the axis limits and appearance.
    Calling::
        axis([xmin, xmax, ymin, ymax[, zmin, zmax]])
    sets the limits on the x-, y-, and z-axes in the current plot.
    Calling::
        axis(xmin, xmax, ymin, ymax[, zmin, zmax])
    gives the same result as above.
    Calling::
        axis()
    returns the limits on the x-, y-, and z-axes for the current plot.
    If the view in the current plot is a 2D view, only the limits on the
    x- and y-axis are returned.
    Calling::
        axis(mode)
    sets axis scaling to mode, where mode can be
      * 'auto' - autoscaling is used
      * 'manual' - freeze the scaling at the current limits
      * 'tight' - sets the axis limits to the range of the data
      * 'fill' - has currently no affect
    Calling::
        axis(method)
    sets the appearance of the current axis as specified by method.
    %s
    Calling::
        axis(direction)
    sets the direction of the increasing values on the axes.
      * 'ij' - reverse y-axis
      * 'xy' - restore y-axis
    Calling::
        axis('off')
    turns off the visibility of the axis.
    Calling::
        axis('on')
    turns the visibility of the axis back on.
    Calling::
        axis(ax, ...)
    affects the Axis object ax instead of the current axis.
    """
    ax, args, nargs = self._check_args(*args)
    # Getter: no positional and no keyword arguments at all.
    if nargs == 0 and len(kwargs) == 0:
        xmin, xmax, ymin, ymax, zmin, zmax = ax.get_limits()
        def get_lim(amin, amax, n1, n2):
            # Explicitly set limits (e.g. 'xmin'/'xmax') win over the
            # stored data range.
            if ax.getp(n1) is not None and ax.getp(n2) is not None:
                return ax.getp(n1), ax.getp(n2)
            else:
                return amin, amax
        xmin, xmax = get_lim(xmin, xmax, 'xmin', 'xmax')
        ymin, ymax = get_lim(ymin, ymax, 'ymin', 'ymax')
        zmin, zmax = get_lim(zmin, zmax, 'zmin', 'zmax')
        if ax.getp('camera').getp('view') == 2:
            # 2D view: only the x and y limits are meaningful.
            return xmin, xmax, ymin, ymax
        return xmin, xmax, ymin, ymax, zmin, zmax
    limits = Axis._ranges
    # Allow both axis(xmin,xmax,ymin,ymax[,zmin,zmax]) and
    # axis([xmin,xmax,ymin,ymax[,zmin,zmax]])
    if nargs == 1:
        if isinstance(args[0], (tuple,list)):
            args = args[0]; nargs = len(args)
        elif isinstance(args[0], str):
            # A single string selects a scaling mode, visibility state,
            # appearance method or axis direction.
            if args[0] in Axis._modes:
                ax.setp(mode=args[0])
            elif args[0] in ['on', 'off']:
                state = _toggle_state(args[0])
                ax.setp(visible=state)
            elif args[0] in Axis._methods:
                ax.setp(method=args[0])
            elif args[0] in Axis._directions:
                ax.setp(direction=args[0])
    kwargs_ = {}
    # first treat positional arguments:
    if nargs in (4,6):
        for i in range(nargs):
            kwargs_[limits[i]] = args[i]
    # allow keyword arguments:
    for kw in limits:
        if kw in kwargs:
            kwargs_[kw] = kwargs[kw]
    ax.setp(**kwargs_)
    if self.getp('interactive') and self.getp('show'):
        self._replot()
# Fill the %s placeholder in the docstring with the legal method values.
axis.__doc__ = axis.__doc__ % docadd('Legal values for method are',
                                     Axis._methods, indent=10)
def xlim(self, *args):
    """Set or get limits on x axis.

    Calling::

        xlim([xmin,xmax])  or  xlim(xmin,xmax)

    sets the x limits on the current axis.

    Calling::

        xmin, xmax = xlim()

    returns the x limits for the current axis.

    Calling::

        xlim(ax, ...)

    affects the Axis object ax instead of the current axis.
    """
    ax, args, nargs = self._check_args(*args)
    if nargs == 0:
        # Getter: explicit xmin/xmax take precedence over the stored range.
        lo = ax.getp('xmin')
        hi = ax.getp('xmax')
        if lo is None or hi is None:
            lo, hi = ax.getp('xlim')
        if lo is None or hi is None:
            return [0, 1]
        return lo, hi
    if nargs == 1:
        limits = args[0]
        if isinstance(limits, (list, tuple, ndarray)) and len(limits) == 2:
            ax.setp(xmin=limits[0], xmax=limits[1])
        elif isinstance(limits, str):
            raise NotImplementedError()
    elif nargs == 2:
        ax.setp(xmin=args[0], xmax=args[1])
    else:
        raise TypeError('xlim: wrong number of arguments.')
    if self.getp('interactive') and self.getp('show'):
        self._replot()
def ylim(self, *args):
    """Set or get limits on y axis.

    Calling::

        ylim([ymin,ymax])  or  ylim(ymin,ymax)

    sets the y limits on the current axis.

    Calling::

        ymin, ymax = ylim()

    returns the y limits for the current axis.

    Calling::

        ylim(ax, ...)

    affects the Axis object ax instead of the current axis.
    """
    ax, args, nargs = self._check_args(*args)
    if nargs == 0:
        # Getter: explicit ymin/ymax take precedence over the stored range.
        lo = ax.getp('ymin')
        hi = ax.getp('ymax')
        if lo is None or hi is None:
            lo, hi = ax.getp('ylim')
        if lo is None or hi is None:
            return [0, 1]
        return lo, hi
    if nargs == 1:
        limits = args[0]
        if isinstance(limits, (list, tuple, ndarray)) and len(limits) == 2:
            ax.setp(ymin=limits[0], ymax=limits[1])
        elif isinstance(limits, str):
            raise NotImplementedError()
    elif nargs == 2:
        ax.setp(ymin=args[0], ymax=args[1])
    else:
        raise TypeError('ylim: wrong number of arguments.')
    if self.getp('interactive') and self.getp('show'):
        self._replot()
def zlim(self, *args):
    """Set or get limits on z axis.

    Calling::

        zlim([zmin,zmax])  or  zlim(zmin,zmax)

    sets the z limits on the current axis.

    Calling::

        zmin, zmax = zlim()

    returns the z limits for the current axis.

    Calling::

        zlim(ax, ...)

    affects the Axis object ax instead of the current axis.
    """
    ax, args, nargs = self._check_args(*args)
    if nargs == 0:
        # Getter: explicit zmin/zmax take precedence over the stored range.
        lo = ax.getp('zmin')
        hi = ax.getp('zmax')
        if lo is None or hi is None:
            lo, hi = ax.getp('zlim')
        if lo is None or hi is None:
            return [0, 1]
        return lo, hi
    if nargs == 1:
        limits = args[0]
        if isinstance(limits, (list, tuple, ndarray)) and len(limits) == 2:
            ax.setp(zmin=limits[0], zmax=limits[1])
        elif isinstance(limits, str):
            raise NotImplementedError()
    elif nargs == 2:
        ax.setp(zmin=args[0], zmax=args[1])
    else:
        raise TypeError('zlim: wrong number of arguments.')
    if self.getp('interactive') and self.getp('show'):
        self._replot()
def close(self, *args):
"""Close figure.
Calling::
close()
closes the current figure.
Calling::
close(num)
closes the figure with | |
# gh_stars: 0
'''
/* ******************* */
/*! \file MarabouNetworkONNX.py
** \verbatim
** Top contributors (to current version):
** <NAME>
** This file is part of the Marabou project.
** Copyright (c) 2017-2019 by the authors listed in the file AUTHORS
** in the top-level source directory) and their institutional affiliations.
** All rights reserved. See the file COPYING in the top-level source
** directory for licensing information.\endverbatim
**
** \brief [[ Add one-line brief description here ]]
**
** [[ Add lengthier description here ]]
**/
'''
import numpy as np
import onnx
import onnxruntime
from onnx import numpy_helper
from onnx.helper import get_attribute_value
from maraboupy import MarabouUtils
from maraboupy import MarabouNetwork
from onnx import TensorProto
import itertools
class MarabouNetworkONNX(MarabouNetwork.MarabouNetwork):
def __init__(self, filename, inputNames=None, outputName=None):
    """
    Construct a MarabouNetworkONNX object from an ONNX file.

    Args:
        filename: (string) Path to the ONNX file
        inputNames: (list of strings) optional, names of the input nodes.
        outputName: (string) optional, name of the output node.

    Returns:
        marabouNetworkONNX: (MarabouNetworkONNX) representing network
    """
    super().__init__()
    # All parsing work is delegated to readONNX.
    self.readONNX(filename, inputNames, outputName)
def clear(self):
    """
    Reset all ONNX-specific state so the object represents an empty network.
    """
    super().clear()
    self.madeGraphEquations = []
    self.varMap = {}
    self.constantMap = {}
    self.shapeMap = {}
    self.inputNames = None
    self.outputName = None
    self.graph = None
def readONNX(self, filename, inputNames, outputName):
    """
    Constructs a MarabouNetworkONNX object from an ONNX file.

    Args:
        filename: (string) Path to the ONNX file
        inputNames: (list of strings) optional, list of names corresponding to inputs.
        outputName: (string) optional, name of node corresponding to output.
    Returns:
        marabouNetworkONNX: (MarabouNetworkONNX) representing network
    """
    self.filename = filename
    self.graph = onnx.load(filename).graph
    # Get default inputs/output if no names are provided
    if not inputNames:
        assert len(self.graph.input) >= 1
        # Initializers can also appear in graph.input; exclude them so only
        # true network inputs remain.
        initNames = [node.name for node in self.graph.initializer]
        inputNames = [inp.name for inp in self.graph.input if inp.name not in initNames]
    if not outputName:
        # Refuse to guess when the model has several outputs.
        if len(self.graph.output) > 1:
            err_msg = "Your model has multiple outputs defined\n"
            err_msg += "Please specify the name of the output you want to consider using the 'outputName' argument\n"
            err_msg += "Possible options: " + ", ".join([out.name for out in self.graph.output])
            raise RuntimeError(err_msg)
        outputName = self.graph.output[0].name
    # Check that input/outputs are in the graph
    for name in inputNames:
        if not len([nde for nde in self.graph.node if name in nde.input]):
            raise RuntimeError("Input %s not found in graph!" % name)
    if not len([nde for nde in self.graph.node if outputName in nde.output]):
        raise RuntimeError("Output %s not found in graph!" % outputName)
    self.inputNames = inputNames
    self.outputName = outputName
    # Process the shapes and values of the graph while making Marabou equations and constraints
    self.foundnInputFlags = 0
    self.processGraph()
    # If the given inputNames/outputName specify only a portion of the network, then we will have
    # shape information saved not relevant to the portion of the network. Remove extra shapes.
    self.cleanShapes()
    # Other Marabou input parsers assign output variables immediately after input variables and before any
    # intermediate variables. This function reassigns variable numbering to match other parsers.
    # If this is skipped, the output variables will be the last variables defined.
    self.reassignOutputVariables()
def processGraph(self):
    """
    Processes the ONNX graph to produce Marabou equations.

    Seeds shapeMap with the graph inputs and initializers (creating Marabou
    variables for the user-specified inputs), then walks the graph backwards
    from the output to build the remaining shapes and equations.
    """
    # Add shapes for the graph's inputs
    for node in self.graph.input:
        # Dimensions with dim_value <= 0 are dynamic/unknown; treat them as 1.
        self.shapeMap[node.name] = list([dim.dim_value if dim.dim_value > 0 else 1 for dim in node.type.tensor_type.shape.dim])
        # If we find one of the specified inputs, create new variables
        if node.name in self.inputNames:
            self.madeGraphEquations += [node.name]
            self.foundnInputFlags += 1
            self.makeNewVariables(node.name)
            self.inputVars += [np.array(self.varMap[node.name])]
    # Add shapes for constants (initializers carry their own dims)
    for node in self.graph.initializer:
        self.shapeMap[node.name] = list(node.dims)
        self.madeGraphEquations += [node.name]
    # Recursively create remaining shapes and equations as needed
    self.makeGraphEquations(self.outputName, True)
def makeGraphEquations(self, nodeName, makeEquations):
    """
    Recursively populates self.shapeMap, self.varMap, and self.constantMap while creating Marabou equations
    and constraints as needed.

    Arguments:
        nodeName: (str) name of node for making the shape
        makeEquations: (bool) create Marabou equations for this node
    """
    # Memoization: each node is processed at most once.
    if nodeName in self.madeGraphEquations:
        return
    if nodeName in self.inputNames:
        self.foundnInputFlags += 1
        # If an inputName is an intermediate layer of the network, we don't need to create Marabou
        # equations for its inputs. However, we still need to call makeMarabouEquations in order to
        # compute shapes. We just need to set the makeEquations flag to false
        makeEquations = False
    self.madeGraphEquations += [nodeName]
    # Recursively call makeGraphEquations, then call makeMarabouEquations
    # This ensures that shapes and values of a node's inputs have been computed first
    for inNodeName in self.getInputNodes(nodeName):
        self.makeGraphEquations(inNodeName, makeEquations)
    # By this point, all input variables need to have been found
    if self.foundnInputFlags != len(self.inputNames):
        err_msg = "These input variables could not be found: %s"%(", ".join([inVar for inVar in self.inputNames if inVar not in self.varMap]))
        raise RuntimeError(err_msg)
    # Compute node's shape and create Marabou equations as needed
    self.makeMarabouEquations(nodeName, makeEquations)
    # Create new variables when we find one of the inputs
    if nodeName in self.inputNames:
        self.makeNewVariables(nodeName)
        self.inputVars += [np.array(self.varMap[nodeName])]
def makeMarabouEquations(self, nodeName, makeEquations):
    """
    Compute the shape and values of a node assuming the input shapes and
    values have been computed already.

    Arguments:
        nodeName: (str) name of node for which we want to compute the output shape
        makeEquations: (bool) create Marabou equations for this node
    """
    node = self.getNode(nodeName)
    op = node.op_type
    # Shape/value-only operations take just the node ...
    shape_handlers = {
        'Constant': 'constant',
        'Identity': 'identity',
        'Cast': 'cast',
        'Reshape': 'reshape',
        'Flatten': 'flatten',
        'Transpose': 'transpose',
    }
    # ... while computation operations also honor the makeEquations flag.
    equation_handlers = {
        'MaxPool': 'maxpoolEquations',
        'Conv': 'convEquations',
        'Gemm': 'gemmEquations',
        'MatMul': 'matMulEquations',
        'Add': 'addEquations',
        'Relu': 'reluEquations',
    }
    if op in shape_handlers:
        getattr(self, shape_handlers[op])(node)
    elif op in equation_handlers:
        getattr(self, equation_handlers[op])(node, makeEquations)
    else:
        raise NotImplementedError("Operation %s not implemented" % (op))
def getNode(self, nodeName):
    """
    Return the unique graph node whose outputs include ``nodeName``.

    Arguments:
        nodeName: (str) name of node to find in graph
    Returns:
        ONNX node named nodeName
    """
    matches = [nd for nd in self.graph.node if nodeName in nd.output]
    # Each tensor name must be produced by exactly one node.
    assert len(matches) == 1
    return matches[0]
def makeNewVariables(self, nodeName):
    """
    Allocate fresh Marabou variable numbers shaped like the node's output.

    Arguments:
        nodeName: (str) name of node (shape must already be in shapeMap)
    Returns:
        (np.array) array of variable numbers
    """
    assert nodeName not in self.varMap
    shape = self.shapeMap[nodeName]
    count = np.prod(shape)
    fresh = np.array([self.getNewVariable() for _ in range(count)]).reshape(shape)
    self.varMap[nodeName] = fresh
    # Sanity check: every allocated variable number must be an integer.
    assert all([np.equal(np.mod(i, 1), 0) for i in fresh.reshape(-1)])
    return fresh
def getInputNodes(self, nodeName):
    """
    Names of graph nodes that feed the given node; constant (initializer)
    inputs are stored into self.constantMap instead of being returned.

    Arguments:
        nodeName: (str) name of node
    Returns:
        inNodes: (list of str) names of nodes that are inputs to the given node
    """
    node = self.getNode(nodeName)
    sources = []
    for inputName in node.input:
        if any(inputName in nd.output for nd in self.graph.node):
            # Produced by another node: part of the recursive traversal.
            sources.append(inputName)
        else:
            # Otherwise look for a matching initializer (a constant tensor).
            constants = [numpy_helper.to_array(init) for init in self.graph.initializer if init.name == inputName]
            if constants:
                self.constantMap[inputName] = constants[0]
    return sources
def constant(self, node):
    """
    Record the value of a Constant node in constantMap.

    Arguments:
        node: (node) representing constant operation
    """
    nodeName = node.output[0]
    # The tensor payload lives in the node's 'value' attribute.
    value_attr = next((attr for attr in node.attribute if attr.name == "value"), None)
    if value_attr is None:
        raise RuntimeError("Could not find value of tensor constant")
    self.constantMap[nodeName] = numpy_helper.to_array(get_attribute_value(value_attr))
def identity(self, node):
    """
    Forward shape and variables/constants from input to output unchanged.

    Arguments:
        node: (node) representing identity operation
    """
    nodeName = node.output[0]
    inputName = node.input[0]
    self.shapeMap[nodeName] = self.shapeMap[inputName]
    # Propagate whichever representation the input has.
    if inputName in self.varMap:
        self.varMap[nodeName] = self.varMap[inputName]
    elif inputName in self.constantMap:
        self.constantMap[nodeName] = self.constantMap[inputName]
def cast(self, node):
"""
Function representing cast
Arguments:
node: (node) representing cast operation
"""
nodeName = node.output[0]
inputName = node.input[0]
self.shapeMap[nodeName] = self.shapeMap[inputName]
# Try to find type to cast to. If not found, raise error
to = None
for attr in node.attribute:
if attr.name == "to":
to = get_attribute_value(attr)
if to is None:
raise RuntimeError("Casting type not specified with attribute 'to'")
# Cast input array to correct type, and throw | |
on each item in the batch
_, _, window, _,_,_,_,_,_ = parse_image_meta_graph(image_meta,self.config)
[detections_batch,best_indices] = utils.batch_slice(
[rois]+ robotvqa_class+[mrcnn_bbox, window,robotvqa_poses],
lambda x: refine_detections_graph(x, self.config),
self.config.IMAGES_PER_GPU,parallel_processing=True)
# Reshape output
# [batch, num_detections, (y1, x1, y2, x2, class_score)] in pixels
objdesc= tf.reshape(
detections_batch,
[self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 4+(self.config.NUM_FEATURES-2)*2+6])
best_indices= tf.reshape(
best_indices,
[self.config.BATCH_SIZE,self.config.DETECTION_MAX_INSTANCES])
return [objdesc,best_indices]
def compute_output_shape(self, input_shape):
    # Two outputs: the refined detections and the matching best indices.
    # NOTE(review): the descriptor length here uses NUM_FEATURES*2 while the
    # detection reshape elsewhere uses (NUM_FEATURES-2)*2 -- confirm which is
    # intended; behavior preserved as-is.
    max_instances = self.config.DETECTION_MAX_INSTANCES
    descriptor_len = 4 + self.config.NUM_FEATURES * 2 + 6
    return [(None, max_instances, descriptor_len),
            (None, max_instances)]
def compute_mask(self, inputs, mask=None):
    # Nine outputs, none of which carries a Keras mask.
    return [None] * 9
# Region Proposal Network (RPN)
def rpn_graph(feature_map, anchors_per_location, anchor_stride):
    """Builds the computation graph of the Region Proposal Network.

    feature_map: backbone features [batch, height, width, depth]
    anchors_per_location: number of anchors per pixel in the feature map
    anchor_stride: Controls the density of anchors. Typically 1 (anchors for
        every pixel in the feature map), or 2 (every other pixel).

    Returns:
        rpn_class_logits: [batch, anchors, 2] anchor classifier logits (before softmax)
        rpn_probs: [batch, anchors, 2] anchor classifier probabilities
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] deltas to be
            applied to anchors
    """
    # TODO: check if stride of 2 causes alignment issues if the featuremap
    # is not even.
    # Shared 3x3 convolution feeding both the classification and bbox heads.
    shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',
                       strides=anchor_stride,
                       name='rpn_conv_shared')(feature_map)

    # Objectness scores: [batch, height, width, anchors_per_location * 2],
    # flattened to [batch, anchors, 2] and softmaxed over BG/FG.
    raw_scores = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',
                           activation='linear', name='rpn_class_raw')(shared)
    rpn_class_logits = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(raw_scores)
    rpn_probs = KL.Activation(
        "softmax", name="rpn_class_xxx")(rpn_class_logits)

    # Bounding-box refinements, flattened to [batch, anchors, 4] where the
    # last axis is (dy, dx, log(dh), log(dw)).
    raw_deltas = KL.Conv2D(anchors_per_location * 4, (1, 1), padding="valid",
                           activation='linear', name='rpn_bbox_pred')(shared)
    rpn_bbox = KL.Lambda(
        lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(raw_deltas)

    return [rpn_class_logits, rpn_probs, rpn_bbox]
def build_rpn_model(anchor_stride, anchors_per_location, depth):
    """Wrap rpn_graph in a standalone Keras Model.

    Wrapping lets the same RPN weights be applied to every pyramid level.

    anchor_stride: anchor grid density; 1 = every pixel, 2 = every other
    anchors_per_location: number of anchors generated per feature-map pixel
    depth: channel depth of the backbone feature map

    The returned model, when called, outputs:
        rpn_class_logits: [batch, anchors, 2] raw BG/FG scores
        rpn_probs: [batch, anchors, 2] BG/FG probabilities
        rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))] anchor deltas
    """
    feature_input = KL.Input(shape=[None, None, depth],
                             name="input_rpn_feature_map")
    rpn_outputs = rpn_graph(feature_input, anchors_per_location, anchor_stride)
    return KM.Model([feature_input], rpn_outputs, name="rpn_model")
############################################################
# Background Extraction
############################################################
def Background_Extraction(robotvqa_feature_maps,config):
    """Extract a global (background/context) feature vector from the image.

    Useful for the relational network, e.g. to capture the camera/observer
    orientation. NOTE: despite the original comments, the pooling used here
    is global MAX pooling, not average pooling.

    Input:
        robotvqa_feature_maps: FPN feature maps [P2, P3, P4, P5] from the
            basenet, each [batch, h, w, 256].
        config: model configuration (unused here; kept for interface parity).
    Output:
        x: the background as a vector-like tensor of size [batch, 1024].
    """
    # 1. Collapse each pyramid level to a 256-d vector via global max
    #    pooling, then concatenate to a single [batch, 1024] vector.
    #    (Leftover debug prints from development have been removed.)
    P2,P3,P4,P5=robotvqa_feature_maps
    F5=KL.pooling.GlobalMaxPooling2D()(P5)#batch,256
    F2=KL.pooling.GlobalMaxPooling2D()(P2)#batch,256
    F3=KL.pooling.GlobalMaxPooling2D()(P3)#batch,256
    F4=KL.pooling.GlobalMaxPooling2D()(P4)#batch,256
    x=KL.Concatenate(axis=1)([F2,F3,F4,F5])#batch,1024
    # TimeDistributed expects a time axis, so temporarily add one.
    x=KL.Lambda(lambda x:tf.expand_dims(x,axis=1))(x)
    # 2. Apply a single semi-non-linear fully-connected layer.
    x=KL.TimeDistributed(KL.Dense(1024),name='robotvqa_background_extraction')(x)
    x = KL.LeakyReLU(alpha=0.5)(x)
    # Drop the temporary time axis again -> [batch, 1024].
    x=KL.Lambda(lambda x:tf.squeeze(x,axis=1))(x)
    return x
############################################################
# Feature Pyramid Network Heads
############################################################
def fpn_classifier_graph(rois, feature_maps,image_shape, pool_size, num_classes,config):
    """Builds the computation graph of the feature pyramid network classifier
    and regressor heads.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
        coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
        [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: per-head class counts; num_classes[i] sizes classification
        head i, and num_classes[0] also sizes the bbox and pose heads.
    config: model configuration (unused here; kept for interface parity).

    Returns:
        robotvqa_class_logits: list of 5 tensors [batch, num_rois, num_classes[i]]
        robotvqa_probs: list of 5 tensors, softmax of the corresponding logits
        mrcnn_bbox: [batch, num_rois, num_classes[0], (dy, dx, log(dh), log(dw))]
        robotvqa_poses: [batch, num_rois, num_classes[0], (tx, ty, tz, x, y, z)]
        shared: [batch, num_rois, 1028] pooled ROI features + box coordinates
    """
    # ROI Align -> [batch, num_rois, pool_size, pool_size, channels]
    x = PyramidROIAlign([pool_size, pool_size], image_shape,
                        name="roi_align_classifier")([rois] + feature_maps)
    # Two 1024 FC layers (implemented with Conv2D for consistency).
    x = KL.TimeDistributed(KL.Conv2D(1024, (pool_size, pool_size), padding="valid"),
                           name="robotvqa_class_conv1")(x)
    x = KL.Activation('relu')(x)
    x = KL.TimeDistributed(KL.Conv2D(1024, (1, 1)),
                           name="robotvqa_class_conv2")(x)
    x = KL.Activation('relu')(x)
    shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),
                       name="pool_squeeze")(x)
    # Append the rois' coordinates (global features) to the rois' pooled
    # features (local features) -> depth becomes 1024 + 4 = 1028.
    shared = KL.Concatenate(axis=2)([shared, rois])

    # Classifier heads: one (logits, softmax) pair per feature.
    # The copy-pasted per-head construction was deduplicated; layer names
    # ('robotvqa_class_logits{i}', 'robotvqa_class{i}') are unchanged so
    # saved weights still load. NOTE(review): the 'relu' on the logits
    # (zeroing negatives before softmax) is preserved from the original.
    robotvqa_class_logits = [
        KL.TimeDistributed(KL.Dense(num_classes[i], activation='relu'),
                           name='robotvqa_class_logits{}'.format(i))(shared)
        for i in range(5)]
    robotvqa_probs = [
        KL.TimeDistributed(KL.Activation("softmax"),
                           name="robotvqa_class{}".format(i))(robotvqa_class_logits[i])
        for i in range(5)]

    # BBox head: [batch, num_rois, num_classes[0] * 4] reshaped to
    # [batch, num_rois, num_classes[0], (dy, dx, log(dh), log(dw))].
    x = KL.TimeDistributed(KL.Dense(num_classes[0] * 4, activation='linear'),
                           name='mrcnn_bbox_fc')(shared)
    s = K.int_shape(x)
    mrcnn_bbox = KL.Reshape((s[1], num_classes[0], 4), name="mrcnn_bbox")(x)

    # Pose head: four 1028-wide layers chained with averaged residual
    # connections (1028 matches `shared`'s depth so Average() is valid).
    x1 = KL.TimeDistributed(KL.Dense(1028, activation='relu'),
                            name='robotvqa_poses_fc2')(shared)
    x2 = KL.TimeDistributed(KL.Dense(1028, activation='linear'),
                            name='robotvqa_poses_fc3')(KL.Average()([shared, x1]))
    x3 = KL.TimeDistributed(KL.Dense(1028, activation='relu'),
                            name='robotvqa_poses_fc0')(KL.Average()([x1, x2]))
    x4 = KL.TimeDistributed(KL.Dense(1028, activation='linear'),
                            name='robotvqa_poses_fc1')(KL.Average()([x2, x3]))
    x = KL.TimeDistributed(KL.Dense(num_classes[0] * 6, activation='relu'),
                           name='robotvqa_poses_fc4')(KL.Average()([x3, x4]))
    # Reshape to [batch, num_rois, num_classes[0], (tx, ty, tz, x, y, z)].
    s = K.int_shape(x)
    robotvqa_poses = KL.Reshape((s[1], num_classes[0], 6), name="robotvqa_poses")(x)
    return robotvqa_class_logits, robotvqa_probs, mrcnn_bbox, robotvqa_poses,shared
def build_fpn_mask_graph(rois, feature_maps,
                         image_shape, pool_size, num_classes):
    """Builds the computation graph of the mask head of Feature Pyramid Network.

    rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
        coordinates.
    feature_maps: List of feature maps from different layers of the pyramid,
        [P2, P3, P4, P5]. Each has a different resolution.
    image_shape: [height, width, depth]
    pool_size: The width of the square feature map generated from ROI Pooling.
    num_classes: number of classes, which determines the depth of the results

    Returns: Masks [batch, roi_count, height, width, num_classes]
    """
    # ROI Align -> [batch, num_rois, pool_size, pool_size, channels]
    x = PyramidROIAlign([pool_size, pool_size], image_shape,
                        name="roi_align_mask")([rois] + feature_maps)
    # Four identical 3x3 conv + relu stages. The copy-pasted stages were
    # deduplicated into a loop; layer names ('mrcnn_mask_conv1'..'4') are
    # unchanged so saved weights still load. (Dead commented-out BatchNorm
    # lines removed.)
    for i in range(1, 5):
        x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
                               name="mrcnn_mask_conv{}".format(i))(x)
        x = KL.Activation('relu')(x)
    # Upsample 2x, then predict one sigmoid mask per class.
    x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
                           name="mrcnn_mask_deconv")(x)
    x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
                           name="mrcnn_mask")(x)
    return x
############################################################
# Loss Functions
############################################################
def smooth_l1_loss(y_true, y_pred):
    """Elementwise Smooth-L1 (Huber, delta=1) loss.

    y_true and y_pred are typically [N, 4], but any matching shape works:
    quadratic for |error| < 1, linear beyond.
    """
    abs_err = K.abs(y_true - y_pred)
    in_quadratic_zone = K.cast(K.less(abs_err, 1.0), "float32")
    quadratic = 0.5 * abs_err**2
    linear = abs_err - 0.5
    return in_quadratic_zone * quadratic + (1 - in_quadratic_zone) * linear
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor FG/BG classification loss.

    rpn_match: [batch, anchors, 1]. Anchor match type: 1=positive,
        -1=negative, 0=neutral.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Drop the trailing singleton dim.
    match = tf.squeeze(rpn_match, -1)
    # Convert -1/+1 matches to 0/1 class labels (+1 -> FG=1, else 0).
    fg_labels = K.cast(K.equal(match, 1), tf.int32)
    # Only positive and negative anchors contribute; neutral (0) anchors
    # are excluded from the loss.
    contributing = tf.where(K.not_equal(match, 0))
    selected_logits = tf.gather_nd(rpn_class_logits, contributing)
    selected_labels = tf.gather_nd(fg_labels, contributing)
    # Cross-entropy over the surviving anchors; 0.0 if none survive.
    losses = K.sparse_categorical_crossentropy(target=selected_labels,
                                               output=selected_logits,
                                               from_logits=True)
    return K.switch(tf.size(losses) > 0, K.mean(losses), tf.constant(0.0))
def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
"""Return the RPN bounding box loss graph.
config: the model config object.
target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
Uses 0 padding to fill in unsed bbox deltas.
rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
-1=negative, 0=neutral anchor.
rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
"""
# Positive anchors contribute to the loss, but negative and
# neutral anchors (match value of 0 or -1) don't.
rpn_match = K.squeeze(rpn_match, -1)
indices = tf.where(K.equal(rpn_match, 1))
# Pick bbox deltas that contribute to the loss
rpn_bbox = tf.gather_nd(rpn_bbox, | |
CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._filter_inbox_and_archive_predicates, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_age, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_duration, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_height, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_limit, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_mime, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_num_pixels, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_num_tags, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_num_words, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_ratio, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_similar_to, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_size, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_system_predicate_width, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, QW.QWidget( self ), CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
    def UpdateOptions( self ):
        # Flush this options page's widget state back into the persistent
        # option stores (self._new_options and the legacy HC.options dict).
        self._new_options.SetBoolean( 'always_show_system_everything', self._always_show_system_everything.isChecked() )
        self._new_options.SetBoolean( 'filter_inbox_and_archive_predicates', self._filter_inbox_and_archive_predicates.isChecked() )
        system_predicates = HC.options[ 'file_system_predicates' ]
        system_predicates[ 'age' ] = self._file_system_predicate_age.GetInfo()
        system_predicates[ 'duration' ] = self._file_system_predicate_duration.GetInfo()
        # similar_to's GetInfo() returns a tuple; index 1 is the hamming distance.
        system_predicates[ 'hamming_distance' ] = self._file_system_predicate_similar_to.GetInfo()[1]
        system_predicates[ 'height' ] = self._file_system_predicate_height.GetInfo()
        system_predicates[ 'limit' ] = self._file_system_predicate_limit.GetInfo()
        system_predicates[ 'mime' ] = self._file_system_predicate_mime.GetInfo()
        system_predicates[ 'num_pixels' ] = self._file_system_predicate_num_pixels.GetInfo()
        system_predicates[ 'num_tags' ] = self._file_system_predicate_num_tags.GetInfo()
        system_predicates[ 'num_words' ] = self._file_system_predicate_num_words.GetInfo()
        system_predicates[ 'ratio' ] = self._file_system_predicate_ratio.GetInfo()
        system_predicates[ 'size' ] = self._file_system_predicate_size.GetInfo()
        system_predicates[ 'width' ] = self._file_system_predicate_width.GetInfo()
        # Write the mutated dict back (it is the same object, but the
        # explicit assignment keeps intent clear).
        HC.options[ 'file_system_predicates' ] = system_predicates
class _ExternalProgramsPanel( QW.QWidget ):
    """Options page for 'open externally' launch paths: a manual web browser
    launch path plus a per-mimetype launch-command list."""
    def __init__( self, parent ):
        QW.QWidget.__init__( self, parent )
        self._new_options = HG.client_controller.new_options
        mime_panel = ClientGUICommon.StaticBox( self, '\'open externally\' launch paths' )
        # Manual web browser launch path; empty box means "use default".
        self._web_browser_path = QW.QLineEdit( mime_panel )
        columns = [ ( 'filetype', 20 ), ( 'launch path', -1 ) ]
        self._mime_launch_listctrl = ClientGUIListCtrl.BetterListCtrl( mime_panel, 'mime_launch', 15, 30, columns, self._ConvertMimeToListCtrlTuples, activation_callback = self._EditMimeLaunch )
        #
        # Populate current values: the browser path and one row per mime.
        web_browser_path = self._new_options.GetNoneableString( 'web_browser_path' )
        if web_browser_path is not None:
            self._web_browser_path.setText( web_browser_path )
        for mime in HC.SEARCHABLE_MIMES:
            # launch_path is None when the OS default launcher applies.
            launch_path = self._new_options.GetMimeLaunch( mime )
            self._mime_launch_listctrl.AddDatas( [ ( mime, launch_path ) ] )
        self._mime_launch_listctrl.Sort( 0 )
        #
        # Layout.
        vbox = QP.VBoxLayout()
        text = 'Setting a specific web browser path here--like \'C:\\program files\\firefox\\firefox.exe "%path%"\'--can help with the \'share->open->in web browser\' command, which is buggy working with OS defaults, particularly on Windows. It also fixes #anchors, which are dropped in some OSes using default means. Use the same %path% format for the \'open externally\' commands below.'
        st = ClientGUICommon.BetterStaticText( mime_panel, text )
        st.setWordWrap( True )
        mime_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
        rows = []
        rows.append( ( 'Manual web browser launch path: ', self._web_browser_path ) )
        gridbox = ClientGUICommon.WrapInGrid( mime_panel, rows )
        mime_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        mime_panel.Add( self._mime_launch_listctrl, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( vbox, mime_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        self.setLayout( vbox )
    def _ConvertMimeToListCtrlTuples( self, data ):
        # List-control callback: turn a ( mime, launch_path ) pair into
        # ( display_tuple, sort_tuple ). None launch_path shows the default.
        ( mime, launch_path ) = data
        pretty_mime = HC.mime_string_lookup[ mime ]
        if launch_path is None:
            pretty_launch_path = 'default: ' + HydrusPaths.GetDefaultLaunchPath()
        else:
            pretty_launch_path = launch_path
        display_tuple = ( pretty_mime, pretty_launch_path )
        sort_tuple = display_tuple
        return ( display_tuple, sort_tuple )
    def _EditMimeLaunch( self ):
        # Edit the launch path of each selected mime via a text-entry dialog.
        for ( mime, launch_path ) in self._mime_launch_listctrl.GetData( only_selected = True ):
            message = 'Enter the new launch path for ' + HC.mime_string_lookup[ mime ]
            message += os.linesep * 2
            message += 'Hydrus will insert the file\'s full path wherever you put %path%, even multiple times!'
            message += os.linesep * 2
            message += 'Set as blank to reset to default.'
            if launch_path is None:
                default = 'program "%path%"'
            else:
                default = launch_path
            with ClientGUIDialogs.DialogTextEntry( self, message, default = default, allow_blank = True ) as dlg:
                if dlg.exec() == QW.QDialog.Accepted:
                    new_launch_path = dlg.GetValue()
                    if new_launch_path == '':
                        # Blank input resets this mime to the OS default.
                        new_launch_path = None
                    if new_launch_path not in ( launch_path, default ):
                        # Only replace the row if the value actually changed.
                        self._mime_launch_listctrl.DeleteDatas( [ ( mime, launch_path ) ] )
                        self._mime_launch_listctrl.AddDatas( [ ( mime, new_launch_path ) ] )
                else:
                    # Cancelling aborts the remaining selected mimes too.
                    break
        self._mime_launch_listctrl.Sort()
    def UpdateOptions( self ):
        # Flush UI state back into the options store.
        web_browser_path = self._web_browser_path.text()
        if web_browser_path == '':
            web_browser_path = None
        self._new_options.SetNoneableString( 'web_browser_path', web_browser_path )
        for ( mime, launch_path ) in self._mime_launch_listctrl.GetData():
            self._new_options.SetMimeLaunch( mime, launch_path )
class _FilesAndTrashPanel( QW.QWidget ):
    def __init__( self, parent ):
        """Build the 'files and trash' options page and load current values."""
        QW.QWidget.__init__( self, parent )
        self._new_options = HG.client_controller.new_options
        # Widgets.
        self._export_location = QP.DirPickerCtrl( self )
        self._file_system_waits_on_wakeup = QW.QCheckBox( self )
        self._file_system_waits_on_wakeup.setToolTip( 'This is useful if your hydrus is stored on a NAS that takes a few seconds to get going after your machine resumes from sleep.' )
        self._delete_to_recycle_bin = QW.QCheckBox( self )
        self._confirm_trash = QW.QCheckBox( self )
        self._confirm_archive = QW.QCheckBox( self )
        self._remove_filtered_files = QW.QCheckBox( self )
        self._remove_trashed_files = QW.QCheckBox( self )
        self._trash_max_age = ClientGUICommon.NoneableSpinCtrl( self, '', none_phrase = 'no age limit', min = 0, max = 8640 )
        self._trash_max_size = ClientGUICommon.NoneableSpinCtrl( self, '', none_phrase = 'no size limit', min = 0, max = 20480 )
        advanced_file_deletion_panel = ClientGUICommon.StaticBox( self, 'advanced file deletion and custom reasons' )
        self._use_advanced_file_deletion_dialog = QW.QCheckBox( advanced_file_deletion_panel )
        self._use_advanced_file_deletion_dialog.setToolTip( 'If this is set, the client will present a more complicated file deletion confirmation dialog that will permit you to set your own deletion reason and perform \'clean\' deletes that leave no deletion record (making later re-import easier).' )
        self._advanced_file_deletion_reasons = ClientGUIListBoxes.QueueListBox( advanced_file_deletion_panel, 5, str, add_callable = self._AddAFDR, edit_callable = self._EditAFDR )
        #
        # Load current option values into the widgets.
        if HC.options[ 'export_path' ] is not None:
            abs_path = HydrusPaths.ConvertPortablePathToAbsPath( HC.options[ 'export_path' ] )
            if abs_path is not None:
                self._export_location.SetPath( abs_path )
        self._file_system_waits_on_wakeup.setChecked( self._new_options.GetBoolean( 'file_system_waits_on_wakeup' ) )
        self._delete_to_recycle_bin.setChecked( HC.options[ 'delete_to_recycle_bin' ] )
        self._confirm_trash.setChecked( HC.options[ 'confirm_trash' ] )
        self._confirm_archive.setChecked( HC.options[ 'confirm_archive' ] )
        self._remove_filtered_files.setChecked( HC.options[ 'remove_filtered_files' ] )
        self._remove_trashed_files.setChecked( HC.options[ 'remove_trashed_files' ] )
        self._trash_max_age.SetValue( HC.options[ 'trash_max_age' ] )
        self._trash_max_size.SetValue( HC.options[ 'trash_max_size' ] )
        self._use_advanced_file_deletion_dialog.setChecked( self._new_options.GetBoolean( 'use_advanced_file_deletion_dialog' ) )
        # Toggling the advanced dialog checkbox enables/disables the reasons list.
        self._use_advanced_file_deletion_dialog.clicked.connect( self._UpdateAdvancedControls )
        self._advanced_file_deletion_reasons.AddDatas( self._new_options.GetStringList( 'advanced_file_deletion_reasons' ) )
        self._UpdateAdvancedControls()
        #
        # Layout.
        vbox = QP.VBoxLayout()
        text = 'If you set the default export directory blank, the client will use \'hydrus_export\' under the current user\'s home directory.'
        QP.AddToLayout( vbox, ClientGUICommon.BetterStaticText(self,text), CC.FLAGS_CENTER )
        rows = []
        rows.append( ( 'Confirm sending files to trash: ', self._confirm_trash ) )
        rows.append( ( 'Confirm sending more than one file to archive or inbox: ', self._confirm_archive ) )
        rows.append( ( 'Wait 15s after computer resume before accessing files: ', self._file_system_waits_on_wakeup ) )
        rows.append( ( 'When deleting files or folders, send them to the OS\'s recycle bin: ', self._delete_to_recycle_bin ) )
        rows.append( ( 'Remove files from view when they are filtered: ', self._remove_filtered_files ) )
        rows.append( ( 'Remove files from view when they are sent to the trash: ', self._remove_trashed_files ) )
        rows.append( ( 'Number of hours a file can be in the trash before being deleted: ', self._trash_max_age ) )
        rows.append( ( 'Maximum size of trash (MB): ', self._trash_max_size ) )
        rows.append( ( 'Default export directory: ', self._export_location ) )
        gridbox = ClientGUICommon.WrapInGrid( self, rows )
        QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        rows = []
        rows.append( ( 'Use the advanced file deletion dialog: ', self._use_advanced_file_deletion_dialog ) )
        gridbox = ClientGUICommon.WrapInGrid( advanced_file_deletion_panel, rows )
        advanced_file_deletion_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
        advanced_file_deletion_panel.Add( self._advanced_file_deletion_reasons, CC.FLAGS_EXPAND_BOTH_WAYS )
        QP.AddToLayout( vbox, advanced_file_deletion_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
        self.setLayout( vbox )
def _AddAFDR( self ):
reason = 'I do not like the file.'
return self._EditAFDR( reason )
    def _EditAFDR( self, reason ):
        # Prompt the user to edit a custom deletion reason. Returns the new
        # text; raises VetoException on cancel, which the QueueListBox
        # treats as "abort this add/edit".
        with ClientGUIDialogs.DialogTextEntry( self, 'enter the reason', default = reason, allow_blank = False ) as dlg:
            if dlg.exec() == QW.QDialog.Accepted:
                reason = dlg.GetValue()
                return reason
            else:
                raise HydrusExceptions.VetoException()
def _UpdateAdvancedControls( self ):
if self._use_advanced_file_deletion_dialog.isChecked():
self._advanced_file_deletion_reasons.setEnabled( True )
else:
self._advanced_file_deletion_reasons.setEnabled( False )
def UpdateOptions( self ):
HC.options[ 'export_path' ] = HydrusPaths.ConvertAbsPathToPortablePath( self._export_location.GetPath() )
self._new_options.SetBoolean( 'file_system_waits_on_wakeup', self._file_system_waits_on_wakeup.isChecked() )
HC.options[ 'delete_to_recycle_bin' ] = self._delete_to_recycle_bin.isChecked()
HC.options[ 'confirm_trash' ] = self._confirm_trash.isChecked()
HC.options[ 'confirm_archive' ] = self._confirm_archive.isChecked()
HC.options[ 'remove_filtered_files' ] = self._remove_filtered_files.isChecked()
HC.options[ 'remove_trashed_files' ] = | |
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.create_backup(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == bigtable_table_admin.CreateBackupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_backup_from_dict():
    # Re-run the main create_backup test with a dict-typed request to cover
    # the dict-to-proto coercion path.
    test_create_backup(request_type=dict)
def test_create_backup_empty_call():
    """Coverage failsafe: a totally empty call (request == None, no
    flattened fields) must still send a default CreateBackupRequest."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    with mock.patch.object(type(client.transport.create_backup), '__call__') as rpc:
        client.create_backup()
        rpc.assert_called()
        sent_request = rpc.mock_calls[0].args[0]
        assert sent_request == bigtable_table_admin.CreateBackupRequest()
@pytest.mark.asyncio
async def test_create_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CreateBackupRequest):
    """Async create_backup sends the request and returns an operation future."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional and the API is mocked, so an empty
    # request object is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.create_backup), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        response = await client.create_backup(request)
        # The stub was invoked with the default request.
        assert rpc.mock_calls
        sent_request = rpc.mock_calls[0].args[0]
        assert sent_request == bigtable_table_admin.CreateBackupRequest()
        # The response wraps the long-running operation.
        assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_backup_async_from_dict():
    # Dict-typed request variant of the async create_backup test.
    await test_create_backup_async(request_type=dict)
def test_create_backup_field_headers():
    """The request's `parent` field must be forwarded as an
    x-goog-request-params routing header."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give `parent` a non-empty value.
    request = bigtable_table_admin.CreateBackupRequest()
    request.parent = 'parent/value'
    with mock.patch.object(type(client.transport.create_backup), '__call__') as rpc:
        rpc.return_value = operations_pb2.Operation(name='operations/op')
        client.create_backup(request)
        # Exactly one RPC, carrying our request unchanged.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == request
        # Routing header derived from request.parent.
        expected_header = ('x-goog-request-params', 'parent=parent/value')
        assert expected_header in rpc.mock_calls[0].kwargs['metadata']
@pytest.mark.asyncio
async def test_create_backup_field_headers_async():
    """Async variant: `parent` must be forwarded as an
    x-goog-request-params routing header."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give `parent` a non-empty value.
    request = bigtable_table_admin.CreateBackupRequest()
    request.parent = 'parent/value'
    with mock.patch.object(type(client.transport.create_backup), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
        await client.create_backup(request)
        # The stub was invoked, carrying our request unchanged.
        assert rpc.mock_calls
        assert rpc.mock_calls[0].args[0] == request
        # Routing header derived from request.parent.
        expected_header = ('x-goog-request-params', 'parent=parent/value')
        assert expected_header in rpc.mock_calls[0].kwargs['metadata']
def test_create_backup_flattened():
    """Flattened keyword arguments must be packed into the request object."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.create_backup), '__call__') as rpc:
        rpc.return_value = operations_pb2.Operation(name='operations/op')
        # Call with a truthy value for each flattened field.
        client.create_backup(
            parent='parent_value',
            backup_id='backup_id_value',
            backup=table.Backup(name='name_value'),
        )
        # The request sent to the stub carries all three values.
        assert len(rpc.mock_calls) == 1
        sent_request = rpc.mock_calls[0].args[0]
        assert sent_request.parent == 'parent_value'
        assert sent_request.backup_id == 'backup_id_value'
        assert sent_request.backup == table.Backup(name='name_value')
def test_create_backup_flattened_error():
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_backup(
            bigtable_table_admin.CreateBackupRequest(),
            parent='parent_value',
            backup_id='backup_id_value',
            backup=table.Backup(name='name_value'),
        )
@pytest.mark.asyncio
async def test_create_backup_flattened_async():
    """Async: flattened keyword arguments must be packed into the request."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.create_backup),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        # (A preceding plain-Operation assignment to call.return_value was
        # dead code — it was immediately overwritten — and was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_backup(
            parent='parent_value',
            backup_id='backup_id_value',
            backup=table.Backup(name='name_value'),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0].parent == 'parent_value'
        assert args[0].backup_id == 'backup_id_value'
        assert args[0].backup == table.Backup(name='name_value')
@pytest.mark.asyncio
async def test_create_backup_flattened_error_async():
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_backup(
            bigtable_table_admin.CreateBackupRequest(),
            parent='parent_value',
            backup_id='backup_id_value',
            backup=table.Backup(name='name_value'),
        )
def test_get_backup(transport: str = 'grpc', request_type=bigtable_table_admin.GetBackupRequest):
    """get_backup returns a Backup whose fields mirror the stub response."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional and the API is mocked, so an empty
    # request object is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.get_backup), '__call__') as rpc:
        rpc.return_value = table.Backup(
            name='name_value',
            source_table='source_table_value',
            size_bytes=1089,
            state=table.Backup.State.CREATING,
        )
        response = client.get_backup(request)
        # Exactly one RPC, with the default request.
        assert len(rpc.mock_calls) == 1
        assert rpc.mock_calls[0].args[0] == bigtable_table_admin.GetBackupRequest()
        # The response mirrors the stubbed Backup.
        assert isinstance(response, table.Backup)
        assert response.name == 'name_value'
        assert response.source_table == 'source_table_value'
        assert response.size_bytes == 1089
        assert response.state == table.Backup.State.CREATING
def test_get_backup_from_dict():
    # Re-run the main get_backup test with a dict-typed request to cover
    # the dict-to-proto coercion path.
    test_get_backup(request_type=dict)
def test_get_backup_empty_call():
    """Coverage failsafe: a totally empty call (request == None, no
    flattened fields) must still send a default GetBackupRequest."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport='grpc',
    )
    with mock.patch.object(type(client.transport.get_backup), '__call__') as rpc:
        client.get_backup()
        rpc.assert_called()
        sent_request = rpc.mock_calls[0].args[0]
        assert sent_request == bigtable_table_admin.GetBackupRequest()
@pytest.mark.asyncio
async def test_get_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GetBackupRequest):
    """Async get_backup returns a Backup mirroring the stub response."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional and the API is mocked, so an empty
    # request object is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.get_backup), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup(
            name='name_value',
            source_table='source_table_value',
            size_bytes=1089,
            state=table.Backup.State.CREATING,
        ))
        response = await client.get_backup(request)
        # The stub was invoked with the default request.
        assert rpc.mock_calls
        assert rpc.mock_calls[0].args[0] == bigtable_table_admin.GetBackupRequest()
        # The response mirrors the stubbed Backup.
        assert isinstance(response, table.Backup)
        assert response.name == 'name_value'
        assert response.source_table == 'source_table_value'
        assert response.size_bytes == 1089
        assert response.state == table.Backup.State.CREATING
@pytest.mark.asyncio
async def test_get_backup_async_from_dict():
    # Async variant of the dict-request smoke test.
    await test_get_backup_async(request_type=dict)
def test_get_backup_field_headers():
    """request.name must be propagated as an x-goog-request-params header."""
    client = BigtableTableAdminClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as a field
    # header, so give `name` a non-empty value.
    request = bigtable_table_admin.GetBackupRequest(name='name/value')
    with mock.patch.object(type(client.transport.get_backup), '__call__') as rpc:
        rpc.return_value = table.Backup()
        client.get_backup(request)
        # Exactly one stub invocation, carrying our request unchanged.
        assert len(rpc.mock_calls) == 1
        _, call_args, call_kwargs = rpc.mock_calls[0]
        assert call_args[0] == request
        # The routing header must carry the resource name.
        expected_header = ('x-goog-request-params', 'name=name/value')
        assert expected_header in call_kwargs['metadata']
@pytest.mark.asyncio
async def test_get_backup_field_headers_async():
    """Async variant: request.name travels as a routing field header."""
    client = BigtableTableAdminAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # URI path components must be sent as field headers; use a non-empty name.
    request = bigtable_table_admin.GetBackupRequest(name='name/value')
    with mock.patch.object(type(client.transport.get_backup), '__call__') as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
        await client.get_backup(request)
        # The stub saw exactly our request.
        assert len(rpc.mock_calls)
        _, call_args, call_kwargs = rpc.mock_calls[0]
        assert call_args[0] == request
        # The routing header must carry the resource name.
        expected_header = ('x-goog-request-params', 'name=name/value')
        assert expected_header in call_kwargs['metadata']
def test_get_backup_flattened():
client = BigtableTableAdminClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_backup),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = table.Backup()
# Call the method with | |
<filename>program/diceroll.py
#
# diceroll.py
#
# Written for Classic Python 2.5.4
#
# To use this module: from diceroll import roll
#
# Make a dice roll
#
##########################################################
'''
diecroll module containing roll()
Usage:
from diceroll import roll
print roll('2D6')
Will roll two 6-sided dice, returning an integer
'''
from random import randint
import os
import logging
import sys
from colorama import init
from colorama import Fore, Back, Style
init() # initialize colorama
__version__ = '3.1'
__release__ = '3.1.0b'
__author__ = '<NAME> <<EMAIL>>\n<EMAIL>'
# Module-level logger: records every roll to Logs/diceroll.log, truncating
# the file ('w' mode) on each import of this module.
diceroll_log = logging.getLogger('diceroll')
diceroll_log.setLevel(logging.INFO)
# Make sure the log directory exists before attaching the file handler.
if not os.path.exists('Logs'):
    os.mkdir('Logs')
fh = logging.FileHandler('Logs/diceroll.log', 'w')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s - %(message)s',
    datefmt = '%a, %d %b %Y %H:%M:%S')
fh.setFormatter(formatter)
diceroll_log.addHandler(fh)
diceroll_log.info('Logging started.')
diceroll_log.info('roll() v' + __version__ + ' started, and running...')
# Lookup tables: spelled-out dice counts (index == count, used in log
# messages), the plain die types, and the Traveller5 "nD" shorthand rolls.
number_of_dice = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten']
simple_dice = ['D3', 'D4', 'D6', 'D8', 'D10', 'D12', 'D20', 'D30']
traveller5_dice = ['1D', '2D', '3D', '4D', '5D', '6D', '7D', '8D', '9D', '10D']
def _dierolls(dtype, dcount):
    '''
    Roll `dcount` dice of `dtype` sides each and return the summed total.

    Both arguments are integers.  Internal helper: performs no
    error-checking on its inputs!
    '''
    # Describe the hand of dice being rolled, spelling out counts up to ten.
    if dcount < 11:
        noun = 'die' if dcount == 1 else 'dice'
        diceroll_log.debug('Using %s %d-sided %s...'
                           % (number_of_dice[dcount], dtype, noun))
    else:
        diceroll_log.debug('Using %d %d-sided dice...' % (dcount, dtype))
    dtotal = 0
    for _ in range(dcount):
        pips = randint(1, dtype)
        # Pick the article that reads naturally in the log: "an eight",
        # "an eleven", "an eighteen", "an eighty-..." -- otherwise "a".
        article = 'an' if (pips in (8, 11, 18) or 80 <= pips <= 89) else 'a'
        diceroll_log.debug('Rolled %s %s' % (article, pips))
        dtotal += pips
    return dtotal
def roll(dice):
'''
The dice types to roll are:
'4dF', 'D2', 'D3', 'D4', 'D6', 'D8', 'D09', 'D10',
'D12', 'D20', 'D30', 'D099', 'D100', 'D66', 'DD',
'FLUX', 'GOODFLUX', 'BADFLUX', 'BOON', 'BANE',
and also Traveller5's 1D thru 10D rolls
Some examples are:
roll('D6') or roll('1D6') -- roll one 6-sided die
roll('2D6') -- roll two 6-sided dice
roll('D09') -- roll a 10-sided die (0 - 9)
roll('D10') -- roll a 10-sided die (1 - 10)
roll('D099') -- roll a 100-sided die (0 - 99)
roll('D100') -- roll a 100-sided die (1 - 100)
roll('D66') -- roll for a D66 chart
roll('FLUX') -- a FLUX roll (-5 to 5)
roll('3D6+6') -- add +6 DM to roll
roll('4D4-4') -- add -4 DM to roll
roll('2DD+3') -- roll (2D6+3) x 10
roll('BOON') -- roll 3D6 and keep the higher two dice
roll('4dF') -- make a FATE roll
roll('4D') -- make a Traveller5 4D roll
roll('info') -- release version of program
An invalid roll will return a 0.
'''
log = logging.getLogger('your_code_name_here.diceroll')
# make inputted string argument upper case, and remove spaces
dice = str(dice).upper().replace(' ','')
# was information for this program asked for?
if dice == 'INFO':
ver = 'roll(), release version ' + __release__ + ' for Classic Python 2.5.4'
diceroll_log.info('Reporting: roll() release version: %s' % __release__)
return __version__, ver
# was a test asked for?
if dice == 'TEST':
diceroll_log.info('A 6x6 test was started...')
roll_chart_6x6 = {}
data = []
for i in range(13):
data.append(0)
n = 10000
for i in range(6):
for j in range(6):
roll_chart_6x6[(i+1, j+1)] = 0
print
print ' 6x6 Roll Chart Test'
print ' 1 2 3 4 5 6'
for i in range(n):
die1 = _dierolls(6, 1)
die2 = _dierolls(6, 1)
roll_chart_6x6[(die1, die2)] += 1
result = die1 + die2
data[result] += 1
for i in range(6):
print i+1,
for j in range(6):
print '%4d' % roll_chart_6x6[(i+1, j+1)],
print
for i in range(6):
for j in range(6):
roll_chart_6x6[(i+1, j+1)] = 0
print
print ' 6x6 Roll Chart Percentage'
print ' 1 2 3 4 5 6'
for x in range(13):
if x > 1:
for i in range(6):
for j in range(6):
if (i+1)+(j+1) == x and roll_chart_6x6[(i+1, j+1)] == 0:
roll_chart_6x6[(i+1, j+1)] = data[x]
for i in range(6):
print i+1,
for j in range(6):
print '%6.2f%%' % (roll_chart_6x6[(i+1, j+1)] * 100. / n),
print
print
diceroll_log.info('6x6 test completed 100%.')
for x in range(len(data)):
data[x] = data[x] * 100. / n
return data[2:13]
log.debug(dice)
diceroll_log.debug('Asked to roll %s:' % dice)
# set dice modifier to zero.
dice_mod = 0
# check if a FATE dice roll
if dice == '4DF':
fate1 = _dierolls(3, 1) - 2
fate2 = _dierolls(3, 1) - 2
fate3 = _dierolls(3, 1) - 2
fate4 = _dierolls(3, 1) - 2
rolled = fate1 + fate2 + fate3 + fate4
diceroll_log.info('%s = %d, %d, %d, %d = %d' % (dice, fate1, fate2, fate3, fate4, rolled))
return rolled
# check if FLUX dice are being rolled
elif dice == 'FLUX':
flux1 = _dierolls(6, 1)
flux2 = _dierolls(6, 1)
rolled = flux1 - flux2
diceroll_log.info('%s = %d - %d = %d' % (dice, flux1, flux2, rolled))
return rolled
elif dice == 'GOODFLUX':
flux1 = _dierolls(6, 1)
flux2 = _dierolls(6, 1)
if flux1 < flux2:
rolled = flux2 - flux1
diceroll_log.info('%s = %d - %d = %d' % (dice, flux2, flux1, rolled))
else:
rolled = flux1 - flux2
diceroll_log.info('%s = %d - %d = %d' % (dice, flux1, flux2, rolled))
return rolled
elif dice == 'BADFLUX':
flux1 = _dierolls(6, 1)
flux2 = _dierolls(6, 1)
if flux1 > flux2:
rolled = flux2 - flux1
diceroll_log.info('%s = %d - %d = %d' % (dice, flux2, flux1, rolled))
else:
rolled = flux1 - flux2
diceroll_log.info('%s = %d - %d = %d' % (dice, flux1, flux2, rolled))
return rolled
# check if a BOON roll is being performed
elif dice == 'BOON':
die = [0, 0, 0]
die[0] = _dierolls(6, 1)
die[1] = _dierolls(6, 1)
die[2] = _dierolls(6, 1)
diceroll_log.info('Start Boon roll: %d %d %d' % (die[0], die[1], die[2]))
die_swap = True
while die_swap == True:
die_swap = False
for j in range(2):
if die[j] < die[j+1]:
temp_die = die[j]
die[j] = die[j+1]
die[j+1] = temp_die
die_swap = True
rolled = die[0] + die[1]
diceroll_log.info('Sorted Boon roll: %d %d %d = %d' % (die[0], die[1], die[2], rolled))
return rolled
# check if a BANE roll is being performed
elif dice == 'BANE':
die = [0, 0, 0]
die[0] = _dierolls(6, 1)
die[1] = _dierolls(6, 1)
die[2] = _dierolls(6, 1)
diceroll_log.info('Start Bane roll: %d %d %d' % (die[0], die[1], die[2]))
die_swap = True
while die_swap == True:
die_swap = False
for j in range(2):
if die[j] > die[j+1]:
temp_die = die[j]
die[j] = die[j+1]
die[j+1] = temp_die
die_swap = True
rolled = die[0] + die[1]
diceroll_log.info('Sorted Bane roll: %d %d %d = %d' % (die[0], die[1], die[2], rolled))
return rolled
else:
# check if T5 dice are being rolled
t5_dice = dice
dice_mod = 0
ichar2 = dice.find('+')
if ichar2 <> -1:
dice_mod = int(dice[ichar2:len(dice)])
t5_dice = dice[0:ichar2]
else:
ichar2 = dice.find('-')
if ichar2 <> -1:
dice_mod = int(dice[ichar2:len(dice)])
t5_dice = dice[0:ichar2]
if t5_dice in traveller5_dice:
num_dice = int(t5_dice[0:len(t5_dice) - 1])
rolled = _dierolls(6, num_dice) + dice_mod
diceroll_log.info('Traveller5 %s = %d%s+%d = %d' % (dice, num_dice, 'D6', dice_mod, rolled))
return rolled
# look for DD in the string (for destructive dice rolls)
ichar1 = dice.find('DD')
if ichar1 == -1:
# if not, does the string indicate regular dice for use?
ichar1 = dice.find('D')
if ichar1 == 0:
# only one die is being rolled
num_dice = 1
if ichar1 <> -1:
if ichar1 <> 0:
# how many dice are being rolled?
num_dice = int(dice[0:ichar1])
if num_dice < 1:
if num_dice < 0:
log.error('Negative dice count! [ERROR]')
diceroll_log.error('Number of dice = ' + str(num_dice) + ' [ERROR]')
if num_dice >= 1:
# is there a +/- dice modifier for the roll?
ichar2 = dice.find('+')
if ichar2 <> -1:
dice_mod = int(dice[ichar2:len(dice)])
else:
ichar2 = dice.find('-')
if ichar2 <> -1:
dice_mod = int(dice[ichar2:len(dice)])
# what kind of dice are being rolled? D6? D66? etc.
if ichar2 <> -1:
dice_type = dice[ichar1:ichar2]
else:
dice_type = dice[ichar1:len(dice)]
if | |
currentFltCat in [nextFltCat,prevFltCat]
else:
return currentFltCat == nextFltCat
def _summarizeTAFPrjs(TAFPrjs,TAFData,tafDuration=24):
    """Attempt to group based on previous TAF.

    Walks the official TAF's prevailing-group time breakpoints, collects the
    guidance projections falling inside each window, and averages runs of
    projections that share a flight category into TAF text lines.

    TAFPrjs -- sequence of projection objects (flightCategory(), vtime,
        lampdata attributes)
    TAFData -- decoded TAF groups; each may carry ['prev']['time'] bounds
    tafDuration -- forecast length in hours, forwarded to _prjAverage
    Returns the list of TAF text lines, or None if the station identifier
    cannot be determined.
    """
    #
    # Initialization
    TAFText = []
    #
    # Save Station Identifier and whether its an amendment
    try:
        ident=TAFPrjs[0].lampdata['ident']
        amd=TAFPrjs[0].lampdata['amd']
    except (KeyError, IndexError):
        # No projections or no ident -- nothing can be produced.
        return
    #
    # Start with breakpoints in the official TAF
    for grpnum,grp in enumerate(TAFData):
        try:
            shr = grp['prev']['time']['from']
            ehr = grp['prev']['time']['to']
        except KeyError:
            # Group has no prevailing time window; skip it.
            continue
        #
        # Identify those projections and the flight category
        # they correspond to.
        #
        prjs = [(x.flightCategory(),x) for x in TAFPrjs if shr <= x.vtime < ehr]
        #
        for n,cat in enumerate(prjs):
            # First pass: remember the last index and start a fresh run.
            if n == 0:
                numPrjs = len(prjs)-1
                prjs2avg = []
            # Current/next/previous categories (clamped at the ends).
            crntPrj = cat[0]
            nextPrj = prjs[min(n+1,numPrjs)][0]
            prevPrj = prjs[max(0,n-1)][0]
            if _addPrj(grpnum,n,crntPrj,nextPrj,prevPrj):
                #
                prjs2avg.append(cat[1])
                #
                # If there's a change in flight category ahead,
                # average the projections gathered so far
                #
                if crntPrj != nextPrj:
                    # The very first emitted group carries ident/amd.
                    if TAFText == []:
                        prjs2avg[0].lampdata['ident']=ident
                        prjs2avg[0].lampdata['amd']=amd
                    TAFText.extend(_prjAverage(prjs2avg,tafDuration))
                    prjs2avg = []
        #
        # Flush any run left over at the end of this TAF group.
        if prjs and prjs2avg:
            if TAFText == []:
                prjs2avg[0].lampdata['ident']=ident
                prjs2avg[0].lampdata['amd']=amd
            TAFText.extend(_prjAverage(prjs2avg,tafDuration))
    return TAFText
class TUGPrj:
def __init__(self,**kwds):
self.__dict__.update(kwds)
try:
self.tprev['time']['from']=self.vtime
self.tprev['time']['to']=self.vtime+3600.0
except KeyError:
pass
try:
self.tocnl['time']['from']=self.vtime
self.tocnl['time']['to']=self.vtime+3600.0
except KeyError:
pass
self.wet = self._isWet()
self.pcpn_changed = self.changed = False
def checkSky(self,tafGrpInstructions,dthresholds={'up':.1,'down':.1},
wthresholds={'up':.1,'down':.1}):
"""Make changes to ceiling when guidance strongly differs"""
maxcbhgt=tafGrpInstructions.get('cbhight',50)
#
# For prevailing and occasional groups, adjust if necessary
try:
for group in [self.tprev,self.tocnl]:
if self._isGroupWet(group):
self._checkCeiling(group['sky'],wthresholds,maxcbhgt,True)
else:
self._checkCeiling(group['sky'],dthresholds,maxcbhgt,False)
#
# Determine if the sky condition is duplicated.
new_OcnlSky = []
for layer in self.tocnl['sky']['str'].split():
if not (layer.endswith('CB') or layer.startswith('VV')) and \
layer in self.tprev['sky']['str']:
continue
new_OcnlSky.append(layer)
if len(new_OcnlSky) == 0:
del self.tocnl['sky']
except KeyError:
pass
    def _checkCeiling(self,taf,deltas,maxcbhgt,wet=False):
        """Adjust the group's ceiling toward LAMP guidance when they
        strongly disagree.

        taf -- the group's sky dictionary ('cig' height and 'str' layers)
        deltas -- probability slack ('up'/'down') allowed before forcing
            a change
        maxcbhgt -- highest layer (hundreds of ft) allowed to carry a CB tag
        wet -- use the conditional (precipitation) LAMP elements if True
        """
        #
        # Select conditional or unconditional LAMP ceiling guidance.
        if wet:
            lamp=self.lampdata['csky']
            lampBestCat=self.lampdata['ccig_bestCat']
            probabilities=self.lampdata['ccprob']
            thresholds=self.ccigthr
        else:
            lamp=self.lampdata['sky']
            lampBestCat=self.lampdata['cig_bestCat']
            probabilities=self.lampdata['cprob']
            thresholds=self.cigthr
        tcat = Avn.category(taf['cig'],_LAMPCeilings)
        # TAF already in LAMP's best ceiling category -- nothing to do.
        if tcat == lampBestCat:
            return
        #
        # If LAMP and TAF both do not have a ceiling, return early
        if lamp['cig'] == taf['cig'] == 99999:
            return
        #
        # Adjust thresholds, determine if we can hit taf's category.
        if tcat > lampBestCat and _inCategory(lampBestCat,thresholds,probabilities,deltas['up']):
            return
        if tcat < lampBestCat and _inCategory(tcat,thresholds,probabilities,deltas['down']):
            return
        #
        # Otherwise, the guidance 'strongly' disagrees with TAF
        self.cig_changed = self.changed = True
        newsky = []
        newceiling = []
        #
        # Preserve CB in sky condition, cb_skyamt serves as a flag as well
        cb_skyamt = None
        for lyr in taf['str'].split():
            if lyr.endswith('CB'):
                cb_skyamt = lyr[:3]
        #
        # Find layers at or below LAMP ceiling category
        if lampBestCat < tcat:
            # They have to be FEW or SCT layers
            for layer in [x for x in taf['str'].split() if Avn.category(_getCldHt(x),_LAMPCeilings) <= lampBestCat]:
                # SCT layers that match LAMP category, change to BKN
                if layer[:3] == 'SCT' and Avn.category(_getCldHt(layer),_LAMPCeilings) == lampBestCat:
                    newceiling.append('BKN%03d' % int(_getCldHt(layer)*0.01))
                else:
                    newsky.append(layer)
            #
            # If no ceiling found in LAMP category add one
            if not newceiling:
                maxCeiling = _LAMPNewCeilings[lampBestCat][1]
                if lamp['str'] != 'SKC':
                    newceiling.append(lamp['str'][:3]+'%03d'%maxCeiling)
                else:
                    # LAMP says clear sky: drop all layers and the CB flag.
                    newceiling.append(lamp['str'])
                    cb_skyamt = None
                    newsky = []
            #
            newsky.extend(newceiling)
        else:
            # Remove ceilings below lamp category, leave FEW and SCT alone
            newsky = [x for x in taf['str'].split()
                      if x[:3] in ['FEW','SCT'] and \
                      Avn.category(_getCldHt(x),_LAMPCeilings) < lampBestCat]
            newceiling = [x for x in taf['str'].split()
                          if Avn.category(_getCldHt(x),_LAMPCeilings) == lampBestCat]
            #
            if not newceiling:
                if lamp['str']=='SKC':
                    newsky=['SKC']
                else:
                    newsky.extend([lamp['str'][:3]+'%03d'%(_LAMPNewCeilings[lampBestCat][0])])
            else:
                newsky.extend(newceiling)
        # Re-attach the CB tag if the original TAF had one.  Each for/else
        # below runs its else branch only when the loop did NOT break.
        if cb_skyamt:
            #
            # If there's already a CB present, break
            for i, lyr in enumerate(newsky):
                if lyr.endswith('CB'):
                    break
            else:
                #
                # If there's a cloud amount that matches the original TAF CB amount and its
                # below a configurable max CB height
                #
                for i, lyr in enumerate(newsky):
                    try:
                        if cb_skyamt == lyr[:3] and int(lyr[3:6]) <= maxcbhgt:
                            newsky[i]+='CB'
                            break
                    except (ValueError,IndexError):
                        pass
                else:
                    #
                    # Otherwise, use the first acceptable layer found below a configurable
                    # max CB height
                    #
                    for i, lyr in enumerate(newsky):
                        try:
                            if lyr[:3] in ['SCT','BKN','OVC'] and int(lyr[3:6]) <= maxcbhgt:
                                newsky[i]+='CB'
                                break
                        except (ValueError,IndexError):
                            pass
        # Write the rebuilt layer string back; the last layer is the ceiling.
        taf['str'],taf['cig'] = ' '.join(newsky), _getCldHt(newsky[-1])
def checkVsby(self,dthresholds={'up':.1,'down':.1},wthresholds={'up':.1,'down':.1}):
"""Make changes to visibility when guidance disagrees"""
# For prevailing and occasional groups, adjust if necessary
try:
for group in [self.tprev,self.tocnl]:
if self._isGroupWet(group):
self._checkVisibility(group,wthresholds,True)
else:
self._checkVisibility(group,dthresholds,False)
except KeyError:
pass
def _adjustSNDZIntensity(self,pcpn_str,intensity=None):
"""Based on visibility, the intensity of snow or drizzle may need to be adjusted"""
newPcpnStr = []
for pcp in pcpn_str.split():
result = re.compile('(?P<Pint>[+-])?[A-Z]{,6}(DZ|SN)').match(pcp)
#
# If SN and/or drizzle present
if result:
oldintensity = None
try:
oldintensity = result.group('Pint')
except AttributeError:
pass
if intensity == oldintensity:
return pcpn_str
elif intensity and not oldintensity:
newPcpnStr.append('%c%s' % (intensity,pcp))
elif oldintensity and not intensity:
newPcpnStr.append(pcp[1:])
else:
newPcpnStr.append('%c%s' % (intensity,pcp[1:]))
else:
newPcpnStr.append(pcp)
return ' '.join(newPcpnStr)
    def _checkVisibility(self,taf,deltas,wet=False):
        """Adjust the group's visibility (and related wx) toward LAMP
        guidance when they strongly disagree.

        taf -- the group's dictionary ('vsby', optionally 'obv'/'pcp')
        deltas -- probability slack ('up'/'down') allowed before forcing
            a change
        wet -- use the conditional (precipitation) LAMP elements if True
        """
        #
        # Select conditional or unconditional LAMP visibility guidance.
        if wet:
            lamp=self.lampdata['cvsby']
            lampBestCat=self.lampdata['cvis_bestCat']
            probabilities=self.lampdata['cvprob']
            thresholds=self.cvisthr
        else:
            lamp=self.lampdata['vsby']
            lampBestCat=self.lampdata['vis_bestCat']
            probabilities=self.lampdata['vprob']
            thresholds=self.visthr
        tcat = Avn.category(taf['vsby']['value'],_LAMPVisibilities)
        if tcat == lampBestCat:
            # Categories agree: only swap BR/FG to match the 1/2SM rule.
            try:
                if taf['obv']['str'] in ['BR','FG']:
                    if taf['vsby']['value'] <= 0.5:
                        taf['obv']['str'] = 'FG'
                    else:
                        taf['obv']['str'] = 'BR'
            except KeyError:
                pass
            return
        #
        # Determine if we can hit taf's category by seeing how much its off
        if tcat > lampBestCat and _inCategory(lampBestCat,thresholds,probabilities,deltas['up']):
            return
        if tcat < lampBestCat and _inCategory(tcat,thresholds,probabilities,deltas['down']):
            return
        #
        # Check precip/obvis in the VFR/VLIFR cases, all other cases, TAF obvis will be accepted.
        if lampBestCat < tcat:
            # Guidance is worse than the TAF: lower the visibility.
            taf['vsby'] = AvnLib.fixTafVsby(_LAMPNewVisibilities[lampBestCat][1])
            #
            # If LAMP forecasting VLIFR and TAF obvis is BR, change that
            if lampBestCat == 1:
                try:
                    if taf['obv'] and taf['obv']['str'] == 'BR':
                        taf['obv']['str'] = 'FG'
                except KeyError:
                    pass
            #
            # Tedious for precipitation
            try:
                if lampBestCat == 1:
                    taf['pcp']['str'] = self._adjustSNDZIntensity(taf['pcp']['str'],'+')
                else:
                    taf['pcp']['str'] = self._adjustSNDZIntensity(taf['pcp']['str'],'-')
            except KeyError:
                pass
            # Low visibility needs a cause: borrow LAMP's obstruction.
            if not taf.has_key('pcp') and not taf.has_key('obv'):
                taf['obv'] = copy.copy(self.lampdata['obv'])
                if taf['obv']['str'] == 'FG' and lampBestCat > 1:
                    taf['obv']['str'] = 'BR'
        else:
            #
            # If there's obstruction to vision or precipitation, and LAMP indicates VFR
            # better to accept forecaster's value in this case.
            #
            if lampBestCat > 5 and ('obv' in taf.keys() or self._isGroupWet(taf)):
                return
            #
            # Otherwise, adjust according.
            taf['vsby'] = AvnLib.fixTafVsby(_LAMPNewVisibilities[lampBestCat][0])
            #
            # Change occurrence of FG to BR
            try:
                if lampBestCat > 2 and taf['obv'] and taf['obv']['str'] == 'FG':
                    taf['obv']['str'] = 'BR'
            except KeyError:
                pass
            #
            # Tedious for precipitation
            try:
                if lampBestCat == 2:
                    taf['pcp']['str'] = self._adjustSNDZIntensity(taf['pcp']['str'],'+')
                else:
                    taf['pcp']['str'] = self._adjustSNDZIntensity(taf['pcp']['str'],'-')
            except KeyError:
                pass
            # Below-VFR visibility still needs a cause on the books.
            if lampBestCat < 7 and not taf.has_key('pcp') and not taf.has_key('obv'):
                taf['obv'] = copy.copy(self.lampdata['obv'])
                if taf['obv']['str'] == 'FG' and lampBestCat > 1:
                    taf['obv']['str'] = 'BR'
def checkWind(self):
"""Simply copies LAMP winds into TAF"""
#
# Provide LAMP winds aren't missing!
if not self.lampdata['wind']['str'].startswith('?'):
self.tprev['wind']=copy.copy(self.lampdata['wind'])
def _genOcnlPcp(self,otype):
"""Add precipitation to occasional group"""
if hasattr(self,'tocnl') and self.tocnl.has_key('pcp'):
return
if not hasattr(self,'tocnl'):
self.tocnl = { 'time': { 'from':self.vtime,'to':self.vtime+3600.0 }}
else:
self.tocnl['time'] = { 'from':self.vtime,'to':self.vtime+3600.0 }
self.tocnl['type']=otype
self.tocnl['pcp'] = self.lampdata['pcp']
self.tocnl['vsby'] = self.lampdata['cvsby']
self.tocnl['sky'] = self.lampdata['csky']
try:
self.tocnl['obv'] = self.lampdata['obv']
except KeyError:
pass
def _genPrevailingPcp(self):
"""Add precipitation to prevailing group"""
self.tprev['pcp'] = self.lampdata['pcp']
self.tprev['vsby'] = self.lampdata['cvsby']
try:
if not self.tprev.has_key('obv'):
self.tprev['obv'] = self.lampdata['obv']
except KeyError:
pass
def checkPrecip(self,bbound=-.1,tbound=.1):
"""Compare guidance and official TAF to see if they agree w.r.t precipitation"""
#
# Probability 'score' combines 6-h POP and relative probability over
# climatology 0.17 ~= 1/6, bleh.
#
score = self.lampdata.get('pop6hr',0)*.17+self.lampdata['pcp']['pop']
#
# A dry TAF
if not self.wet:
# Look at the 'score' value to determine if precip is warranted
if score <= 30.0:
return
elif 30 < score <= 50.0:
if self.lampprj > 9:
self._genOcnlPcp('PROB')
elif 50 < score <= 70.0:
self._genOcnlPcp('TEMPO')
else:
self._genPrevailingPcp()
return
#
# TAF is wet, but LAMP indicates dry
elif self.lampdata['pcp']['pcat'] == 0:
#
# if prevailing group of TAF is wet...
if self._isGroupWet(self.tprev):
#
# Use the freezing precipitation that LAMP suggests
if 'FZ' in self.lampdata['pcp']['str'] and \
not 'FZ' in self.tprev['pcp']['str']:
self.tprev['pcp']=self.lampdata['pcp']
#
# but if probablity is low, demote or remove
if score < 40.0:
if self._tsCheck(self.tprev):
self.tprev['pcp']['str'] = 'TS'
else:
del self.tprev['pcp']
#
# Add the appropriate group
if 30 > score >= | |
<reponame>ixc/django-compressor
from __future__ import with_statement, unicode_literals
import copy
from contextlib import contextmanager
import io
import os
from importlib import import_module
from mock import patch
from unittest import SkipTest
from django.core.management import call_command
from django.core.management.base import CommandError
from django.template import Template, Context
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from compressor.cache import flush_offline_manifest, get_offline_manifest
from compressor.conf import settings
from compressor.exceptions import OfflineGenerationError
from compressor.management.commands.compress import Command as CompressCommand
from compressor.storage import default_storage
from compressor.utils import get_mod_func
from django.urls import get_script_prefix, set_script_prefix
def offline_context_generator():
    """Yield three numbered offline contexts for COMPRESS_OFFLINE_CONTEXT."""
    for index in (1, 2, 3):
        yield {'content': 'OK %d!' % index}
def static_url_context_generator():
    # Single-context generator exposing the (possibly lazy) STATIC_URL so
    # offline-compressed templates can resolve static asset paths.
    yield {'STATIC_URL': settings.STATIC_URL}
class LazyScriptNamePrefixedUrl(six.text_type):
    """
    Lazy URL whose string value is prefixed with the current ``SCRIPT_NAME``
    WSGI parameter.

    .. code-block :: python

        settings.STATIC_URL = LazyScriptNamePrefixedUrl('/static/')

        # HTTP request to '/some/page/' without SCRIPT_NAME
        str(settings.STATIC_URL) == '/static/'

        # HTTP request with SCRIPT_NAME = '/app/prefix/'
        str(settings.STATIC_URL) == '/app/prefix/static/'

    The implementation is incomplete: to work with the rest of Django core,
    every ``str`` method would need an equivalent override.
    """
    def __str__(self):
        # Only absolute values get prefixed; the leading '/' is replaced
        # by the (trailing-slash-terminated) script prefix.
        if self.startswith('/'):
            return get_script_prefix() + self[1:]
        return self
    def __unicode__(self):
        return str(self)
    def split(self, *args, **kwargs):
        """Split the *prefixed* form so ``{% static %}`` sees the full URL."""
        return six.text_type(self).split(*args, **kwargs)
@contextmanager
def script_prefix(new_prefix):
    """
    Override ``SCRIPT_NAME`` WSGI param, yield, then restore its original value.

    :param new_prefix: New ``SCRIPT_NAME`` value.
    """
    old_prefix = get_script_prefix()
    set_script_prefix(new_prefix)
    try:
        yield
    finally:
        # Restore even if the with-body raises.  Without try/finally a
        # failing test would leak the overridden prefix into every
        # subsequent test (contextmanager generators only run code after
        # the yield on the normal exit path).
        set_script_prefix(old_prefix)
class OfflineTestCaseMixin(object):
    """Shared harness for offline-compression tests.

    Subclasses set ``templates_dir`` (fixture directory), ``expected_hash``
    (a single hash or a list, one per compress block), and optionally
    ``expected_basename``, ``engines`` and ``additional_test_settings``.
    The mixin wires up both template engines, runs the ``compress``
    management command, and compares its output against normal rendering.
    """
    template_name = 'test_compressor_offline.html'
    # Change this for each test class
    templates_dir = ''
    expected_basename = 'output'
    expected_hash = ''
    # Engines to test
    engines = ('django', 'jinja2')
    additional_test_settings = None
    def setUp(self):
        # Reset template dirs, because it enables us to force compress to
        # consider only a specific directory (helps us make true,
        # independent unit tests).
        # Specify both Jinja2 and Django template locations. When the wrong
        # engine is used to parse a template, the TemplateSyntaxError will
        # cause the template to be skipped over.
        # We've hardcoded TEMPLATES[0] to be Django templates backend and
        # TEMPLATES[1] to be Jinja2 templates backend in test_settings.
        TEMPLATES = copy.deepcopy(settings.TEMPLATES)
        django_template_dir = os.path.join(
            TEMPLATES[0]['DIRS'][0], self.templates_dir)
        jinja2_template_dir = os.path.join(
            TEMPLATES[1]['DIRS'][0], self.templates_dir)
        TEMPLATES[0]['DIRS'] = [django_template_dir]
        TEMPLATES[1]['DIRS'] = [jinja2_template_dir]
        # Force compression + offline mode on for every test in the class.
        override_settings = {
            'TEMPLATES': TEMPLATES,
            'COMPRESS_ENABLED': True,
            'COMPRESS_OFFLINE': True
        }
        if 'jinja2' in self.engines:
            override_settings['COMPRESS_JINJA2_GET_ENVIRONMENT'] = (
                lambda: self._get_jinja2_env())
        if self.additional_test_settings is not None:
            override_settings.update(self.additional_test_settings)
        # Entered manually (not via decorator) so tearDown can exit it.
        self.override_settings = self.settings(**override_settings)
        self.override_settings.__enter__()
        # Pre-parse the fixture template for each engine under test.
        if 'django' in self.engines:
            self.template_path = os.path.join(
                django_template_dir, self.template_name)
            with io.open(self.template_path,
                         encoding=settings.FILE_CHARSET) as file_:
                self.template = Template(file_.read())
        if 'jinja2' in self.engines:
            self.template_path_jinja2 = os.path.join(
                jinja2_template_dir, self.template_name)
            jinja2_env = override_settings['COMPRESS_JINJA2_GET_ENVIRONMENT']()
            with io.open(self.template_path_jinja2,
                         encoding=settings.FILE_CHARSET) as file_:
                self.template_jinja2 = jinja2_env.from_string(file_.read())
    def tearDown(self):
        self.override_settings.__exit__(None, None, None)
        # Delete the manifest written by the compress command so tests
        # stay independent of each other.
        manifest_path = os.path.join('CACHE', 'manifest.json')
        if default_storage.exists(manifest_path):
            default_storage.delete(manifest_path)
    def _prepare_contexts(self, engine):
        # COMPRESS_OFFLINE_CONTEXT may be one dict or a list of dicts;
        # normalize to a list and wrap for the requested engine.
        contexts = settings.COMPRESS_OFFLINE_CONTEXT
        if not isinstance(contexts, (list, tuple)):
            contexts = [contexts]
        if engine == 'django':
            return [Context(c) for c in contexts]
        if engine == 'jinja2':
            return contexts
        return None
    def _render_template(self, engine):
        # Render the fixture once per offline context and join the results.
        contexts = self._prepare_contexts(engine)
        if engine == 'django':
            return ''.join(self.template.render(c) for c in contexts)
        if engine == 'jinja2':
            return '\n'.join(
                self.template_jinja2.render(c) for c in contexts) + '\n'
        return None
    def _render_script(self, hash):
        # Expected <script> tag for a compressed JS output file.
        return (
            '<script type="text/javascript" src="{}CACHE/js/{}.{}.js">'
            '</script>'.format(
                settings.COMPRESS_URL_PLACEHOLDER, self.expected_basename, hash
            )
        )
    def _render_link(self, hash):
        # Expected <link> tag for a compressed CSS output file.
        return (
            '<link rel="stylesheet" href="{}CACHE/css/{}.{}.css" '
            'type="text/css" />'.format(
                settings.COMPRESS_URL_PLACEHOLDER, self.expected_basename, hash
            )
        )
    def _render_result(self, result, separator='\n'):
        # Join expected tags, then substitute the placeholder with the
        # actual COMPRESS_URL (forcing any lazy URL to a string).
        return (separator.join(result) + '\n').replace(
            settings.COMPRESS_URL_PLACEHOLDER, six.text_type(settings.COMPRESS_URL)
        )
    def _test_offline(self, engine):
        # Compress offline and check both the command output and that
        # rendering afterwards matches the expected tags.
        hashes = self.expected_hash
        if not isinstance(hashes, (list, tuple)):
            hashes = [hashes]
        count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
        self.assertEqual(len(hashes), count)
        self.assertEqual([self._render_script(h) for h in hashes], result)
        rendered_template = self._render_template(engine)
        self.assertEqual(rendered_template, self._render_result(result))
    def test_offline_django(self):
        if 'django' not in self.engines:
            raise SkipTest('This test class does not support django engine.')
        self._test_offline(engine='django')
    def test_offline_jinja2(self):
        if 'jinja2' not in self.engines:
            raise SkipTest('This test class does not support jinja2 engine.')
        self._test_offline(engine='jinja2')
    def _get_jinja2_env(self):
        # Build a Jinja2 environment mirroring the Django feature set the
        # fixtures rely on (imports are local: jinja2 is optional).
        import jinja2
        import jinja2.ext
        from compressor.offline.jinja2 import url_for, SpacelessExtension
        from compressor.contrib.jinja2ext import CompressorExtension
        # Extensions needed for the test cases only.
        extensions = [
            CompressorExtension,
            SpacelessExtension,
            jinja2.ext.with_,
            jinja2.ext.do,
        ]
        loader = self._get_jinja2_loader()
        env = jinja2.Environment(extensions=extensions, loader=loader)
        env.globals['url_for'] = url_for
        return env
    def _get_jinja2_loader(self):
        import jinja2
        # TEMPLATES[1] holds the Jinja2 dirs (see setUp).
        loader = jinja2.FileSystemLoader(
            settings.TEMPLATES[1]['DIRS'], encoding=settings.FILE_CHARSET)
        return loader
class OfflineCompressBasicTestCase(OfflineTestCaseMixin, TestCase):
    """Basic offline compression plus compress-command option handling."""
    templates_dir = 'basic'
    expected_hash = 'a432b6ddb2c4'
    @patch.object(CompressCommand, 'compress')
    def test_handle_no_args(self, compress_mock):
        # handle() with default options runs compression exactly once.
        compress_mock.return_value = {}, 1, []
        CompressCommand().handle()
        self.assertEqual(compress_mock.call_count, 1)
    @patch.object(CompressCommand, 'compress')
    def test_handle_compress_disabled(self, compress_mock):
        # With COMPRESS_ENABLED=False the command refuses to run.
        with self.settings(COMPRESS_ENABLED=False):
            with self.assertRaises(CommandError):
                CompressCommand().handle()
        self.assertEqual(compress_mock.call_count, 0)
    @patch.object(CompressCommand, 'compress')
    def test_handle_compress_offline_disabled(self, compress_mock):
        # With COMPRESS_OFFLINE=False the command refuses to run...
        with self.settings(COMPRESS_OFFLINE=False):
            with self.assertRaises(CommandError):
                CompressCommand().handle()
        self.assertEqual(compress_mock.call_count, 0)
    @patch.object(CompressCommand, 'compress')
    def test_handle_compress_offline_disabled_force(self, compress_mock):
        # ...unless force=True overrides the COMPRESS_OFFLINE check.
        compress_mock.return_value = {}, 1, []
        with self.settings(COMPRESS_OFFLINE=False):
            CompressCommand().handle(force=True)
        self.assertEqual(compress_mock.call_count, 1)
    def test_rendering_without_manifest_raises_exception(self):
        # flush cached manifest
        flush_offline_manifest()
        self.assertRaises(OfflineGenerationError,
                          self.template.render, Context({}))
    def test_rendering_without_manifest_raises_exception_jinja2(self):
        # flush cached manifest
        flush_offline_manifest()
        self.assertRaises(OfflineGenerationError,
                          self.template_jinja2.render, {})
    def _test_deleting_manifest_does_not_affect_rendering(self, engine):
        # Once the manifest has been loaded (get_offline_manifest caches
        # it), deleting the file on disk must not break rendering.
        count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
        get_offline_manifest()
        manifest_path = os.path.join('CACHE', 'manifest.json')
        if default_storage.exists(manifest_path):
            default_storage.delete(manifest_path)
        self.assertEqual(1, count)
        self.assertEqual([self._render_script(self.expected_hash)], result)
        rendered_template = self._render_template(engine)
        self.assertEqual(rendered_template, self._render_result(result))
    def test_deleting_manifest_does_not_affect_rendering(self):
        for engine in self.engines:
            self._test_deleting_manifest_does_not_affect_rendering(engine)
    def test_get_loaders(self):
        TEMPLATE_LOADERS = (
            ('django.template.loaders.cached.Loader', (
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            )),
        )
        with self.settings(TEMPLATE_LOADERS=TEMPLATE_LOADERS):
            from django.template.loaders.filesystem import (
                Loader as FileSystemLoader)
            from django.template.loaders.app_directories import (
                Loader as AppDirectoriesLoader)
            # get_loaders must unwrap the cached loader into the wrapped
            # filesystem and app-directories loaders, in order.
            loaders = CompressCommand().get_loaders()
            self.assertTrue(isinstance(loaders[0], FileSystemLoader))
            self.assertTrue(isinstance(loaders[1], AppDirectoriesLoader))
    @patch("compressor.offline.django.DjangoParser.render_node",
           side_effect=Exception(b"non-ascii character here:\xc3\xa4"))
    def test_non_ascii_exception_messages(self, mock):
        # Byte-string exception payloads must not crash error reporting.
        with self.assertRaises(CommandError):
            CompressCommand().handle(verbosity=0)
class OfflineCompressSkipDuplicatesTestCase(OfflineTestCaseMixin, TestCase):
    """Identical compress blocks are compressed once but rendered twice."""
    templates_dir = 'test_duplicate'
    def _test_offline(self, engine):
        count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
        # The second, identical block is skipped during offline generation,
        # so the command reports a single compressed block and tag.
        self.assertEqual(1, count)
        self.assertEqual([self._render_script('a432b6ddb2c4')], result)
        # Rendering the template, however, emits both (identical) scripts.
        rendered = self._render_template(engine)
        self.assertEqual(rendered, self._render_result(result * 2, ''))
class SuperMixin:
    """Restrict tests that use {{ block.super }} to the Django engine."""
    # Block.super not supported for Jinja2 yet.
    engines = ('django',)
class OfflineCompressBlockSuperTestCase(
        SuperMixin, OfflineTestCaseMixin, TestCase):
    """Offline compression of a template that uses {{ block.super }}."""
    templates_dir = 'test_block_super'
    expected_hash = '68c645740177'
class OfflineCompressBlockSuperMultipleTestCase(
        SuperMixin, OfflineTestCaseMixin, TestCase):
    """{{ block.super }} used through multiple levels of inheritance."""
    templates_dir = 'test_block_super_multiple'
    expected_hash = 'f87403f4d8af'
class OfflineCompressBlockSuperMultipleCachedLoaderTestCase(
        SuperMixin, OfflineTestCaseMixin, TestCase):
    """Same as the multiple-{{ block.super }} case, but with the templates
    served through Django's cached loader."""
    templates_dir = 'test_block_super_multiple_cached'
    expected_hash = 'ea860151aa21'
    additional_test_settings = {
        'TEMPLATE_LOADERS': (
            ('django.template.loaders.cached.Loader', (
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            )),
        )
    }
class OfflineCompressBlockSuperTestCaseWithExtraContent(
        SuperMixin, OfflineTestCaseMixin, TestCase):
    """{{ block.super }} plus extra content: two distinct compressed blocks."""
    templates_dir = 'test_block_super_extra'

    def _test_offline(self, engine):
        count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
        # Both the inherited block and the extra one are compressed.
        self.assertEqual(2, count)
        self.assertEqual([
            self._render_script('9717f9c7e9ff'),
            self._render_script('68c645740177')
        ], result)
        rendered_template = self._render_template(engine)
        self.assertEqual(rendered_template, self._render_result(result, ''))
class OfflineCompressConditionTestCase(OfflineTestCaseMixin, TestCase):
    """Offline compression of a template whose compress tag is conditional
    on a context variable supplied via COMPRESS_OFFLINE_CONTEXT."""
    templates_dir = 'test_condition'
    expected_hash = '58517669cb7c'
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': {
            'condition': 'red',
        }
    }
class OfflineCompressTemplateTagTestCase(OfflineTestCaseMixin, TestCase):
    """Offline compression of a template using custom template tags."""
    templates_dir = 'test_templatetag'
    expected_hash = '16f8880b81ab'
class OfflineCompressStaticTemplateTagTestCase(OfflineTestCaseMixin, TestCase):
    """Offline compression of a template using the {% static %} tag."""
    templates_dir = 'test_static_templatetag'
    expected_hash = '2607a2085687'
class OfflineCompressTemplateTagNamedTestCase(OfflineTestCaseMixin, TestCase):
    """Offline compression of a named {% compress %} block; the output file
    carries the explicit name instead of the default one."""
    templates_dir = 'test_templatetag_named'
    expected_basename = 'output_name'
    expected_hash = 'a432b6ddb2c4'
class OfflineCompressTestCaseWithContext(OfflineTestCaseMixin, TestCase):
    """Offline compression with a single extra context dictionary."""
    templates_dir = 'test_with_context'
    expected_hash = '045b3ad664c8'
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': {
            'content': 'OK!',
        }
    }
class OfflineCompressTestCaseWithContextSuper(
        SuperMixin, OfflineTestCaseMixin, TestCase):
    """Extra offline context combined with {{ block.super }}."""
    templates_dir = 'test_with_context_super'
    expected_hash = '9a8b47adfe17'
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': {
            'content': 'OK!',
        }
    }
class OfflineCompressTestCaseWithContextList(OfflineTestCaseMixin, TestCase):
    """COMPRESS_OFFLINE_CONTEXT given as a list of context dicts; one
    compressed variant (and hash) is expected per dict."""
    templates_dir = 'test_with_context'
    expected_hash = ['3b6cd13d4bde', '5aef37564182', 'c6d6c723a18b']
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': list(offline_context_generator())
    }

    def _prepare_contexts(self, engine):
        # Django needs Context objects; Jinja2 consumes the raw dicts.
        if engine == 'django':
            return [Context(c) for c in settings.COMPRESS_OFFLINE_CONTEXT]
        if engine == 'jinja2':
            return settings.COMPRESS_OFFLINE_CONTEXT
        return None
class OfflineCompressTestCaseWithContextListSuper(
        SuperMixin, OfflineCompressTestCaseWithContextList):
    """List-of-contexts variant combined with {{ block.super }}."""
    templates_dir = 'test_with_context_super'
    expected_hash = ['dc68dd60aed4', 'c2e50f475853', '045b48455bee']
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': list(offline_context_generator())
    }
class OfflineCompressTestCaseWithContextGenerator(
        OfflineTestCaseMixin, TestCase):
    """COMPRESS_OFFLINE_CONTEXT given as a dotted path to a generator
    function, resolved and invoked at compression time."""
    templates_dir = 'test_with_context'
    expected_hash = ['3b6cd13d4bde', '5aef37564182', 'c6d6c723a18b']
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': 'compressor.tests.test_offline.'
                                    'offline_context_generator'
    }

    def _prepare_contexts(self, engine):
        # Resolve the dotted path and call the generator afresh per use.
        module, function = get_mod_func(settings.COMPRESS_OFFLINE_CONTEXT)
        contexts = getattr(import_module(module), function)()
        if engine == 'django':
            return (Context(c) for c in contexts)
        if engine == 'jinja2':
            return contexts
        return None
class OfflineCompressTestCaseWithContextGeneratorSuper(
        SuperMixin, OfflineCompressTestCaseWithContextGenerator):
    """Generator-supplied contexts combined with {{ block.super }}."""
    templates_dir = 'test_with_context_super'
    expected_hash = ['dc68dd60aed4', 'c2e50f475853', '045b48455bee']
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': 'compressor.tests.test_offline.'
                                    'offline_context_generator'
    }
class OfflineCompressStaticUrlIndependenceTestCase(
        OfflineCompressTestCaseWithContextGenerator):
    """
    Test that the offline manifest is independent of STATIC_URL.
    I.e. users can use the manifest with any other STATIC_URL in the future.
    """
    templates_dir = 'test_static_url_independence'
    expected_hash = '5014de5edcbe'
    additional_test_settings = {
        'STATIC_URL': '/custom/static/url/',
        # We use ``COMPRESS_OFFLINE_CONTEXT`` generator to make sure that
        # ``STATIC_URL`` is not cached when rendering the template.
        'COMPRESS_OFFLINE_CONTEXT': (
            'compressor.tests.test_offline.static_url_context_generator'
        )
    }

    def _test_offline(self, engine):
        count, result = CompressCommand().handle_inner(engines=[engine], verbosity=0)
        self.assertEqual(1, count)
        self.assertEqual([self._render_script(self.expected_hash)], result)
        self.assertEqual(
            self._render_template(engine), self._render_result(result))
        # Changing STATIC_URL setting doesn't break things despite that
        # offline compression was made with different STATIC_URL.
        with self.settings(STATIC_URL='/another/static/url/'):
            self.assertEqual(
                self._render_template(engine), self._render_result(result))
class OfflineCompressTestCaseWithContextVariableInheritance(
        OfflineTestCaseMixin, TestCase):
    """Parent template chosen through a context variable; the rendered
    output carries a leading newline from the inheritance chain."""
    templates_dir = 'test_with_context_variable_inheritance'
    expected_hash = '0d88c897f64a'
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': {
            'parent_template': 'base.html',
        }
    }

    def _render_result(self, result, separator='\n'):
        # The inherited base template contributes a leading newline, so the
        # expected rendering is the standard result prefixed with one.
        return '\n' + super(
            OfflineCompressTestCaseWithContextVariableInheritance, self
        )._render_result(result, separator)
class OfflineCompressTestCaseWithContextVariableInheritanceSuper(
        SuperMixin, OfflineTestCaseMixin, TestCase):
    """Variable parent template combined with {{ block.super }}; one context
    dict per candidate parent template."""
    templates_dir = 'test_with_context_variable_inheritance_super'
    additional_test_settings = {
        'COMPRESS_OFFLINE_CONTEXT': [{
            'parent_template': 'base1.html',
        }, {
            'parent_template': 'base2.html',
        }]
    }
| |
<reponame>gifford-lab/seqgra<filename>seqgra/evaluator/gradientbased/gradientbasedevaluator.py
"""Gradient-based Feature Importance Evaluators
"""
from abc import abstractmethod
import math
from typing import Any, List, Optional
import numpy as np
import pandas as pd
import torch
import seqgra.constants as c
from seqgra.evaluator import FeatureImportanceEvaluator
from seqgra.learner import Learner
class GradientBasedEvaluator(FeatureImportanceEvaluator):
"""Abstract base class for gradient-based feature importance evaluators
Only supports PyTorch models.
"""
@abstractmethod
def __init__(self, evaluator_id: str, evaluator_name: str,
learner: Learner, output_dir: str,
importance_threshold: Optional[float] = None,
silent: bool = False) -> None:
super().__init__(evaluator_id, evaluator_name, learner, output_dir,
supported_libraries=[c.LibraryType.TORCH],
silent=silent)
if importance_threshold:
self.importance_threshold = importance_threshold
else:
self.importance_threshold = 0.01
@abstractmethod
def explain(self, x, y):
pass
# result = self.explainer.explain(data, label)
# return self._explainer_transform(data, result)
# def _explainer_transform(self, data, result):
# return result.cpu().numpy()
def _evaluate_model(self, x: List[str], y: List[str],
annotations: List[str]) -> Any:
# encode
encoded_x = self.learner.encode_x(x)
encoded_y = self.learner.encode_y(y)
# convert bool to float32 and long, as expected by explainers
encoded_x = encoded_x.astype(np.float32)
encoded_y = encoded_y.astype(np.int64)
self._check_tensor_dimensions(encoded_x)
# convert np array to torch tensor
encoded_x = torch.from_numpy(encoded_x)
encoded_y = torch.from_numpy(encoded_y)
# store input tensor, label tensor and model on correct device
encoded_x = encoded_x.to(self.learner.device)
encoded_y = encoded_y.to(self.learner.device)
self.learner.model.to(self.learner.device)
encoded_x = torch.autograd.Variable(encoded_x, requires_grad=True)
# enable inference mode
self.learner.model.eval()
fi_matrix = self.explain(encoded_x, encoded_y)
fi_matrix = fi_matrix.cpu().numpy()
self._check_tensor_dimensions(fi_matrix)
fi_matrix = self._convert_to_nwc(fi_matrix)
return (fi_matrix, x, y, annotations)
def get_layer(self, key_list):
a = self.learner.model
for key in key_list:
a = a._modules[key]
return a
def _convert_to_nwc(self, x) -> Any:
if self.learner.definition.library == c.LibraryType.TENSORFLOW and \
self.learner.definition.input_encoding == "2D":
# from (N, H, W, C) to (N, W, C)
x = np.squeeze(x, axis=1)
elif self.learner.definition.library == c.LibraryType.TORCH:
if self.learner.definition.input_encoding == "2D":
# from (N, C, 1, W) to (N, C, W)
x = np.squeeze(x, axis=2)
# from (N, C, W) to (N, W, C)
x = np.transpose(x, (0, 2, 1))
return x
def _write_result_df(
self, fi_matrix, x: List[str], y: List[str],
annotations: List[str], set_name: str = "test") -> None:
precision_column: List[float] = list()
recall_column: List[float] = list()
specificity_column: List[float] = list()
f1_column: List[float] = list()
for example_id, annotation in enumerate(annotations):
fi_vector = GradientBasedEvaluator._prepare_fi_vector(
fi_matrix[example_id, :, :])
fi_vector = GradientBasedEvaluator._normalize_fi_vector(fi_vector)
precision_column += \
[GradientBasedEvaluator._calculate_smooth_precision(
fi_vector, annotation)]
recall_column += [GradientBasedEvaluator._calculate_smooth_recall(
fi_vector, annotation)]
specificity_column += \
[GradientBasedEvaluator._calculate_smooth_specificity(
fi_vector, annotation)]
f1_column += [GradientBasedEvaluator._calculate_smooth_f1(
fi_vector, annotation)]
df = pd.DataFrame({"x": x,
"y": y,
"annotation": annotations,
"precision": precision_column,
"recall": recall_column,
"specificity": specificity_column,
"f1": f1_column})
df.to_csv(self.output_dir + set_name + "-df.txt", sep="\t",
index=False)
def _save_results(self, results, set_name: str = "test",
suppress_plots: bool = False) -> None:
np.save(self.output_dir + set_name + "-feature-importance-matrix.npy",
results[0])
self._write_result_df(results[0], results[1], results[2], results[3],
set_name)
    def _visualize_grammar_agreement(self, results,
                                     set_name: str = "test") -> None:
        # Produce both agreement views: thresholded (TP/FN/FP/TN groups)
        # and unthresholded (raw importance shading).
        self._visualize_thresholded_grammar_agreement(results, set_name)
        self._visualize_unthresholded_grammar_agreement(results, set_name)
    def _visualize_thresholded_grammar_agreement(
            self, results, set_name: str = "test") -> None:
        """Plot grammar agreement after thresholding feature importances."""
        df: pd.DataFrame = self._convert_to_data_frame(results)
        # only write/plot when there is at least one position
        if len(df.index) > 0:
            df.to_csv(self.output_dir + set_name +
                      "-grammar-agreement-thresholded-df.txt",
                      sep="\t", index=False)
            pdf_file_name: str = set_name + "-grammar-agreement-thresholded.pdf"
            # _prepare_r_data_frame is defined elsewhere in this class;
            # presumably it reshapes the frame for the external plotting
            # script — confirm against the full class
            df: pd.DataFrame = self._prepare_r_data_frame(df)
            # ":NL:" appears to be a newline marker for the plotting
            # command — TODO confirm
            caption: str = "feature importance threshold: " + \
                str(self.importance_threshold) + \
                ":NL:(positions above this threshold are considered grammar positions)"
            self._execute_plotting_command(df, set_name, pdf_file_name,
                                           self.evaluator_name, caption)
    def _visualize_unthresholded_grammar_agreement(
            self, results, set_name: str = "test") -> None:
        """Plot grammar agreement using raw (unthresholded) importances."""
        df: pd.DataFrame = GradientBasedEvaluator._convert_to_unthresholded_data_frame(
            results)
        # only write/plot when there is at least one position
        if len(df.index) > 0:
            df.to_csv(self.output_dir + set_name + "-grammar-agreement-df.txt",
                      sep="\t", index=False)
            pdf_file_name: str = set_name + "-grammar-agreement.pdf"
            df: pd.DataFrame = self._prepare_unthresholded_r_data_frame(df)
            caption: str = "luminosity encodes feature importance: from light (low feature importance) to dark (high feature importance):NL:hue encodes annotation: green (grammar position), red (background position)"
            self._execute_plotting_command(df, set_name, pdf_file_name,
                                           self.evaluator_name, caption)
    def _prepare_unthresholded_r_data_frame(self,
                                            df: pd.DataFrame) -> pd.DataFrame:
        """Attach per-label agreement statistics to the long-format frame.

        For each example, smooth precision/recall/specificity/F1 are computed
        from its normalized importance values and its annotation (recovered
        from the per-position group characters), then aggregated per label;
        ``n`` ends up as the number of examples per label.
        """
        df["precision"] = 0.0
        df["recall"] = 0.0
        df["specificity"] = 0.0
        df["f1"] = 0.0
        df["n"] = 0
        for example_id in set(df.example.tolist()):
            example_df = df.loc[df.example == example_id]
            fi_vector = np.asarray(example_df.value.tolist(), dtype=np.float32)
            # the annotation string is the concatenation of the group chars
            annotation: str = "".join(example_df.group.tolist())
            df.loc[df.example == example_id, "precision"] = \
                GradientBasedEvaluator._calculate_smooth_precision(
                    fi_vector, annotation)
            df.loc[df.example == example_id, "recall"] = \
                GradientBasedEvaluator._calculate_smooth_recall(
                    fi_vector, annotation)
            df.loc[df.example == example_id, "specificity"] = \
                GradientBasedEvaluator._calculate_smooth_specificity(
                    fi_vector, annotation)
            df.loc[df.example == example_id, "f1"] = \
                GradientBasedEvaluator._calculate_smooth_f1(
                    fi_vector, annotation)
            # each row contributes 1/len(example), so the per-label sum
            # below counts whole examples
            df.loc[df.example == example_id, "n"] = 1.0 / len(example_df.index)
        df["precision"] = df.groupby("label")["precision"].transform("mean")
        df["recall"] = df.groupby("label")["recall"].transform("mean")
        df["specificity"] = df.groupby(
            "label")["specificity"].transform("mean")
        df["f1"] = df.groupby("label")["f1"].transform("mean")
        df["n"] = round(df.groupby("label")["n"].transform("sum"))
        return df
def _check_tensor_dimensions(self, tensor) -> None:
if self.learner.definition.library == c.LibraryType.TENSORFLOW:
if self.learner.definition.input_encoding == "2D":
expected_shape: str = "(N, 1, W, C)"
channel_dim: int = 3
height_dim: int = 1
n_dims: int = 4
else:
expected_shape: str = "(N, W, C)"
channel_dim: int = 2
height_dim: int = None
n_dims: int = 3
elif self.learner.definition.library == c.LibraryType.TORCH:
channel_dim: int = 1
if self.learner.definition.input_encoding == "2D":
expected_shape: str = "(N, C, 1, W)"
height_dim: int = 2
n_dims: int = 4
else:
expected_shape: str = "(N, C, W)"
height_dim: int = None
n_dims: int = 3
if len(tensor.shape) != n_dims:
raise Exception("tensor shape invalid: expected " +
expected_shape + ", got " +
str(tensor.shape))
if height_dim and tensor.shape[height_dim] != 1:
raise Exception("tensor shape invalid: expected "
"height dimension size of 1, got " +
str(tensor.shape[height_dim]))
if self.learner.definition.sequence_space == c.SequenceSpaceType.DNA:
if tensor.shape[channel_dim] != 4:
raise Exception("tensor shape invalid for DNA "
"sequence space: expected 4 channels, got " +
str(tensor.shape[channel_dim]))
elif self.learner.definition.sequence_space == c.SequenceSpaceType.PROTEIN:
if tensor.shape[channel_dim] != 20:
raise Exception("tensor shape invalid for protein "
"sequence space: expected 20 "
"channels, got " +
str(tensor.shape[channel_dim]))
def _convert_to_data_frame(self, results) -> pd.DataFrame:
"""Takes gradient-based evaluator-specific results and turns them into
a pandas data frame.
The data frame has the following columns:
- example_column (int): example index
- position (int): position within example (one-based)
- group (str): group label, one of the following:
- "TP": grammar position, important for model prediction
- "FN": grammar position, not important for model prediction,
- "FP": background position, important for model prediction,
- "TN": background position, not important for model prediction
- label (str): label of example, e.g., "cell type 1"
"""
fi_matrix = results[0]
y: List[str] = results[2]
annotations: List[str] = results[3]
example_column: List[int] = list()
position_column: List[int] = list()
group_column: List[str] = list()
label_column: List[str] = list()
for example_id, annotation in enumerate(annotations):
example_column += [example_id] * len(annotation)
position_column += list(range(1, len(annotation) + 1))
group_column += [self.__get_agreement_group(
char, fi_matrix[example_id, i, :])
for i, char in enumerate(annotation)]
label_column += [y[example_id]] * len(annotation)
df = pd.DataFrame({"example": example_column,
"position": position_column,
"group": group_column,
"label": label_column})
return df
def __get_agreement_group(self, annotation_position: str,
importance_vector) -> str:
if annotation_position == c.PositionType.GRAMMAR:
if np.max(importance_vector) < self.importance_threshold:
return "FN"
else:
return "TP"
else:
if np.max(importance_vector) < self.importance_threshold:
return "TN"
else:
return "FP"
@staticmethod
def _convert_to_unthresholded_data_frame(results) -> pd.DataFrame:
"""Takes gradient-based evaluator-specific results and turns them into
a pandas data frame.
The data frame has the following columns:
- example_column (int): example index
- position (int): position within example (one-based)
- value (float): normalized feature importance
- group (str): group label, one of the following:
- "G": grammar position
- "C": confounding position
- "_": background position
- label (str): label of example, e.g., "cell type 1"
"""
fi_matrix = results[0]
y: List[str] = results[2]
annotations: List[str] = results[3]
example_column: List[int] = list()
position_column: List[int] = list()
value_column: List[int] = list()
unnormalized_value_column: List[int] = list()
group_column: List[str] = list()
label_column: List[str] = list()
for example_id, annotation in enumerate(annotations):
example_column += [example_id] * len(annotation)
position_column += list(range(1, len(annotation) + 1))
fi_vector = GradientBasedEvaluator._prepare_fi_vector(
fi_matrix[example_id, :, :])
unnormalized_value_column += list(fi_vector)
normalized_fi_vector = GradientBasedEvaluator._normalize_fi_vector(
fi_vector)
value_column += list(normalized_fi_vector)
group_column += list(annotation)
label_column += [y[example_id]] * len(annotation)
df = pd.DataFrame({"example": example_column,
"position": position_column,
"value": value_column,
"unnormalized_value": unnormalized_value_column,
"group": group_column,
"label": label_column})
return df
@staticmethod
def _calculate_smooth_precision(normalized_fi_vector,
annotation: str) -> float:
grammar_positions: List[int] = [i
for i, char in enumerate(annotation)
if char == c.PositionType.GRAMMAR]
non_grammar_positions: List[int] = [i
for i, char in enumerate(annotation)
if char != c.PositionType.GRAMMAR]
total_fi: float = normalized_fi_vector.sum()
grammar_fi: float = normalized_fi_vector[grammar_positions].sum()
non_grammar_fi: float = normalized_fi_vector[non_grammar_positions].sum(
)
if not grammar_positions and not math.isclose(non_grammar_fi, 0.0):
return 1.0
elif not grammar_positions:
return 0.0
elif math.isclose(total_fi, 0.0):
return 0.0
else:
return grammar_fi / total_fi
@staticmethod
def _calculate_smooth_recall(normalized_fi_vector,
annotation: str) -> float:
grammar_positions: List[int] = [i
for i, char in enumerate(annotation)
if char == c.PositionType.GRAMMAR]
non_grammar_positions: List[int] = \
[i
for i, char in enumerate(annotation)
if char != c.PositionType.GRAMMAR]
grammar_fi: float = normalized_fi_vector[grammar_positions].sum()
non_grammar_fi: float = \
| |
map as a separate .csv file. to be run
after clean() on the ./cleaned directory, also outputs a file
identifying the sizes of structures, so the largest value can be used
with HeaderAsPSaM()
'''
os.makedirs('./Completed', exist_ok=True)
os.makedirs('./Error_NotEqual', exist_ok=True)
os.makedirs('./Error_Broken', exist_ok=True)
os.makedirs('./Error_Small', exist_ok=True)
for File in tqdm.tqdm(os.listdir(directory)):
try:
TheFile = '{}/{}'.format(directory, File)
pose = pose_from_pdb(TheFile)
DSSP = pyrosetta.rosetta.protocols.moves.DsspMover()
DSSP.apply(pose)
sasa_calc = pyrosetta.rosetta.core.scoring.sasa.SasaCalc()
sasa_calc.calculate(pose)
size = pose.total_residue()
aa = []
ss = []
phi = []
psi = []
sasa = []
info = []
ctmp = []
m = []
surf = list(sasa_calc.get_residue_sasa())
for r in range(size):
if pose.residue(r+1).is_protein():
aa.append(pose.sequence(r+1, r+1))
ss.append(pose.secstruct(r+1))
p = pose.phi(r+1)
if p < 0: p = p+360
phi.append(p)
s = pose.psi(r+1)
if s < 0: s = s+360
psi.append(s)
sasa.append(surf[r])
for r in range(0, size):
for R in range(0, size):
if pose.residue(r+1).is_protein() and\
pose.residue(R+1).is_protein():
CAr = pose.residue(r+1).xyz('CA')
CAR = pose.residue(R+1).xyz('CA')
CAr_CAR_vector = CAR-CAr
Cont = CAr_CAR_vector.norm()
if Cont <= 12: ctmp.append(Cont)
else: ctmp.append(0)
if len(aa) >= 50:
try:
assert len(aa) == len(ss) == len(phi)\
== len(psi) == len(sasa) == math.sqrt(len(ctmp))
for AA,SS,P,S,SASA in zip(aa,ss,phi,psi,sasa):
info.append('{},{},{},{},{}'\
.format(AA, SS, P, S, SASA))
Info = ','.join(info)
with open('./AsPSa_noheader_nofill.csv', 'a') as data:
data.write(File + ',' + Info + '\n')
with open('lengths.txt', 'a') as length:
length.write(str(len(aa))+',')
for x in ctmp:
m.append('{}'.format(x))
M = ','.join(m)
with open('./M_noheader_nofill.csv', 'a') as data:
data.write(File + ',' + M + '\n')
os.system('mv {} ./Completed'.format(TheFile))
except:
os.system('mv {} ./Error_NotEqual'\
.format(TheFile))
else: os.system('mv {} ./Error_Small'.format(TheFile))
except: passos.system('mv {} ./Error_Broken'.format(TheFile))
def HeaderAsPSaM(self, choice='AsPSa'):
'''
Constructs a .csv header and completes the dataset. To find the value of
the largest structure run: sort -nk 1 lengths.txt
'''
with open('lengths.txt', 'r') as L:
length = int(max(L.readlines()[0].strip().split(',')))
header = ['PDB_ID']
if choice == 'AsPSa':
for i in range(1, length+1):
header.append(',aa_{},ss_{},phi_{},psi_{},sasa_{}'\
.format(i, i, i, i, i))
header = ''.join(header)
with open('./AsPSa_noheader_nofill.csv', 'r') as data:
with open('./AsPSa_nofill.csv', 'w') as head:
head.write(header+'\n')
for line in data:
head.write(line)
os.remove('./AsPSa_noheader_nofill.csv')
elif choice == 'M':
for r in range(1, length+1):
for c in range(1, length+1):
header.append(',{}{}'.format(r, c))
header = ''.join(header)
with open('./M_noheader_nofill.csv', 'r') as data:
with open('./M_nofill.csv', 'w') as head:
head.write(header+'\n')
for line in data:
head.write(line)
os.remove('./M_noheader_nofill.csv')
def Fill(self, filename):
''' Fills missing .csv table spaces with zeros '''
name = filename.split('_')[0]
with open(filename) as f:
with open(name+'.csv', 'a') as F:
first_line = f.readline()
F.write(first_line)
size = len(first_line.strip().split(','))
for line in f:
line = line.strip().split(',')
gap = size - len(line)
for zero in range(gap):
line.append('0')
new_line = ','.join(line)
F.write(new_line + '\n')
os.remove(filename)
    def VectoriseAsPSaM(self, filenameA='AsPSa.csv', filenameM='M.csv'):
        '''
        Placeholder (not yet implemented): intended to vectorise the backbone
        PS and CM datasets, normalise them, combine them, construct the final
        tensor, and export the result as a serial.
        '''
        pass
def build(self, switches='', directory='PDBDatabase'):
if len(switches) == 20:
switch = list(switches)
if switch[0] == '1': self.Database('DATABASE', directory)
if switch[1] == '1': self.Extract(directory)
if switch[2] == '1': self.NonProtein(directory)
if switch[3] == '1': self.Size(directory, 80, 150)
if switch[4] == '1': self.Break(directory)
if switch[5] == '1': self.Loops(directory, 10)
if switch[6] == '1': self.Renumber(directory)
if switch[7] == '1': self.Rg(directory, 15)
########## --- HUMAN EYE FILTERING --- ##########
if switch[8] == '1': self.Clean(directory)
if switch[9] == '1': self.Path('PDBCleaned', '{PATH}')
if switch[10] == '1': self.RelaxHPC('~/Rosetta', 829)
if switch[11] == '1': self.Relax('PDBCleaned')
if switch[12] == '1': self.DatasetAsPSaM('PDBCleaned')
if switch[13] == '1': self.HeaderAsPSaM('AsPSa')
if switch[14] == '1':
self.HeaderAsPSaM('M')
os.remove('lengths.txt')
if switch[15] == '1':
self.Fill('AsPSa_nofill.csv')
self.Fill('M_nofill.csv')
if switch[16] == '1': self.DatasetPSCM('PDBCleaned')
if switch[17] == '1': self.C_Max('dataset_CM.csv')
if switch[18] == '1': self.VectorisePSCM()
if switch[18] == '1': self.VectoriseAsPSaM()
else: print('\x1b[31m[-] Error\x1b[33m: Wrong string length\x1b[0m')
def Vall(filename='vall.jul19.2011', m=16800, nx=1490):
    '''
    Compile the PDB IDs, chains, phi, psi, omega, and SASA of all the structures
    from the Rosetta vall.jul19.2011 database into a .csv file
    '''
    # NOTE(review): parameter m is unused in this function — confirm intent.
    assert os.path.isfile('./{}'.format(filename)),\
    'Make sure the vall.jul19.2011 file is in the same directory as this script'
    with open(filename, 'r') as f:
        with open('Fragments.csv', 'w') as F:
            # one group of six columns per residue position, up to nx residues
            header = ['PDBID,Chain']
            for i in range(1, nx+1):
                header.append(',AA_{},SS_{},P_{},S_{},O_{},SASA_{}'\
                .format(i, i, i, i, i, i))
            header = ''.join(header)
            F.write(header + '\n')
            # skip the 30-line vall file preamble
            for i in range(30): next(f)
            ID = []
            CH = []
            AA = []
            SS = []
            P = []
            S = []
            O = []
            SASA= []
            ID_seen = set()
            for line in f:
                line = line.strip().split()
                if line[0] not in ID_seen:
                    # new structure ID: flush the accumulated record first
                    # (on the very first data line exp is empty and nothing
                    # is written)
                    exp = []
                    for aa, ss, p, s, o, sasa in zip(AA, SS, P, S, O, SASA):
                        exp.append('{},{},{},{},{},{}'\
                        .format(aa, ss, p, s, o, sasa))
                    exp = ','.join(exp)
                    if exp == '': pass
                    else: F.write(ID + ',' + CH + ',' + exp + '\n')
                    ID = None
                    CH = None
                    AA = []
                    SS = []
                    P = []
                    S = []
                    O = []
                    SASA = []
                    ID_seen.add(line[0])
                    # vall IDs look like '<pdbid><chain>' — split accordingly
                    ID = line[0][:4].upper()
                    CH = line[0][-1].upper()
                    AA.append(line[1])
                    SS.append(line[2])
                    # columns 14/15/16 taken as phi/psi/omega and 19 as SASA
                    # per the vall.jul19.2011 layout — TODO confirm
                    P.append(line[14])
                    S.append(line[15])
                    O.append(line[16])
                    SASA.append(line[19])
                else:
                    # continuation of the current structure
                    ID = line[0][:4].upper()
                    CH = line[0][-1].upper()
                    AA.append(line[1])
                    SS.append(line[2])
                    P.append(line[14])
                    S.append(line[15])
                    O.append(line[16])
                    SASA.append(line[19])
            # flush the final structure (written without a trailing newline)
            exp = []
            for aa, ss, p, s, o, sasa in zip(AA, SS, P, S, O, SASA):
                exp.append('{},{},{},{},{},{}'\
                .format(aa, ss, p, s, o, sasa))
            exp = ','.join(exp)
            F.write(ID + ',' + CH + ',' + exp)
def Frag_vectorise(filename='Fragments.csv', nx=1452):
    ''' Vectorises the fragments dataset, normalises it, then serialises it

    Reads the Fragments.csv table produced by Vall(), one-hot encodes amino
    acids (20 classes) and secondary structure (3 classes), min/max
    normalises the angle and SASA columns, pads every example to nx
    positions, and writes the X/Y tensors to Frag_X.h5 / Frag_Y.h5.
    '''
    # 1. Import data
    rows = len(open(filename).readlines()) - 1
    # 2. Generate a list of random number of rows
    lines = list(range(1, rows + 1))
    random.shuffle(lines)
    # 3. Open CSV file
    with open(filename, 'r') as File: all_lines_variable = File.readlines()
    PDBID, CHAIN, X, Y = [], [], [], []
    for i in tqdm.tqdm(lines):
        # 4. Import data line by line
        line = all_lines_variable[i]
        line = line.strip().split(',')
        if line[0] == '1OFD': continue # Causes an error
        # columns repeat in groups of six: aa, ss, phi, psi, omega, sasa
        aa = np.array(line[2::6])
        ss = np.array(line[3::6])
        p = np.array(line[4::6])
        s = np.array(line[5::6])
        o = np.array(line[6::6])
        sasa = np.array(line[7::6])
        p = np.array([float(i) for i in p])
        s = np.array([float(i) for i in s])
        o = np.array([float(i) for i in o])
        sasa = np.array([float(i) for i in sasa])
        # 5. Re-format data (map amino-acid letters to integer classes)
        aa[aa=='A'] = 0
        aa[aa=='C'] = 1
        aa[aa=='D'] = 2
        aa[aa=='E'] = 3
        aa[aa=='F'] = 4
        aa[aa=='G'] = 5
        aa[aa=='H'] = 6
        aa[aa=='I'] = 7
        aa[aa=='K'] = 8
        aa[aa=='L'] = 9
        aa[aa=='M'] = 10
        aa[aa=='N'] = 11
        aa[aa=='P'] = 12
        aa[aa=='Q'] = 13
        aa[aa=='R'] = 14
        aa[aa=='S'] = 15
        aa[aa=='T'] = 16
        aa[aa=='V'] = 17
        aa[aa=='W'] = 18
        aa[aa=='Y'] = 19
        ss[ss=='L'] = 0
        ss[ss=='H'] = 1
        ss[ss=='E'] = 2
        # shift negative dihedral angles into the [0, 360) range
        p[p<0] = p[p<0] + 360
        s[s<0] = s[s<0] + 360
        o[o<0] = o[o<0] + 360
        aa = aa.astype(int)
        ss = ss.astype(int)
        # 6. Padding categories (-1 marks positions beyond the real length)
        gap = nx - aa.size
        for pad in range(gap):
            aa = np.append(aa, -1)
            ss = np.append(ss, -1)
        # 7. One-hot encode amino acid sequences and secondary structures
        # (-1 padding becomes an all-zero vector)
        Aminos = []
        for x in aa:
            letter = [0 for _ in range(20)]
            if x != -1: letter[x] = 1
            Aminos.append(letter)
        Struct = []
        for x in ss:
            letter = [0 for _ in range(3)]
            if x != -1: letter[x] = 1
            Struct.append(letter)
        aa = np.array(Aminos)
        ss = np.array(Struct)
        # 8. Normalise data [min/max]
        # NOTE(review): 277 is presumably the maximum SASA value in the
        # dataset — confirm
        p = (p-0)/(360-0)
        s = (s-0)/(360-0)
        o = (o-0)/(360-0)
        sasa = (sasa-0)/(277-0)
        # 9. Padding values
        for pad in range(gap):
            p = np.append(p, 0)
            s = np.append(s, 0)
            o = np.append(o, 0)
            sasa = np.append(sasa, 0)
        # 10. Expand axis
        p = np.expand_dims(p, axis=1)
        s = np.expand_dims(s, axis=1)
        o = np.expand_dims(o, axis=1)
        sasa = np.expand_dims(sasa, axis=1)
        # 11. Export: features = one-hot aa+ss, targets = angle columns
        featur = np.concatenate((aa, ss), axis=1)
        angles = np.concatenate((p, s, o), axis=1)
        PDBID.append(line[0])
        CHAIN.append(line[1])
        X.append(featur)
        Y.append(angles)
    PDBID = np.array(PDBID)
    CHAIN = np.array(CHAIN)
    PDBID = np.expand_dims(PDBID, axis=1)
    CHAIN = np.expand_dims(CHAIN, axis=1)
    X = np.array(X)
    Y = np.array(Y)
    print('X =', X.shape)
    print('Y =', Y.shape)
    # 12. Serialise tensors
    with h5py.File('Frag_Y.h5', 'w') as y:
        dset = y.create_dataset('default', data=Y)
    with h5py.File('Frag_X.h5', 'w') as x:
        dset = x.create_dataset('default', data=X)
def SQM(filename):
    '''
    Structure Quality Metric:
    Calculates the ratio of helices and sheets to loops, the percent of amino
    acids comprising the structure core, and the radius of gyration as values
    between 0.0-1.0, it then averages the three values. Returns a value between
    0.0-1.0 where good structure >= 0.6
    '''
    parser = Bio.PDB.PDBParser()
    structure = parser.get_structure('{}'.format(filename), filename)
    dssp = Bio.PDB.DSSP(structure[0], filename, acc_array='Wilke')
    # Max accessible surface area per residue (Wilke scale); used to turn
    # DSSP's relative accessibility back into an absolute SASA value.
    AminoAcid = { 'A':129, 'P':159, 'N':195, 'H':224,
                  'V':174, 'Y':263, 'C':167, 'K':236,
                  'I':197, 'F':240, 'Q':225, 'S':155,
                  'L':201, 'W':285, 'E':223, 'T':172,
                  'M':224, 'R':274, 'G':104, 'D':193}
    sec_struct = []
    SASA = []
    for aa in dssp:
        # Collapse DSSP's 8-state codes to helix/sheet/loop. Unknown codes
        # (e.g. 'P' from newer DSSP versions) now default to loop instead of
        # silently reusing the previous residue's value (or raising
        # NameError on the first residue).
        if aa[2] == 'G' or aa[2] == 'H' or aa[2] == 'I': ss = 'H'
        elif aa[2] == 'B' or aa[2] == 'E': ss = 'S'
        else: ss = 'L'
        sec_struct.append(ss)
        # classify burial: Core / Boundary / Surface
        sasa = AminoAcid[aa[1]]*aa[3]
        if sasa <= 25: sasa = 'C'
        elif 25 < sasa < 40: sasa = 'B'
        elif sasa >= 40: sasa = 'S'
        SASA.append(sasa)
    ''' Secondary structure measurement '''
    H = len([x for x in sec_struct if x == 'H'])
    S = len([x for x in sec_struct if x == 'S'])
    L = len([x for x in sec_struct if x == 'L'])
    total = len(sec_struct)
    ratio = (H+S)/total
    # logistic squash: mostly-structured chains (ratio > 0.5) score near 1
    limit = 1
    slope = 10
    bias = 0.5
    SS = limit/(1+np.exp(slope*(bias-ratio)))
    ''' SASA measurement '''
    surface = len([x for x in SASA if x == 'S'])
    boundery = len([x for x in SASA if x == 'B'])
    in_core = len([x for x in SASA if x == 'C'])
    total = len(SASA)
    percent = (in_core*100)/total
    # Gaussian-shaped score centred at 30% core residues
    Core = (2.50662/math.sqrt(2*(math.pi)))*math.exp(-((percent-30)**2)/100)
    ''' Radius of gyration measurement '''
    coord = list()
    mass = list()
    # with-block closes the file (previously leaked); the bare except keeps
    # the original best-effort skip of non-atom / malformed lines
    with open(filename, 'r') as Structure:
        for line in Structure:
            try:
                line = line.split()
                x = float(line[6])
                y = float(line[7])
                z = float(line[8])
                # BUG FIX: only record the coordinate together with a known
                # element mass; previously coords of other elements (e.g. H)
                # were appended without a matching mass, misaligning the
                # coord/mass pairing used below.
                if line[-1] == 'C': element_mass = 12.0107
                elif line[-1] == 'O': element_mass = 15.9994
                elif line[-1] == 'N': element_mass = 14.0067
                elif line[-1] == 'S': element_mass = 32.065
                else: continue
                coord.append([x, y, z])
                mass.append(element_mass)
            except: pass
    xm = [(m*i, m*j, m*k) for (i, j, k), m in zip(coord, mass)]
    tmass = sum(mass)
    rr = sum(mi*i + mj*j + mk*k for (i, j, k), (mi, mj, mk) in zip(coord, xm))
    mm = sum((sum(i)/tmass)**2 for i in zip(*xm))
    rg = math.sqrt(rr/tmass-mm)
    # Gaussian-shaped score centred at Rg = 12 Angstrom
    Rg = (2.50662/math.sqrt(2*(math.pi)))*math.exp(-((rg-12)**2)/40)
    ''' The metric '''
    TheMetric = sum([SS, Core, Rg])/3
    return(round(TheMetric, 5))
class fold():
''' Folds a protein structure given the phi/psi angles and contact map '''
    def __init__(self, Pa, Sa, CM):
        # Pa/Sa: phi/psi angle vectors; CM: flattened contact map.
        # Assumes a fixed 150x150 contact map — TODO confirm against callers.
        CM = np.reshape(CM, (150, 150))
        # Structure length taken as the count of non-zero entries on the
        # first superdiagonal (presumably consecutive-residue contacts are
        # non-zero inside the real structure and zero in the padding).
        self.size = len([i for i in np.diag(CM, k=1) if i!=0])
        # upper/lower triangles of the contact map, trimmed to the length
        self.U = np.triu(CM, k=0)[:self.size, :self.size]
        self.L = np.tril(CM, k=0)[:self.size, :self.size]
        # angle vectors trimmed to the same length
        self.P = np.array(Pa)[:self.size]
        self.S = np.array(Sa)[:self.size]
def upper_lower(self, side, | |
a._collect_garbage()
self.assertEqual(a.statistics['added'], 1)
self.assertEqual(a.statistics.get('aged_out', 0), 0)
self.assertEqual(a.statistics.get('garbage_collected', 0), 0)
self.assertEqual(a.statistics.get('removed', 0), 0)
CUR_LOGICAL_TIME = 3
a._age_out()
self.assertEqual(a.statistics['added'], 1)
self.assertEqual(a.statistics.get('removed', 0), 0)
self.assertEqual(a.statistics.get('garbage_collected', 0), 0)
self.assertEqual(a.statistics['aged_out'], 0)
CUR_LOGICAL_TIME = 4
a._poll()
a._sudden_death()
a._age_out()
a._collect_garbage()
self.assertEqual(a.statistics['added'], 1)
self.assertEqual(a.statistics.get('removed', 0), 0)
self.assertEqual(a.statistics.get('garbage_collected', 0), 0)
self.assertEqual(a.statistics['aged_out'], 1)
CUR_LOGICAL_TIME = 5
a._age_out()
self.assertEqual(a.statistics['added'], 1)
self.assertEqual(a.statistics.get('removed', 0), 0)
self.assertEqual(a.statistics.get('garbage_collected', 0), 0)
self.assertEqual(a.statistics['aged_out'], 1)
CUR_LOGICAL_TIME = 6
a._poll()
a._sudden_death()
a._age_out()
a._collect_garbage()
self.assertEqual(a.statistics['added'], 1)
self.assertEqual(a.statistics.get('removed', 0), 0)
self.assertEqual(a.statistics.get('garbage_collected', 0), 0)
self.assertEqual(a.statistics['aged_out'], 1)
CUR_LOGICAL_TIME = 7
a._age_out()
self.assertEqual(a.statistics['added'], 1)
self.assertEqual(a.statistics.get('removed', 0), 0)
self.assertEqual(a.statistics.get('garbage_collected', 0), 0)
self.assertEqual(a.statistics['aged_out'], 1)
CUR_LOGICAL_TIME = 8
a._poll()
a._sudden_death()
a._age_out()
a._collect_garbage()
self.assertEqual(a.statistics['added'], 1)
self.assertEqual(a.statistics['removed'], 1)
self.assertEqual(a.statistics['garbage_collected'], 1)
self.assertEqual(a.statistics['aged_out'], 1)
CUR_LOGICAL_TIME = 9
a._age_out()
self.assertEqual(a.statistics['added'], 1)
self.assertEqual(a.statistics['removed'], 1)
self.assertEqual(a.statistics['garbage_collected'], 1)
self.assertEqual(a.statistics['aged_out'], 1)
CUR_LOGICAL_TIME = 10
a._poll()
a._sudden_death()
a._age_out()
a._collect_garbage()
self.assertEqual(a.statistics['added'], 2)
self.assertEqual(a.statistics['removed'], 1)
self.assertEqual(a.statistics['garbage_collected'], 1)
self.assertEqual(a.statistics['aged_out'], 1)
CUR_LOGICAL_TIME = 11
a._age_out()
self.assertEqual(a.statistics['added'], 2)
self.assertEqual(a.statistics['removed'], 1)
self.assertEqual(a.statistics['garbage_collected'], 1)
self.assertEqual(a.statistics['aged_out'], 1)
CUR_LOGICAL_TIME = 12
a._age_out()
self.assertEqual(a.statistics['added'], 2)
self.assertEqual(a.statistics['removed'], 1)
self.assertEqual(a.statistics['garbage_collected'], 1)
self.assertEqual(a.statistics['aged_out'], 2)
self.assertEqual(a.statistics['withdraw.tx'], 2)
a.stop()
a = None
chassis = None
rpcmock = None
ochannel = None
gc.collect()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_permanent_feed(self, um_mock, event_mock,
                        sleep_mock, spawnl_mock, spawn_mock):
    """PermanentFeed: indicators are kept until the feed itself drops them."""
    def advance(t):
        # move the mocked logical clock forward
        global CUR_LOGICAL_TIME
        CUR_LOGICAL_TIME = t

    chassis = mock.Mock()
    ochannel = mock.Mock()
    chassis.request_pub_channel.return_value = ochannel
    rpcmock = mock.Mock()
    rpcmock.get.return_value = {'error': None, 'result': 'OK'}
    chassis.send_rpc.return_value = rpcmock

    node = PermanentFeed(FTNAME, chassis)
    node.connect([], False)
    node.mgmtbus_initialize()
    node.start()
    self.assertEqual(spawnl_mock.call_count, 2)
    self.assertEqual(spawn_mock.call_count, 3)

    def full_cycle():
        # one complete poller iteration
        node._poll()
        node._sudden_death()
        node._age_out()
        node._collect_garbage()

    advance(1)
    node._age_out()
    self.assertEqual(node.statistics.get('aged_out', 0), 0)
    self.assertEqual(um_mock.call_count, 1)

    advance(2)
    full_cycle()
    self.assertEqual(node.statistics['added'], 3)
    self.assertEqual(node.statistics.get('removed', 0), 0)

    advance(3)
    node._age_out()
    self.assertEqual(node.statistics.get('aged_out', 0), 0)

    advance(4)
    full_cycle()
    self.assertEqual(node.statistics['added'], 4)
    self.assertEqual(node.statistics.get('removed', 0), 1)
    self.assertEqual(node.statistics.get('garbage_collected', 0), 1)

    # further age-out passes must not age anything else out
    for t in (5, 6, 7):
        advance(t)
        node._age_out()
        self.assertEqual(node.statistics.get('aged_out', 0), 1)

    advance(8)
    full_cycle()
    self.assertEqual(node.statistics['added'], 4)
    self.assertEqual(node.statistics.get('garbage_collected', 0), 4)
    self.assertEqual(node.length(), 0)

    node.stop()
    node = None
    chassis = None
    rpcmock = None
    ochannel = None
    gc.collect()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_superpermanent_feed(self, um_mock, event_mock,
                             sleep_mock, spawnl_mock, spawn_mock):
    """Same scenario as test_permanent_feed but with sparser clock steps."""
    def advance(t):
        # move the mocked logical clock forward
        global CUR_LOGICAL_TIME
        CUR_LOGICAL_TIME = t

    chassis = mock.Mock()
    ochannel = mock.Mock()
    chassis.request_pub_channel.return_value = ochannel
    rpcmock = mock.Mock()
    rpcmock.get.return_value = {'error': None, 'result': 'OK'}
    chassis.send_rpc.return_value = rpcmock

    node = PermanentFeed(FTNAME, chassis)
    node.connect([], False)
    node.mgmtbus_initialize()
    node.start()
    self.assertEqual(spawnl_mock.call_count, 2)
    self.assertEqual(spawn_mock.call_count, 3)

    def full_cycle():
        # one complete poller iteration
        node._poll()
        node._sudden_death()
        node._age_out()
        node._collect_garbage()

    advance(1)
    node._age_out()
    self.assertEqual(node.statistics.get('aged_out', 0), 0)
    self.assertEqual(um_mock.call_count, 1)

    advance(2)
    full_cycle()
    self.assertEqual(node.statistics['added'], 3)
    self.assertEqual(node.statistics.get('removed', 0), 0)

    advance(4)
    full_cycle()
    self.assertEqual(node.statistics['added'], 4)
    self.assertEqual(node.statistics.get('removed', 0), 1)
    self.assertEqual(node.statistics.get('garbage_collected', 0), 1)
    self.assertEqual(node.statistics.get('aged_out', 0), 1)

    advance(8)
    full_cycle()
    self.assertEqual(node.statistics['added'], 4)
    self.assertEqual(node.statistics.get('garbage_collected', 0), 4)
    self.assertEqual(node.length(), 0)

    node.stop()
    node = None
    chassis = None
    rpcmock = None
    ochannel = None
    gc.collect()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_drop_old_ops(self, um_mock, event_mock, sleep_mock, spawnl_mock, spawn_mock):
    """Stale queued age_out ops must be dropped by the actor loop."""
    global CUR_LOGICAL_TIME

    chassis = mock.Mock()
    ochannel = mock.Mock()
    chassis.request_pub_channel.return_value = ochannel
    rpcmock = mock.Mock()
    rpcmock.get.return_value = {'error': None, 'result': 'OK'}
    chassis.send_rpc.return_value = rpcmock

    node = DeltaFeed(FTNAME, chassis)
    node.connect([], False)
    node.mgmtbus_initialize()
    node.start()
    self.assertEqual(spawnl_mock.call_count, 2)
    self.assertEqual(spawn_mock.call_count, 3)

    # queue an outdated op (ts=0) followed by a current one (ts=999)
    node._actor_queue.put((0, 'age_out'))
    node._actor_queue.put((999, 'age_out'))
    CUR_LOGICAL_TIME = 1
    try:
        node._actor_loop()
    except gevent.hub.LoopExit:
        pass  # loop exits once the queue drains under the mocked hub

    # only the fresh op should have triggered an age-out run
    self.assertEqual(node.last_ageout_run, 1000)
    self.assertEqual(um_mock.call_count, 2)

    node.stop()
    node = None
    chassis = None
    rpcmock = None
    ochannel = None
    gc.collect()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_permanentwithtype_feed(self, um_mock, event_mock,
                                sleep_mock, spawnl_mock, spawn_mock):
    """PermanentFeedWithType: typed indicators follow the same lifecycle."""
    def advance(t):
        # move the mocked logical clock forward
        global CUR_LOGICAL_TIME
        CUR_LOGICAL_TIME = t

    chassis = mock.Mock()
    ochannel = mock.Mock()
    chassis.request_pub_channel.return_value = ochannel
    rpcmock = mock.Mock()
    rpcmock.get.return_value = {'error': None, 'result': 'OK'}
    chassis.send_rpc.return_value = rpcmock

    node = PermanentFeedWithType(FTNAME, chassis)
    node.connect([], False)
    node.mgmtbus_initialize()
    node.start()
    self.assertEqual(spawnl_mock.call_count, 2)
    self.assertEqual(spawn_mock.call_count, 3)

    def full_cycle():
        # one complete poller iteration
        node._poll()
        node._sudden_death()
        node._age_out()
        node._collect_garbage()

    advance(1)
    node._age_out()
    self.assertEqual(node.statistics.get('aged_out', 0), 0)
    self.assertEqual(um_mock.call_count, 1)

    advance(2)
    full_cycle()
    self.assertEqual(node.statistics['added'], 3)
    self.assertEqual(node.statistics.get('removed', 0), 0)

    advance(3)
    node._age_out()
    self.assertEqual(node.statistics.get('aged_out', 0), 0)

    advance(4)
    full_cycle()
    self.assertEqual(node.statistics['added'], 5)
    self.assertEqual(node.statistics.get('removed', 0), 2)
    self.assertEqual(node.statistics.get('garbage_collected', 0), 2)

    # further age-out passes must not age anything else out
    for t in (5, 6, 7):
        advance(t)
        node._age_out()
        self.assertEqual(node.statistics.get('aged_out', 0), 2)

    advance(8)
    full_cycle()
    self.assertEqual(node.statistics['added'], 5)
    self.assertEqual(node.statistics.get('garbage_collected', 0), 5)
    self.assertEqual(node.length(), 0)

    node.stop()
    node = None
    chassis = None
    rpcmock = None
    ochannel = None
    gc.collect()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_bptable_1(self, um_mock, event_mock,
                   sleep_mock, spawnl_mock, spawn_mock):
    """_BPTable_v0: put / get / query / delete round-trip."""
    table = minemeld.ft.table.Table(FTNAME, truncate=True)
    bpt = minemeld.ft.basepoller._BPTable_v0(table)

    bpt.put('A', {'v': 1})
    self.assertEqual(bpt.get('A')['v'], 1)

    key, value = next(bpt.query(include_value=True))
    self.assertEqual(value['v'], 1)

    bpt.delete('A')
    self.assertEqual(bpt.get('A'), None)

    bpt.close()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_bptable_2(self, um_mock, event_mock,
                   sleep_mock, spawnl_mock, spawn_mock):
    """_BPTable_v1 with type_in_key: query without values yields bare keys."""
    table = minemeld.ft.table.Table(FTNAME, truncate=True)
    bpt = minemeld.ft.basepoller._BPTable_v1(table, type_in_key=True)

    bpt.put('A', {'type': 1})
    first_key = next(bpt.query(include_value=False))
    self.assertEqual(first_key, 'A')

    bpt.close()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_bptable_3(self, um_mock, event_mock,
                   sleep_mock, spawnl_mock, spawn_mock):
    """Reopening a type_in_key table with type_in_key=False must fail."""
    table = minemeld.ft.table.Table(FTNAME, truncate=True)
    minemeld.ft.basepoller._BPTable_v1(table, type_in_key=True).close()

    with self.assertRaises(RuntimeError):
        reopened = minemeld.ft.table.Table(FTNAME, truncate=False)
        minemeld.ft.basepoller._BPTable_v1(reopened, type_in_key=False)
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_bptable_4(self, um_mock, event_mock,
                   sleep_mock, spawnl_mock, spawn_mock):
    """put() without a 'type' entry must be rejected when type_in_key=True."""
    table = minemeld.ft.table.Table(FTNAME, truncate=True)
    bpt = minemeld.ft.basepoller._BPTable_v1(table, type_in_key=True)

    with self.assertRaises(RuntimeError):
        bpt.put('A', {'a': 1})

    bpt.close()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_bptable_5(self, um_mock, event_mock,
                   sleep_mock, spawnl_mock, spawn_mock):
    """Factory can reopen an existing v1 table with matching type_in_key."""
    table = minemeld.ft.table.Table(FTNAME, truncate=True)
    minemeld.ft.basepoller._BPTable_v1(table, type_in_key=True).close()

    reopened = minemeld.ft.basepoller._bptable_factory(FTNAME, truncate=False, type_in_key=True)
    reopened.close()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_bptable_6(self, um_mock, event_mock,
                   sleep_mock, spawnl_mock, spawn_mock):
    """Factory can reopen a non-empty v0 table without type_in_key."""
    table = minemeld.ft.table.Table(FTNAME, truncate=True)
    legacy = minemeld.ft.basepoller._BPTable_v0(table)
    legacy.put('A', {'v': 1})
    legacy.close()

    reopened = minemeld.ft.basepoller._bptable_factory(FTNAME, truncate=False, type_in_key=False)
    reopened.close()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_bptable_7(self, um_mock, event_mock,
                   sleep_mock, spawnl_mock, spawn_mock):
    """Factory can reopen an emptied v0 table with type_in_key=True."""
    table = minemeld.ft.table.Table(FTNAME, truncate=True)
    legacy = minemeld.ft.basepoller._BPTable_v0(table)
    legacy.put('A', {'v': 1})
    legacy.delete('A')
    legacy.close()

    reopened = minemeld.ft.basepoller._bptable_factory(FTNAME, truncate=False, type_in_key=True)
    reopened.close()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_bptable_8(self, um_mock, event_mock,
                   sleep_mock, spawnl_mock, spawn_mock):
    """_BPTable_v1 handles non-ASCII indicators, keys and types."""
    table = minemeld.ft.table.Table(FTNAME, truncate=True)
    bpt = minemeld.ft.basepoller._BPTable_v1(table, type_in_key=True)

    indicator = u'☃.net/påth'
    bpt.put(indicator=indicator, value={indicator: 1, 'type': indicator})
    self.assertNotEqual(bpt.get(indicator, itype=indicator), None)

    key, value = next(bpt.query(include_value=True))
    self.assertEqual(key, indicator)
    self.assertEqual(value, {indicator: 1, 'type': indicator})

    bpt.close()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep', side_effect=gevent.GreenletExit())
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_permanentwithtype_feed_agg(self, um_mock, event_mock,
                                    sleep_mock, spawnl_mock, spawn_mock):
    """Aggregated typed feed: same lifecycle as the non-aggregated variant."""
    def advance(t):
        # move the mocked logical clock forward
        global CUR_LOGICAL_TIME
        CUR_LOGICAL_TIME = t

    chassis = mock.Mock()
    ochannel = mock.Mock()
    chassis.request_pub_channel.return_value = ochannel
    rpcmock = mock.Mock()
    rpcmock.get.return_value = {'error': None, 'result': 'OK'}
    chassis.send_rpc.return_value = rpcmock

    node = PermanentFeedWithTypeAggregated(FTNAME, chassis)
    node.connect([], False)
    node.mgmtbus_initialize()
    node.start()
    self.assertEqual(spawnl_mock.call_count, 2)
    self.assertEqual(spawn_mock.call_count, 3)

    def full_cycle():
        # one complete poller iteration
        node._poll()
        node._sudden_death()
        node._age_out()
        node._collect_garbage()

    advance(1)
    node._age_out()
    self.assertEqual(node.statistics.get('aged_out', 0), 0)
    self.assertEqual(um_mock.call_count, 1)

    advance(2)
    full_cycle()
    self.assertEqual(node.statistics['added'], 3)
    self.assertEqual(node.statistics.get('removed', 0), 0)

    advance(3)
    node._age_out()
    self.assertEqual(node.statistics.get('aged_out', 0), 0)

    advance(4)
    full_cycle()
    self.assertEqual(node.statistics['added'], 5)
    self.assertEqual(node.statistics.get('removed', 0), 2)
    self.assertEqual(node.statistics.get('garbage_collected', 0), 2)

    # further age-out passes must not age anything else out
    for t in (5, 6, 7):
        advance(t)
        node._age_out()
        self.assertEqual(node.statistics.get('aged_out', 0), 2)

    advance(8)
    full_cycle()
    self.assertEqual(node.statistics['added'], 5)
    self.assertEqual(node.statistics.get('garbage_collected', 0), 5)
    self.assertEqual(node.length(), 0)

    node.stop()
    node = None
    chassis = None
    rpcmock = None
    ochannel = None
    gc.collect()
@mock.patch.object(gevent, 'spawn')
@mock.patch.object(gevent, 'spawn_later')
@mock.patch.object(gevent, 'sleep')
@mock.patch('gevent.event.Event', side_effect=gevent_event_mock_factory)
@mock.patch('minemeld.ft.basepoller.utc_millisec', side_effect=logical_millisec)
def test_permanentwithtype_feed_agg2(self, um_mock, event_mock,
sleep_mock, spawnl_mock, spawn_mock):
global CUR_LOGICAL_TIME
chassis = mock.Mock()
ochannel = mock.Mock()
chassis.request_pub_channel.return_value = ochannel
rpcmock = mock.Mock()
rpcmock.get.return_value = {'error': None, 'result': 'OK'}
chassis.send_rpc.return_value = rpcmock
a = DeltaFeedWithTypeAggregatedFaulty(FTNAME, chassis)
inputs = []
output = False
a.connect(inputs, output)
a.mgmtbus_initialize()
a.start()
self.assertEqual(spawnl_mock.call_count, 2)
self.assertEqual(spawn_mock.call_count, 3)
CUR_LOGICAL_TIME = 1
a._age_out()
self.assertEqual(a.statistics.get('aged_out', 0), 0)
self.assertEqual(um_mock.call_count, 1)
CUR_LOGICAL_TIME = 2
a._poll()
a._age_out()
a._collect_garbage()
self.assertEqual(a.statistics['added'], 3)
self.assertEqual(a.statistics.get('removed', 0), 0)
CUR_LOGICAL_TIME = 3
a._age_out()
self.assertEqual(a.statistics.get('aged_out', 0), 0)
CUR_LOGICAL_TIME = 4
a._poll()
a._age_out()
a._collect_garbage()
self.assertEqual(a.statistics['added'], 5)
self.assertEqual(a.statistics.get('removed', 0), 0)
self.assertEqual(a.statistics.get('garbage_collected', 0), 0)
CUR_LOGICAL_TIME = 5
a._age_out()
self.assertEqual(a.statistics.get('aged_out', 0), 0)
CUR_LOGICAL_TIME = 6
a._age_out()
self.assertEqual(a.statistics.get('aged_out', 0), 0)
CUR_LOGICAL_TIME = 7
a._age_out()
self.assertEqual(a.statistics['aged_out'], 3)
CUR_LOGICAL_TIME = 8
a._poll()
a._age_out()
a._collect_garbage()
self.assertEqual(a.statistics['added'], 5)
self.assertEqual(a.statistics.get('garbage_collected', 0), 3)
self.assertEqual(a.length(), 2)
CUR_LOGICAL_TIME = 9
a._age_out()
a._collect_garbage()
self.assertEqual(a.statistics['aged_out'], 5)
self.assertEqual(a.length(), 0)
a.stop()
a = | |
<filename>office.py
import pandas as pd
import numpy as np
import math
import re
from pptx import Presentation
from pptx.util import Inches
from pptx.util import Pt
# _/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_
class ExcelSPC():
    """Wrapper around an SPC Excel workbook read via pandas.

    The workbook must contain a 'Master' tab whose columns match
    ``header_master`` (extra trailing 'Unnamed: n' columns generated by
    pandas for empty columns are tolerated), plus one tab per part number
    holding the measurement data.
    """
    filename = None   # path of the workbook this instance was built from
    sheets = None     # dict: tab name -> pandas DataFrame
    valid = False     # True when the 'Master' tab passed validation
    SL_flag = []      # per-master-row flag (meaning defined by callers)
    header_master = ['Part Number', 'Description', 'Key Parameter', 'Parameter Name',
                     'LSL', 'Target', 'USL', 'Chart Type', 'Metrology', 'Multiple',
                     'Lower Tol', 'Upper Tol', 'Spec Type', 'CL Frozen',
                     'LCL', 'Avg', 'UCL', 'RLCL', 'R Avg', 'RUCL', 'CLCR Lower', 'CLCR Upper',
                     'Total # of Recent Points', '%OOC for Recent Points',
                     'Cpk for Recent Points', 'PPM for Recent Points',
                     'Parameter Classification', 'Product Classification',
                     'Recent Std Dev', 'Cpk for All Points', 'PPM for All Points',
                     'Cpk for Historic & Recent Points', 'PPM for Historic & Recent Points']

    # matches the names pandas assigns to unnamed (empty) trailing columns
    # (fixed annotation: this is a compiled regex, not a str)
    pattern: re.Pattern = re.compile(r'Unnamed:\s[0-9]+')

    def __init__(self, filename: str):
        """Read *filename*, validate the 'Master' tab and init row flags."""
        self.filename: str = filename
        self.sheets: dict = self.read(filename)
        self.valid: bool = self.check_valid_sheet(self.sheets)
        if self.valid is False:
            return
        self.init_SL_flag()

    def init_SL_flag(self):
        """Reset the SL flag to False for every row of the 'Master' tab."""
        self.SL_flag = [False] * len(self.get_master())

    def set_SL_flag(self, row: int, flag: bool):
        """Set the SL flag for the given master row."""
        self.SL_flag[row] = flag

    def get_SL_flag(self, row: int) -> bool:
        """Return the SL flag for the given master row."""
        return self.SL_flag[row]

    def get_filename(self) -> str:
        """Return the workbook path this instance was created from."""
        return self.filename

    def check_valid_sheet(self, sheets) -> bool:
        """Validate the 'Master' tab and normalize its column names.

        Returns True when a 'Master' tab exists and its columns are the
        expected header (extra trailing 'Unnamed: n' columns are accepted);
        on success the tab's columns are renamed to the canonical header.
        """
        if 'Master' not in sheets.keys():
            return False

        columns = self.sheets['Master'].columns
        if len(columns) == len(self.header_master):
            self.sheets['Master'].columns = self.header_master
            return True
        if len(columns) < len(self.header_master):
            return False

        # More columns than expected: accept only if every extra one is an
        # auto-generated 'Unnamed: n' (i.e. an empty column).
        # BUGFIX: work on a copy -- the original code appended directly to
        # self.header_master, mutating the class-level list shared by all
        # instances and growing it on every such workbook load.
        header_new = list(self.header_master)
        for col in range(len(self.header_master), len(columns)):
            if not self.pattern.match(columns[col]):
                return False  # a named extra column means an invalid format
            header_new.append(columns[col])
        self.sheets['Master'].columns = header_new
        return True

    def get_master(self) -> pd.DataFrame:
        """Return the 'Master' tab without rows lacking a 'Part Number'."""
        df: pd.DataFrame = self.sheets['Master']
        # drop row if column 'Part Number' is NaN
        df = df.dropna(subset=['Part Number'])
        return df

    def get_metrics(self, name_part, param):
        """Return a dict of all metric columns for one part/parameter.

        name_part : value matched against 'Part Number'
        param     : value matched against 'Parameter Name'

        Only the first matching master row is used. The two Classification
        columns are intentionally excluded, as in the original layout.
        """
        df = self.get_master()
        matches = df[(df['Part Number'] == name_part) &
                     (df['Parameter Name'] == param)]
        row = matches.iloc[0]
        excluded = ('Parameter Classification', 'Product Classification')
        # build in header order (minus excluded) so key order is preserved
        return {key: row[key]
                for key in self.header_master if key not in excluded}

    def get_param_list(self, name_part):
        """Return the list of 'Parameter Name' values for *name_part*."""
        df = self.get_master()
        return list(df[df['Part Number'] == name_part]['Parameter Name'])

    def get_part(self, name_part):
        """Return the part's data tab with 'Hide' rows eliminated."""
        df1 = self.get_part_all(name_part)
        # eliminate 'Hide' data
        df2 = df1[df1['Data Type'] != 'Hide']
        return df2

    def get_part_all(self, name_part):
        """Return the part's data tab as a DataFrame (all rows).

        The first row of a data sheet hosts the 'Create Charts' button of
        the originating Excel macro, so it is used as the header and the
        actual data starts at the second row.
        """
        df = self.sheets[name_part]
        # delete rows that are entirely NaN
        df = df.dropna(how='all')
        # first remaining row carries the real column names
        row_size = len(df)
        df1 = df[1:row_size].copy()  # copy: we rewrite the columns below
        df1.columns = list(df.loc[0])
        # rows without a 'Data Type' are not measurements
        df1 = df1.dropna(subset=['Data Type'])
        return df1

    def get_sheets(self):
        """Return the dict of all tabs (tab name -> DataFrame)."""
        return self.sheets

    def get_unique_part_list(self):
        """Return the sorted unique 'Part Number' values of 'Master'."""
        df = self.get_master()
        return list(np.unique(df['Part Number']))

    def get_header_master(self):
        """Return the canonical 'Master' header used for table rendering."""
        return self.header_master

    def read(self, filename):
        """Read *filename* as an Excel workbook, returning all tabs."""
        return pd.read_excel(
            filename,
            sheet_name=None,      # None -> dict of every sheet
            engine='openpyxl',
        )
# _/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_/_
class PowerPoint():
ppt = None
def __init__(self, template):
    """Load *template* as the base presentation; add_slide() appends to it."""
    self.ppt = Presentation(template)
# -------------------------------------------------------------------------
# add_slide
#
# argument
# sheets :
# info :
#
# return
# (none)
# -------------------------------------------------------------------------
def add_slide(self, sheets, info):
    """Append one slide for the part/parameter described by *info*.

    sheets : ExcelSPC-like object; its get_metrics() supplies the data row
    info   : dict with keys 'PART', 'PARAM' and 'IMAGE' (chart image path)
    """
    metrics = sheets.get_metrics(info['PART'], info['PARAM'])
    # check_dict is defined elsewhere in this class; presumably it
    # normalizes missing/NaN entries -- TODO confirm
    metrics = self.check_dict(metrics)
    # ---------------------------------------------------------------------
    # refer layout from original master
    # (layout index 1 is assumed to exist in the template -- verify)
    # ---------------------------------------------------------------------
    slide_layout = self.ppt.slide_layouts[1]
    slide = self.ppt.slides.add_slide(slide_layout)
    shapes = slide.shapes
    # ---------------------------------------------------------------------
    # slide title
    # ---------------------------------------------------------------------
    shapes.title.text = info['PART']
    # ---------------------------------------------------------------------
    # insert textboxes (placeholder idx 20/21 are template-specific)
    # ---------------------------------------------------------------------
    ##### DEBUG ROUTINE for PLACEHOLDER INDEX #####
    # for shape in slide.placeholders:
    #    print('%d %s' % (shape.placeholder_format.idx, shape.name))
    # Placeholder 1
    ph1 = shapes.placeholders[20]
    tf1 = ph1.text_frame
    tf1.text = self.get_body_text_1(metrics)
    # Placeholder 2
    ph2 = shapes.placeholders[21]
    tf2 = ph2.text_frame
    tf2.text = self.get_body_text_2(info, metrics)
    # ---------------------------------------------------------------------
    # insert chart image (width follows from height, aspect preserved)
    # ---------------------------------------------------------------------
    ileft = Inches(0)
    itop = Inches(1.92)
    iheight = Inches(3.5)
    slide.shapes.add_picture(info['IMAGE'], left=ileft, top=itop, height=iheight)
    # ---------------------------------------------------------------------
    # insert table (currently disabled)
    # ---------------------------------------------------------------------
    # self.create_table(metrics, shapes)
# -------------------------------------------------------------------------
# get_body_text_1
#
# argument
# metrics :
#
# return
# (none)
# -------------------------------------------------------------------------
def get_body_text_1(self, metrics):
# Chart Type Information
if metrics['Chart Type'] == 'LJ':
dist = 'Normal'
ctype = 'Levey Jennings'
elif metrics['Chart Type'] == 'IR':
dist = 'Non-normal'
ctype = 'IR'
else:
dist = 'Unknown'
ctype = 'n/a'
# Spec Information
if metrics['Spec Type'] == 'Two-Sided':
spec | |
<reponame>Sohojoe/damon<gh_stars>0
# -*- coding: utf-8 -*-
"""
this module creates functionality to generate unit tests at runtime.
For now it will have very basic functionality, but it will be improving with
time.
"""
import damon1 as dmn
import os
import sys
import inspect
from copy import deepcopy as cp
import numpy as np
class UnitTestsGenerator:
    '''
    Generates stand-alone unit-test scripts at runtime and writes them into
    the damon1 tests/new directory via a CodeGenerator.
    '''

    def __init__(self, module_name, class_name, method__name):
        '''
        Prepare a code generator targeting the file
        ``<module_name>_<class_name>_<method__name>.py``.

        module_name and class_name may be None (treated as empty strings);
        method__name is required.
        '''
        if module_name is None:
            module_name = ""
        if class_name is None:
            class_name = ""
        path = dmn.__path__[0] + '/tests/new/'
        # NOTE(review): changes the process-wide working directory as a side
        # effect -- confirm this is intended.
        os.chdir(path)
        file_full_name = path + module_name + "_" + class_name + "_" + \
            method__name + ".py"
        self.cg_ = dmn.code_generator.CodeGenerator(file_full_name)
        self.codeblocks_list = []
        self.module_name = module_name
        self.class_name = class_name
        self.method_name = method__name

    def generate(self,
                 create_data_scenarios=None,
                 create_data_function_module_name=None,
                 create_data_function_name=None,
                 method_scenarios=None,
                 is_create_data_needed=None):
        '''
        Generate the unit-test script: emit the import statements, copy the
        source of the create-data function and test-harness helpers into the
        file, then append the driving statements and write the file.
        '''
        # Convert scenarios into concrete parameter sets.
        [create_data_paramters, method_parameters_permutations] = \
            self.convert_scenarios_to_parameters(create_data_scenarios,
                                                 method_scenarios)
        # Emit the import statements first.
        self.append_import_statements()
        self.codeblocks_list.append(self.cg_.create_newline())
        if create_data_function_name is not None:
            # Resolve the create-data function and copy its source verbatim
            # into the generated file.
            exec("import " + create_data_function_module_name)
            create_data_function = getattr(
                sys.modules[create_data_function_module_name],
                create_data_function_name)
            self.codeblocks_list.append(self.cg_.get_object_sourcecode(
                create_data_function))
        # Copy the test-harness helper functions into the generated file so
        # it can run stand-alone.
        self.codeblocks_list.append(self.cg_.get_object_sourcecode(
            get_method_parameters))
        self.codeblocks_list.append(self.cg_.get_object_sourcecode(
            format_method_parameters))
        self.codeblocks_list.append(self.cg_.get_object_sourcecode(
            run_method))
        self.codeblocks_list.append(self.cg_.get_object_sourcecode(
            generate_reports))
        self.codeblocks_list.append(self.cg_.get_object_sourcecode(
            test_flow))
        # Assignment statement for the create-data parameters.
        self.codeblocks_list.append(self.cg_.create_statement(
            "create_data_parameters_list = " + self.
            cg_.convert_object_to_string(create_data_paramters)))
        # Assignment statement for the method parameter permutations.
        self.codeblocks_list.append(self.cg_.create_statement(
            "method_parameters_permutations = " + self.cg_
            .convert_object_to_string(method_parameters_permutations)))
        # Final driving call; create_data_function_name is emitted unquoted
        # so the generated script references the copied-in function object.
        self.codeblocks_list.append(self.cg_.create_statement(
            "test_flow(create_data_parameters_list, method_parameters_permutations, \""
            + self.cg_.convert_object_to_string(self.module_name) + "\", \""
            + self.cg_.convert_object_to_string(self.class_name) + "\", \""
            + self.cg_.convert_object_to_string(self.method_name) + "\", "
            + self.cg_.convert_object_to_string(create_data_function_name)
            + " )"))
        # Write the whole accumulated code to the file.
        self.cg_.write_to_file(self.codeblocks_list)
        # print() function form works on both Python 2 and 3; the old
        # print-statement form was Python-2-only.
        print("test file generated")
        return None

    def append_import_statements(self, import_statemetnts_scenarios=None):
        '''
        Append the standard import statements needed by every generated
        test file.  (Parameter name kept for interface compatibility.)
        '''
        for statement in ("import damon1 as dmn",
                          "import sys",
                          "import os",
                          "import inspect",
                          "from copy import deepcopy as cp",
                          "from array import array as array",
                          "import numpy as np"):
            self.codeblocks_list.append(self.cg_.create_statement(statement))

    def convert_scenarios_to_parameters(self, create_data_scenarios=None,
                                        method_scenarios=None):
        '''
        Convert scenarios to parameter lists.  Currently a pass-through;
        kept as an extension point.
        '''
        return [create_data_scenarios, method_scenarios]
def test_flow(create_parameters_list, method_parameters_permutations,
              module_name, class_name, method_name,
              create_data_function):
    '''
    Complete flow of a generated test: resolve the target method, expand
    the method parameter permutations, run the method for every combination
    of create-data parameters and method parameters, then report results.

    :param create_parameters_list: list of argument tuples for create_data_function
    :param method_parameters_permutations: permutation spec for the method under test
    :param module_name: dotted module containing the method ('' if none)
    :param class_name: class containing the method ('' for a module-level function)
    :param method_name: name of the method under test
    :param create_data_function: callable that builds the data object, or None
    '''
    method = None
    # Resolve the method object from its dotted location.  Use equality,
    # not identity: the original `is not ""` only worked by CPython's
    # string-interning accident.
    if module_name != "":
        exec("import " + module_name)
        if class_name != "":
            cls = getattr(sys.modules[module_name], class_name)
            method = getattr(cls, method_name)
        else:
            method = getattr(sys.modules[module_name], method_name)
    if method is None:
        print("method was not found. Check your module name , class name , method name")
        return None
    method_parameters_list = get_method_parameters(method,
                                                   method_parameters_permutations)
    method_output_list = []
    method_exception_list = []
    for create_parameters in create_parameters_list:
        # NOTE(review): if create_data_function is None, data_object is never
        # bound and the inner loop raises NameError -- confirm whether a None
        # create_data_function is ever actually passed here.
        if create_data_function is not None:
            data_object = create_data_function(*create_parameters)
        for method_parameters in method_parameters_list:
            formattted_method_parameters = format_method_parameters(
                data_object, method_parameters)
            [run_method_output, method_exception] = \
                run_method(method, formattted_method_parameters)
            method_output_list.append(run_method_output)
            method_exception_list.append(method_exception)
    generate_reports(method_output_list, method_exception_list)
def create_Damon_data(nfac0, # [Number of Facet 0 elements -- rows/persons]
nfac1, # [Number of Facet 1 elements -- columns/items]
ndim, # [Number of dimensions to create]
seed = None, # [None => randomly pick starter coordinates; int => integer of "seed" random coordinates]
facmetric = [4,-2], # [[m,b] => rand() * m + b, to set range of facet coordinate values]
noise = None, # [<None, noise, {'Rows':<noise,{1:noise1,4:noise4}>,'Cols':<noise,{2:noise2,5:noise5}> => add error to rows/cols]
validchars = None, # [None; ['All',[valid chars]]; or ['Cols', {1:['a','b','c','d'],2:['All'],3:['1.2 -- 3.5'],4:['0 -- '],...}] ]
mean_sd = None, # [None; ['All',[Mean,SD]]; or ['Cols', {1:[Mean1,SD1],2:[Mean2,SD2],3:[None],...}] ]
p_nan = 0.0, # [Proportion of cells to make missing at random]
nanval = -999., # [Numeric code for designating missing values]
condcoord_ = None, # [< None, 'Orthonormal']
nheaders4rows = 1, # [Number of header column labels to put before each row]
nheaders4cols = 1, # [Number of header row labels to put before each column]
extra_headers = 0, # [If headers > 1, range of integer values for header labels, applies to both row and col.]
input_array = None, # [<None, name of data array to import>]
output_as = 'Damon', # [<'Damon','datadict','array','file','Damon+file','datadict+file','array+file'>]
outfile = None, # [<None, name of the output file/path prefix when output_as includes 'file'>]
delimiter = None, # [<None, delimiter character used to separate fields of output file, e.g., ',' or ' '>]
bankf0 = None, # [<None => no bank,[<'All', list of F0 (Row) entities>]]
bankf1 = None, # [<None => no bank,['MyBank.pkl',[<'All', list of F1 (Col) entities>]]> ]
createbank = None ,#[None, True]
convert_to_datadict = None , #[<None , True>],
BankName = None, # [<None,'MyBank.pkl'>]
colkeytype = None, #[None, True]
rowkeytype = None, #[None, True]
simulate_whole = None, #[None, True]
extract_test = None,
create_sample_datadict = None,
**kwargs
):
'''
it uses create data parameters to create data to be passed to method.
'''
'''
run_fin_est = None
run_equate = None
run_summstat = None
run_est2logit = None
run_subscale = None
run_fin_resid = None
run_base_fit = None
run_coord = None
run_base_est = None
run_base_resid = None
run_base_ear = None
run_base_se = None
run_fin_resid = None
run_base_fit = None
run_parse = None
run_standardize = None
run_pseudomiss = None
fin_est_parameters = {}
equate_parameters = {}
summstat_parameters = {}
est2logit_parameters = {}
subscale_parameters = {}
fin_resid_parameters = {}
base_fit_parameters = {}
coord_parameters = {"ndim":[[ndim]]}
base_est_parameters = {}
base_resid_parameters = {}
base_ear_parameters = {}
base_se_parameters = {}
fin_resid_parameters = {}
base_fit_parameters = {}
parse_parameters = {"resp_cat":'Find'}
standardize_parameters = {"metric":'std_params',"std_params":bankf1[0]}
pseudomiss_parameters = {"seed":1,"rand_nan" : 0.1}
if(kwargs.has_key("run_fin_est")):
run_fin_est=kwargs["run_fin_est"]
if(kwargs.has_key("run_equate")):
run_equate=kwargs["run_equate"]
if(kwargs.has_key("run_summstat")):
run_summstat=kwargs["run_summstat"]
if(kwargs.has_key("run_est2logit")):
run_est2logit=kwargs["run_est2logit"]
if(kwargs.has_key("run_subscale")):
run_subscale=kwargs["run_subscale"]
if(kwargs.has_key("run_fin_resid")):
run_fin_resid=kwargs["run_fin_resid"]
if(kwargs.has_key("run_base_fit")):
run_base_fit=kwargs["run_base_fit"]
if(kwargs.has_key("run_coord")):
run_coord=kwargs["run_coord"]
if(kwargs.has_key("run_base_est")):
run_base_est=kwargs["run_base_est"]
if(kwargs.has_key("run_base_resid")):
run_base_resid=kwargs["run_base_resid"]
if(kwargs.has_key("run_base_ear")):
run_base_ear=kwargs["run_base_ear"]
if(kwargs.has_key("run_base_se")):
run_base_se=kwargs["run_base_se"]
if(kwargs.has_key("run_fin_resid")):
run_fin_resid=kwargs["run_fin_resid"]
if(kwargs.has_key("run_base_fit")):
run_base_fit=kwargs["run_base_fit"]
if(kwargs.has_key("run_parse")):
run_parse=kwargs["run_parse"]
if(kwargs.has_key("run_standardize")):
run_standardize=kwargs["run_standardize"]
if(kwargs.has_key("run_pseudomiss")):
run_pseudomiss=kwargs["run_pseudomiss"]
if(kwargs.has_key("fin_est_parameters")):
fin_est_parameters=kwargs["fin_est_parameters"]
if(kwargs.has_key("equate_parameters")):
equate_parameters=kwargs["equate_parameters"]
if(kwargs.has_key("summstat_parameters")):
summstat_parameters=kwargs["summstat_parameters"]
if(kwargs.has_key("est2logit_parameters")):
est2logit_parameters=kwargs["est2logit_parameters"]
if(kwargs.has_key("subscale_parameters")):
subscale_parameters=kwargs["subscale_parameters"]
if(kwargs.has_key("fin_resid_parameters")):
fin_resid_parameters=kwargs["fin_resid_parameters"]
if(kwargs.has_key("base_fit_parameters")):
base_fit_parameters=kwargs["base_fit_parameters"]
if(kwargs.has_key("coord_parameters")):
coord_parameters=kwargs["coord_parameters"]
if(kwargs.has_key("base_est_parameters")):
base_est_parameters=kwargs["base_est_parameters"]
if(kwargs.has_key("base_resid_parameters")):
base_resid_parameters=kwargs["base_resid_parameters"]
if(kwargs.has_key("base_ear_parameters")):
base_ear_parameters=kwargs["base_ear_parameters"]
if(kwargs.has_key("base_se_parameters")):
base_se_parameters=kwargs["base_se_parameters"]
if(kwargs.has_key("fin_resid_parameters")):
fin_resid_parameters=kwargs["fin_resid_parameters"]
if(kwargs.has_key("base_fit_parameters")):
base_fit_parameters=kwargs["base_fit_parameters"]
if(kwargs.has_key("parse_parameters")):
parse_parameters=kwargs["parse_parameters"]
if(kwargs.has_key("standardize_parameters")):
standardize_parameters=kwargs["standardize_parameters"]
if(kwargs.has_key("pseudomiss_parameters")):
pseudomiss_parameters=kwargs["pseudomiss_parameters"]
'''
#setup data to be passed to create_data
all_args = locals()
create_data_args = all_args.copy()
del_args = ['bankf0', 'bankf1','createbank','pPsMiss','convert_to_datadict'
,'BankName','convert_colkeytype_tostring','simulate_whole',
'extract_test', 'create_sample_datadict']
for arg in del_args:
del create_data_args[arg]
D = dmn.core.create_data(**create_data_args)
data = D['data']
model = D['model']
anskey = D['anskey']
Keys = data['anskey']['rl_col']['ItemID'][:].astype(int)
Vals = data['anskey']['core_col']['Correct']
anskey_2 = ['Cols',dict(zip(Keys,Vals))]
if not isinstance(data,dmn.core.Damon):
data = dmn.core.Damon(data,'datadict_link',pytables=data['fileh'],verbose=None)
model = dmn.core.Damon(model,'datadict_link',pytables=model['fileh'],verbose=None)
anskey = dmn.core.Damon(anskey,'datadict',pytables=None,verbose=None)
if extract_test is True :
temp = np.copy(data.data_out['collabels'][2,:])
data.data_out['collabels'][2,:] = data.data_out['collabels'][0,:]
data.data_out['collabels'][0,:] = temp
data.data_out['key4cols'] = 2
temp = np.copy(data.data_out['rowlabels'][:,2])
data.data_out['rowlabels'][:,2] = data.data_out['rowlabels'][:,0]
data.data_out['rowlabels'][:,0] = temp
data.data_out['key4rows'] = 2
if simulate_whole is True :
try:
data = dmn.core.Damon(D.data_out,'datadict','RCD+Whole')
except AttributeError:
data = dmn.core.Damon(D,'datadict','RCD+Whole')
if colkeytype is not None :
data.colkeytype = colkeytype
data.data_out['colkeytype'] = colkeytype
if rowkeytype is not None :
data.colkeytype = rowkeytype
data.data_out['colkeytype'] = rowkeytype
if convert_to_datadict is True :
data = dmn.core.Damon(data.data_out,'datadict',workformat='RCD_whole')
if kwargs.has_key("pseudomiss") and kwargs["pseudomiss"] is not None:
data.pseudomiss(**kwargs["pseudomiss"])
if kwargs.has_key("parse") and kwargs["parse"] is not None:
data.parse(**kwargs["parse"])
if kwargs.has_key("standardize") and kwargs["standardize"] is not None:
data.standardize(**kwargs["standardize"])
if(createbank == True):
# Needed to test bank specs
data.coord([[ndim]])
data.base_est()
data.base_resid()
data.base_ear()
data.base_se()
if (bankf0 is not None
or bankf1 is not None
):
try:
os.remove(bankf1[0])
except:
pass
data.bank(bankf1[0], { 'Remove' : [None], 'Add' : [bankf0] } ,
{ 'Remove' : [None], 'Add' : bankf1[1] })
# Create new DataObj
data = dmn.core.Damon(data,'Damon',verbose=None)
if kwargs.has_key("pseudomiss") and kwargs["pseudomiss"] is not None:
data.pseudomiss(**kwargs["pseudomiss"])
if kwargs.has_key("parse") and kwargs["parse"] is not None:
data.parse(**kwargs["parse"])
if kwargs.has_key("standardize") and kwargs["standardize"] is not None:
data.standardize(**kwargs["standardize"])
if BankName is not None :
data.coord([[ndim]])
data.base_est()
data.base_resid()
data.base_ear()
data.base_se()
try:
os.remove(BankName)
except:
pass
data.bank(BankName,{'Remove':[None],'Add':['All']},{'Remove':[None],'Add':['All']})
np.set_printoptions(precision=2,suppress=True)
print 'data.data_out=\n',data.data_out['coredata']
# New DAmonObj
data = dmn.core.Damon(data.data_out,'datadict',verbose=None)
kwargs["coord"]= {"ndim":[[ndim]],
"anchors":{'Bank':BankName,'Facet':0,
'Coord':'ent_coord',
'Entities':['All'],'Freshen':None}}
if kwargs.has_key("subscale") and kwargs["subscale"] is not None:
data.subscale(**kwargs["subscale"])
if kwargs.has_key("coord") and kwargs["coord"] is not None:
data.coord(**kwargs["coord"])
if kwargs.has_key("base_est") and kwargs["base_est"] is not None:
data.base_est(**kwargs["base_est"])
if kwargs.has_key("base_resid") and kwargs["base_resid"] is not None:
data.base_resid(**kwargs["base_resid"])
if kwargs.has_key("base_ear") and kwargs["base_ear"] is | |
from __future__ import (absolute_import, division, print_function)
from future.utils import iteritems
from collections import OrderedDict
import xml.etree.ElementTree as xmlET
from pyPRMS.constants import DIMENSION_NAMES
from pyPRMS.prms_helpers import read_xml
def _valid_dimension_name(name):
    """Check if given dimension name is valid for PRMS.

    :param str name: dimension name
    :returns: True if dimension name is valid otherwise False
    :rtype: bool
    """
    # Valid names are the fixed set declared in pyPRMS.constants.
    return name in DIMENSION_NAMES
class Dimension(object):
    """A single PRMS dimension: a name, a size, and an optional description."""

    def __init__(self, name=None, size=0, description=None):
        """Create a new dimension object.

        :param str name: the name of the dimension
        :param int size: the size of the dimension
        :param description: description of the dimension
        :type description: str or None
        """
        self.__name = None
        self.__size = None
        self.__description = None

        # Route all three through the property setters so validation applies.
        self.name = name
        self.size = size
        self.description = description

    @property
    def name(self):
        """Name of the dimension.

        :returns: name of the dimension
        :rtype: str
        """
        return self.__name

    @name.setter
    def name(self, name):
        """Set the name of the dimension.

        :param str name: name of the dimension
        :raises ValueError: if the name is not a valid PRMS dimension name
        """
        if not _valid_dimension_name(name):
            # TODO: Should this raise an error?
            raise ValueError('Dimension name, {}, is not a valid PRMS dimension name'.format(name))
        self.__name = name

    @property
    def size(self):
        """Size of the dimension.

        :returns: size of the dimension
        :rtype: int
        """
        return self.__size

    @size.setter
    def size(self, value):
        """Set the size of the dimension.

        The dimensions 'one', 'nmonths', and 'ndays' have fixed sizes
        (1, 12, 366) and silently override the requested value.

        :param int value: requested size of the dimension
        :raises ValueError: if the size is not a non-negative integer
        """
        if not isinstance(value, int) or value < 0:
            raise ValueError('Dimension size must be a positive integer')

        fixed_sizes = {'one': 1, 'nmonths': 12, 'ndays': 366}
        self.__size = fixed_sizes.get(self.__name, value)

        if self.__name not in fixed_sizes and self.__size != value:
            print('ERROR: Dimension, {}, size={}, but size {} was requested'.format(self.__name, self.__size, value))

    @property
    def description(self):
        """Description for the dimension.

        :returns: description for the dimension
        :rtype: str
        """
        return self.__description

    @description.setter
    def description(self, descstr):
        """Set the description of the dimension.

        :param str descstr: description string
        """
        self.__description = descstr

    def __repr__(self):
        return 'Dimension(name={}, size={!r})'.format(self.name, self.size)

    def __iadd__(self, other):
        """Grow the dimension size in place (``dim += n``).

        :param int other: amount to add to the size
        :returns: this Dimension
        :rtype: Dimension
        :raises ValueError: if *other* is not an integer
        """
        if not isinstance(other, int):
            raise ValueError('Dimension size type must be an integer')
        self.__size += other
        return self

    def __isub__(self, other):
        """Shrink the dimension size in place (``dim -= n``).

        :param int other: amount to subtract from the size
        :returns: this Dimension
        :rtype: Dimension
        :raises ValueError: if *other* is not an integer
        :raises ValueError: if the resulting size would be negative
        """
        if not isinstance(other, int):
            raise ValueError('Dimension size type must be an integer')
        if self.__size - other < 0:
            raise ValueError('Dimension size must be positive')
        self.__size -= other
        return self
class Dimensions(object):
    """Ordered container of Dimension objects, keyed by dimension name."""

    def __init__(self, verbose=False):
        """Create ordered dictionary to contain Dimension objects.

        :param bool verbose: if True, report dimension names rejected by add()
        """
        self.__dimensions = OrderedDict()
        self.__verbose = verbose

    def __str__(self):
        """Return '<empty>' or one 'name: Dimension(...)' line per entry."""
        outstr = ''
        if len(self.__dimensions) == 0:
            outstr = '<empty>'
        else:
            for kk, vv in iteritems(self.__dimensions):
                outstr += '{}: {}\n'.format(kk, vv)
        return outstr

    def __getattr__(self, name):
        # Delegate unknown attributes to the underlying OrderedDict so
        # callers can use keys(), values(), items(), etc. directly.
        return getattr(self.__dimensions, name)

    def __getitem__(self, item):
        """Get named dimension.

        :raises KeyError: if the dimension does not exist
        """
        return self.__dimensions[item]

    @property
    def dimensions(self):
        """Get ordered dictionary of Dimension objects.

        :returns: OrderedDict of Dimension objects
        :rtype: collections.OrderedDict[str, Dimension]
        """
        return self.__dimensions

    @property
    def ndims(self):
        """Get number of dimensions.

        :returns: number of dimensions
        :rtype: int
        """
        return len(self.__dimensions)

    @property
    def xml(self):
        """Get xml element for the dimensions.

        :returns: XML element for the dimensions
        :rtype: xmlET.Element
        """
        # Produces e.g.:
        # <dimensions><dimension name="nsegment"><size>1434</size></dimension></dimensions>
        dims_xml = xmlET.Element('dimensions')

        for kk, vv in iteritems(self.dimensions):
            dim_sub = xmlET.SubElement(dims_xml, 'dimension')
            dim_sub.set('name', kk)
            xmlET.SubElement(dim_sub, 'size').text = str(vv.size)
        return dims_xml

    def add(self, name, size=0):
        """Add a new dimension.

        Duplicate names are silently ignored; invalid names are reported
        only when verbose=True.

        :param str name: name of the dimension
        :param int size: size of the dimension
        """
        # TODO: check for valid dimension size for ndays, nmonths, and one
        if name not in self.__dimensions:
            try:
                self.__dimensions[name] = Dimension(name=name, size=size)
            except ValueError as err:
                if self.__verbose:
                    print(err)

    def add_from_xml(self, filename):
        """Add one or more dimensions from an xml file.

        Existing dimensions (other than the fixed-size ones) are grown by
        the size read from the file.

        :param str filename: name of xml file to read
        """
        # Dimension sizes are accumulated across the per-region xml files for
        # a parameter; no attempt is made to verify that each region declares
        # the same set (or number) of dimensions.
        xml_root = read_xml(filename)

        # TODO: We can't guarantee the order of the dimensions in the xml file
        #       so dimensions should be added in the order dictated by the
        #       position attribute:
        #       1) read all dimensions in 'position' order into a list
        #       2) add the dimensions in that order to the ordered dict
        for cdim in xml_root.findall('./dimensions/dimension'):
            name = cdim.get('name')
            size = int(cdim.get('size'))

            if name not in self.__dimensions:
                try:
                    self.__dimensions[name] = Dimension(name=name, size=size)
                except ValueError as err:
                    print(err)
            else:
                if name not in ['nmonths', 'ndays', 'one']:
                    # NOTE: This will always try to grow a dimension if it
                    #       already exists!
                    self.__dimensions[name].size += size

    def exists(self, name):
        """Check if dimension exists.

        :param str name: name of the dimension
        :returns: True if dimension exists, otherwise False
        :rtype: bool
        """
        # Membership test directly against the dict; materializing .keys()
        # (a list under Python 2) was an unnecessary O(n) scan.
        return name in self.dimensions

    def get(self, name):
        """Get dimension.

        :param str name: name of the dimension
        :returns: dimension
        :rtype: Dimension
        :raises ValueError: if dimension does not exist
        """
        if self.exists(name):
            return self.__dimensions[name]
        raise ValueError('Dimension, {}, does not exist.'.format(name))

    def remove(self, name):
        """Remove dimension if it exists; silently does nothing otherwise.

        :param str name: dimension name
        """
        if self.exists(name):
            del self.__dimensions[name]

    def tostructure(self):
        """Get data structure of Dimensions data for serialization.

        :returns: dictionary of dimension names and sizes
        :rtype: dict
        """
        dims = {}
        for kk, vv in iteritems(self.dimensions):
            dims[kk] = {'size': vv.size}
        return dims
class ParamDimensions(Dimensions):
"""Container for parameter dimensions.
This object adds tracking of dimension position.
"""
@property
def xml(self):
    """Build the <dimensions> XML element for this parameter.

    Unlike the base-class version, every <dimension> child also carries a
    1-based <position> sub-element alongside <size>.

    :returns: XML element for the dimensions
    :rtype: xmlET.Element
    """
    root = xmlET.Element('dimensions')
    for dim_name, dim in iteritems(self.dimensions):
        entry = xmlET.SubElement(root, 'dimension')
        entry.set('name', dim_name)
        # get_position is assumed to return the 0-based ordinal of the
        # dimension; it is serialized 1-based -- confirm against its
        # definition elsewhere in this module.
        xmlET.SubElement(entry, 'position').text = str(self.get_position(dim_name) + 1)
        xmlET.SubElement(entry, 'size').text = str(dim.size)
    return root
def add_from_xml(self, filename):
"""Add one or more dimensions from an xml file.
Add or grow dimensions from XML information. This version also checks dimension position.
:param str filename: name of the xml file
:raises ValueError: if existing dimension position is altered
"""
# Add dimensions and grow dimension sizes from xml information for a parameter
# This information is found in xml files for each region for each parameter
# No attempt is made to verify whether each region for a given parameter
# has the same or same number of dimensions.
xml_root = read_xml(filename)
for cdim in xml_root.findall('./dimensions/dimension'):
name = cdim.get('name')
size = int(cdim.get('size'))
pos = int(cdim.get('position')) - 1
if name not in self.dimensions:
try:
self.dimensions[name] = Dimension(name=name, size=size)
except ValueError as err:
print(err)
else:
curr_pos = list(self.dimensions.keys()).index(name)
if curr_pos != pos:
# This indicates a problem in one of the paramdb files
raise ValueError('{}: Attempted position change from {} to {}'.format(name, curr_pos, pos))
else:
if name not in ['nmonths', 'ndays', 'one']:
# NOTE: This will always try to grow a dimension if | |
<gh_stars>1-10
from __future__ import print_function
import sys
import os
from flask import Flask, render_template, g, request, make_response, Response
import json
import requests
import slackMessages
import databaseOperations
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from slackclient import SlackClient
from cryptography.fernet import Fernet
from threading import Thread
app = Flask(__name__)

# Shared, module-level database connection used by every handler below.
db_conn = databaseOperations.connect_db()

# NOTE(review): a fresh Fernet key is generated on every process start and
# `f` is never used anywhere in the visible module (decryptToken returns the
# stored value unchanged) -- confirm whether token encryption is actually
# intended.
key = Fernet.generate_key()
f = Fernet(key)

# Background housekeeping: purge slash-command records hourly and
# do-not-disturb records daily.
scheduler = BackgroundScheduler()
scheduler.add_job(func=databaseOperations.deleteAllSlashCommnads, args=(db_conn ,), trigger="interval", seconds=3600)
scheduler.add_job(func=databaseOperations.deleteDisturb, args=(db_conn ,), trigger="interval", seconds=3600*24)
scheduler.start()
def decryptToken(team_id1):
    """Fetch the stored Slack API token for the given team id.

    NOTE(review): despite the name, no decryption happens here -- the value
    is returned exactly as stored by databaseOperations.getToken; confirm
    whether Fernet decryption was intended.
    """
    return databaseOperations.getToken(db_conn, team_id1)
def getUserList(team_id1):
    """Return a mapping of Slack user name -> Slack user id for the workspace.

    :param team_id1: team id whose stored token is used for the API call
    :returns: dict of {name: id} built from the users.list response
    """
    sc = SlackClient(decryptToken(team_id1))
    usersID = sc.api_call("users.list")
    members = usersID["members"]
    # Dict comprehension replaces the index-based loop; this also removes a
    # debug print of members[1] that raised IndexError on workspaces with
    # fewer than two members.
    userinfo = {member["name"]: member["id"] for member in members}
    return userinfo
def sendLocationSurveyOneUser(username, team_id1):
    """Send the location survey (blueprint image + zone question) to one user.

    :param username: Slack user name of the recipient
    :param team_id1: team id whose stored token is used for the API calls
    """
    client = SlackClient(decryptToken(team_id1))
    target_id = getUserList(team_id1).get(username)
    ac_zones = databaseOperations.getACzones(db_conn)
    client.api_call("chat.postMessage", channel=target_id, blocks=slackMessages.locationimage())
    client.api_call("chat.postMessage", channel=target_id, blocks=slackMessages.getLocationSurvey(ac_zones))
    return make_response("Success", 200)
def response_Interactive_Message(responseurl, text="Thanks :)"):
    """Replace the original Slack interactive message with *text*.

    Posts a ``replace_original`` payload back to the interaction's
    response_url.  (The unused local binding of the response was removed.)
    """
    resdata = {"replace_original": "true", "text": text}
    requests.post(responseurl, data=json.dumps(resdata))
def responseErrorMessage(responseurl, text="Thanks :)"):
    """Post an error-styled context block back to the interaction's response_url.

    Bug fix: the payload was previously written as a set literal containing a
    dict (``{{...}}``), which raises ``TypeError: unhashable type`` the moment
    the function runs, and the *text* argument was ignored in favor of a
    template placeholder.  The payload is now a JSON-serializable dict and
    uses *text*.
    """
    resdata = {
        "blocks": [
            {
                "type": "context",
                "elements": [
                    {
                        "text": text,
                        "color": "FF0000"
                    }
                ]
            }
        ]
    }
    requests.post(responseurl, data=json.dumps(resdata))
@app.route('/feedback-collector')
def index():
    """Service root: return a short identification string."""
    return "App for Slack results (Services)"
@app.route('/feedback-collector/health')
def health():
    """Liveness probe endpoint; always reports UP."""
    return "UP"
@app.route("/feedback-collector/slack/oauth", methods=["GET"])
def oauth():
client_id=os.environ["CLIENT_ID"]
client_secret=os.environ["CLIENT_SECRET"]
code = request.args.get("code")
uri= 'https://slack.com/api/oauth.access?code='+code+'&client_id='+client_id+'&client_secret='+client_secret
response=requests.post(uri)
response=json.loads(response.content)
print("response::",response)
token=response["access_token"]
print("token degeri",token)
databaseOperations.setToken(db_conn,token,"admin")
return make_response("App Installed",200)
@app.route("/feedback-collector/slack/onsnooze", methods=["POST"])
def onsnooze(creater,username,team_id1): #Kullanıcılar kendilerine anket gönderilmesini engellemek istediğinde bu fonksiyon kullanılır.
with app.app_context():
sc = SlackClient(decryptToken(team_id1))
userinfo = getUserList(team_id1)
for key,val in userinfo.items():
if key == username:
#print("Key : "+ key+"Username : "+username)
#print("userinfo[key] : "+userinfo[key])
sc.api_call("chat.postMessage", channel=userinfo[key], blocks=slackMessages.onSnooze())
else:
pass
databaseOperations.addSnoozeTable(db_conn,username)
return make_response("",200)
@app.route("/feedback-collector/slack/offsnooze", methods=["POST"])
def offSnooze(creater,username,team_id1): #Kullanıcılar kendilerine tekrar anket gelmesini istediklerinde bu fonksiyon çalışır.
with app.app_context():
sc = SlackClient(decryptToken(team_id1))
userinfo = getUserList(team_id1)
print("username veritabanı",username)
databaseOperations.deleteSnoozeTableName(db_conn,username)
for key, val in userinfo.items():
if key == username:
#print("Key : " + key + "Username : " + username)
#print("userinfo[key] : " + userinfo[key])
sc.api_call("chat.postMessage", channel=userinfo[key], blocks=slackMessages.offSnooze())
else:
pass
return make_response("Success", 200)
@app.route("/feedback-collector/slack/airSurvey", methods=["POST"])
def sendAirSurvey(creater,team_id1): #Bu fonksiyon hava durumu için anket gönderir
with app.app_context():
sc = SlackClient(decryptToken(team_id1))
userinfo = getUserList(team_id1)
name=databaseOperations.getSnoozeTableName(db_conn)
username = databaseOperations.getPersonalinfo(db_conn) ##personalinfo tablosundaki,stajdakilerin,isimleri
#for i in name:
#print(i[0])
#for i in username:
#print(i[0])
new = list()
tut = 0
for key,val in userinfo.items(): #tüm personel
for j in username: #stajdakiler
if j[0]==key: #sadece z
print("j[0]:"+j[0]+" key: " + key)
tut = 0
for i in name: #snooze tablosundakiler ve stajdakiler
print("i[0] : "+ i[0])
if j[0]== i[0]:
tut = 1
break
else:
tut = 0
if tut==0:
new.append(userinfo[j[0]]) #veritabanında bulunan snooze tablosunda olmayanları yeni bir listeye ekler
else:
pass
else:
pass
print(new)
for i in userinfo.values():
print(i+"\n")
for k in new:
print("+++++"+k)
if k==i:
sc.api_call("chat.postMessage", channel=i, blocks=slackMessages.getAirConSurvey()) #Yeni listedekilere anket gönderir
else:
pass
databaseOperations.addSurvey(db_conn,"Auto")
return make_response("Success",200)
@app.route("/feedback-collector/slack/locationSurvey", methods=["POST"])
def sendLocationSurvey(creater,team_id1): #Bu fonksiyon lokasyon anketini gönderir.
with app.app_context():
sc = SlackClient(decryptToken(team_id1))
userinfo = getUserList(team_id1)
print(userinfo)
zones=databaseOperations.getACzones(db_conn)
name = databaseOperations.getSnoozeTableName(db_conn)
username = databaseOperations.getPersonalinfo(db_conn) ##personalinfo tablosundaki,stajdakilerin,isimleri
# for i in name:
# print(i[0])
# for i in username:
# print(i[0])
new = list()
tut = 0
for key, val in userinfo.items(): # tüm personel
for j in username: # stajdakiler
if j[0] == key: # sadece z
print("j[0]:" + j[0] + " key: " + key)
tut = 0
for i in name: # snooze tablosundakiler ve stajdakiler
print("i[0] : " + i[0])
if j[0] == i[0]:
tut = 1
break
else:
tut = 0
if tut == 0:
new.append(userinfo[j[0]])
else:
pass
else:
pass
print(new)
for i in userinfo.values():
print(i + "\n")
for k in new:
print("+++++" + k)
if k == i:
sc.api_call("chat.postMessage", channel=i, blocks=slackMessages.getLocationSurvey(zones))
else:
pass
return make_response("Success",200)
@app.route("/feedback-collector/slack/checkAcZone", methods=["POST"])
def checkAcZone(creater,team_id1): #Kullanıcıların önceki yerlerini gösteren fonksiyondur.
with app.app_context():
sc = SlackClient(decryptToken(team_id1))
userinfo = getUserList(team_id1)
name = databaseOperations.getSnoozeTableName(db_conn)
username = databaseOperations.getPersonalinfo(db_conn) ##personalinfo tablosundaki,stajdakilerin,isimleri
# for i in name:
# print(i[0])
# for i in username:
# print(i[0])
new = list()
tut = 0
for key, val in userinfo.items(): # tüm personel
for j in username: # stajdakiler
if j[0] == key: # sadece z
print("j[0]:" + j[0] + " key: " + key)
tut = 0
for i in name: # snooze tablosundakiler ve stajdakiler
print("i[0] : " + i[0])
if j[0] == i[0]:
tut = 1
break
else:
tut = 0
if tut == 0:
new.append(userinfo[j[0]])
else:
pass
else:
pass
print(new)
for i in userinfo.values():
print(i + "\n")
for k in new:
print("+++++" + k)
if k == i:
sc.api_call("chat.postMessage", channel=i, blocks=slackMessages.checkAcZone())
else:
pass
return make_response("Success", 200)
@app.route("/feedback-collector/slack/locationimage", methods=["POST"])
def locationimage(creater,team_id1): #Blueprint i gösterir.
with app.app_context():
sc = SlackClient(decryptToken(team_id1))
userinfo = getUserList(team_id1)
name = databaseOperations.getSnoozeTableName(db_conn)
username = databaseOperations.getPersonalinfo(db_conn) ##personalinfo tablosundaki,stajdakilerin,isimleri
sc.api_call("chat.postMessage", channel=username, blocks=slackMessages.locationimage())
return make_response("Success", 200)
@app.route("/feedback-collector/slack/returns", methods=["POST"])
def message_actions():
global timeValueInt
form_json = json.loads(request.form["payload"])
print("====================", form_json)
responseurl = form_json["response_url"]
user = form_json["user"]
username = user["username"]
userid = user["id"]
returnText = "Success"
tut = 0
if "accessory" not in request.form["payload"]:
if form_json["actions"][0]["block_id"] != "aircondition_id":
chosen = form_json["actions"]
chosen = chosen[0]
chosen = chosen["value"]
text = "Thanks for voting"
print("Username2: " + username)
print("Chosen3:" + chosen)
response_Interactive_Message(responseurl, text)
returnText = username + " " + chosen
databaseOperations.addVoteRecord(db_conn, username, chosen)
elif form_json["actions"][0]["block_id"] == "zone_id":
location = databaseOperations.getPersonLocation(db_conn, username)
text = "You are in AC zone: " + str(location)
response_Interactive_Message(responseurl, text)
else:
pass
else:
if form_json["actions"][0]["action_id"] == "datepickerbike": # TAKVİM
tut = 1
chosen = form_json["actions"]
chosen = chosen[0]
selecteddate = chosen["selected_date"]
print("Usename3:" + username)
print("Selectedvalue2:" + selecteddate)
returnText = selecteddate
databaseOperations.adddateRecordbiking(db_conn, username, selecteddate, "", "")
response_Interactive_Message(responseurl, "Selected date:"+selecteddate)
data = databaseOperations.emptyorfullbiking(db_conn)
full = list()
# seçilen güne göre dolu odaları ve saatleri gösterir.
for i in data:
if (i[0] == selecteddate):
print("Dolu bisikletler: ", str(i[1]), "Dolu saatler:", str(i[2]))
full.append((str(i[1]), str(i[2])))
text = "Selected to" + selecteddate + "\n"
kontrol = 0
for i in full:
if (i[0] == 'None' or i[1] == 'None'):
pass
else:
kontrol = 1
text += i[0] + " is full between " + i[1] + "\n"
if (kontrol == 1):
text += " hours. You can select the biking and time according to these values."
response_Interactive_Message(responseurl, text)
if form_json["actions"][0]["action_id"] == "staticselectbike": #BİKE CHOOSE
print("====================staticselectbike", form_json)
actions = form_json["actions"]
actions = actions[0]
selectedoption = actions["selected_option"]
selectedValue = selectedoption["value"]
print("Burası Value değeri",selectedValue)
if (selectedValue == "Bike 1" or selectedValue == "Bike 2" or selectedValue == "Bike 3"):
text = "Selected bike:" + selectedValue
returnText = username + " " + selectedValue
databaseOperations.adddateRecordbiking(db_conn, username, "", selectedValue, "")
response_Interactive_Message(responseurl, text)
if form_json["actions"][0]["action_id"] == "clock_id3": #saat
actions = form_json["actions"]
actions = actions[0]
selectedoption = actions["selected_option"]
#başlangıç saati
selectedValue = selectedoption["value"]
print("*****bisiklet kullanımı başlangıç saati*****:",selectedValue)
databaseOperations.addtemporarytimes(db_conn,selectedValue)
returnText = "Start time:" + selectedValue
response_Interactive_Message(responseurl, returnText)
if form_json["actions"][0]["action_id"] == "clock_id4": # saat
actions = form_json["actions"]
actions = actions[0]
selectedoption = actions["selected_option"]
# bisiklet kullanımı bitiş saati
finishtime = selectedoption["value"]
print("Bitiş saati:", finishtime)
returnText = username + " " + finishtime
# önceki işlemler
veri = databaseOperations.taketimes(db_conn)
#bisiklet kullanımı başlangıç saati
starttime=veri[0][0]
print("veri",starttime)
data = databaseOperations.emptyorfullbiking(db_conn)
full = list()
#Seçilen Son tarih
lastdate = databaseOperations.lastdateforbiking(db_conn)
lastdate = lastdate[0][0]
print("lastdate", lastdate)
#Seçilen Son bisiklet
lastbike=databaseOperations.lastbikeforbiking(db_conn)
lastbike=lastbike[0][0]
#Karşılaştırma için verilerin hazırlanması
s=starttime[0:2] + starttime[3:5]
s1=int(s)
print("s1",s1)
s22=finishtime[0:2] + finishtime[3:5]
s2=int(s22)
print("s2",s2)
timeresult = starttime + "-" + finishtime
listeclocks=list()
k=0
for i in data:
if (i[0] == lastdate and i[1]==lastbike):
print("i[3]",i[2])
if(i[2] == None):
pass
else:
print("i deger", i)
listeclocks = i[2].split("-")
a = listeclocks[0]
b = listeclocks[1]
print("listeclocks[0]", listeclocks[0])
print("listeclocks[1]", listeclocks[1])
# veritabanındaki veriler
c = a[0:2] + a[3:5]
x = int(c)
print("veritabanı saat baslangıc",x)
d = b[0:2] + b[3:5]
y = int(d)
print("veritabanı saat bitiş",y)
if not ((s1 < x and s2 <= x) or (s1 >= y and s2 > y)):
k = 1
else:
pass
else:
pass
if(k==0):
if (s2 > s1):
databaseOperations.adddateRecordbiking(db_conn, username, "", "", timeresult)
response_Interactive_Message(responseurl, "End time:" + finishtime+" \n Clock selected to:"+timeresult)
else:
response_Interactive_Message(responseurl, "*Select an end time greater than the start time*.\nPlease call '/biking' again and select a new one.")
databaseOperations.deletebikinglastrow(db_conn)
else:
response_Interactive_Message(responseurl,"*Clock selected to:" + timeresult + " but \n *This bike is full on dates*. Please call '/biking' again and select a new one.")
databaseOperations.deletebikinglastrow(db_conn)
if form_json["actions"][0]["action_id"] == "datepickerroom": # datepicker da bir yere tıklandığında (ROOM TAKVİM)
tut = 1
chosen = form_json["actions"]
chosen = chosen[0]
selecteddate = chosen["selected_date"]
print("Usename3:" + username)
print("Selectedvalue2:" + selecteddate)
returnText = selecteddate
databaseOperations.adddateRecord(db_conn, username, selecteddate, "", "")
data = databaseOperations.emptyorfull(db_conn)
full = list()
# seçilen | |
= py - glareSize
if boundYl < 0: boundYl = 0
boundYu = py + glareSize
if boundYu > rows - 1: boundYu = rows - 1
# the pixel value of the sun position
sunpxl = 125
boundXl = px - glareSize
boundXu = px + glareSize
if boundXl < 0: # too left then part of right
print ('Too left, move to right')
boundXl2 = boundXl + cols - 1
if bands == 1:
cylindrialPanoImg[boundYl:boundYu,boundXl2:cols-1] = sunpxl
cylindrialPanoImg[boundYl:boundYu,0:boundXu] = sunpxl
else:
cylindrialPanoImg[boundYl:boundYu,boundXl2:cols-1, 0] = sunpxl
cylindrialPanoImg[boundYl:boundYu,boundXl2:cols-1, 1] = 0
cylindrialPanoImg[boundYl:boundYu,boundXl2:cols-1, 2] = 0
cylindrialPanoImg[boundYl:boundYu,0:boundXu, 0] = sunpxl
cylindrialPanoImg[boundYl:boundYu,0:boundXu, 1] = 0
cylindrialPanoImg[boundYl:boundYu,0:boundXu, 2] = 0
elif boundXu > cols - 1:
print ("too right, move to left")
boundXu2 = boundXu - cols + 1
if bands == 1:
cylindrialPanoImg[boundYl:boundYu,boundXl:cols-1] = sunpxl
cylindrialPanoImg[boundYl:boundYu,0:boundXu2] = sunpxl
else:
cylindrialPanoImg[boundYl:boundYu,boundXl:cols-1, 0] = sunpxl
cylindrialPanoImg[boundYl:boundYu,boundXl:cols-1, 1] = 0
cylindrialPanoImg[boundYl:boundYu,boundXl:cols-1, 2] = 0
cylindrialPanoImg[boundYl:boundYu,0:boundXu2, 0] = sunpxl
cylindrialPanoImg[boundYl:boundYu,0:boundXu2, 1] = 0
cylindrialPanoImg[boundYl:boundYu,0:boundXu2, 2] = 0
else:
if bands == 1:
cylindrialPanoImg[boundYl:boundYu,boundXl:boundXu] = sunpxl
else:
cylindrialPanoImg[boundYl:boundYu,boundXl:boundXu, 0] = sunpxl
cylindrialPanoImg[boundYl:boundYu,boundXl:boundXu, 1] = 0
cylindrialPanoImg[boundYl:boundYu,boundXl:boundXu, 2] = 0
# cylindrialPanoImg = ShiftCylindricalPanoImg(cylindrialPanoImg, -1*yaw)
# export the outputCylindricalPanoImage as a new Image file
imgSunpath = Image.fromarray(cylindrialPanoImg)
imgSunpath.save(plotedCylindImgFile)
del imgSunpath
return cylindrialPanoImg, px, py
# plot the sun path in one day on the cylindrical panorama, using noaa method to estimate the sun position
def plot_SunPathOnCylindricalImage_noaa(cylindrialPanoImg, plotedCylindImg, lat, lon, zone, year, month, day, yaw):
    '''
    Plot the one-day sun path on a cylindrical panorama using the NOAA
    solar-position algorithm.

    Parameters:
        cylindrialPanoImg: the input cylindrical image, numpy array
        plotedCylindImg: output path for the plotted panorama with sun positions
        lat, lon: the coordinate of the panorama
        zone: the UTC offset of the area (e.g. Boston is 5)
        year, month, day: date used to calculate the solar positions
        yaw: yaw angle of the driving car; north is zero, clockwise
    '''
    from PIL import Image
    import numpy as np
    import os, os.path
    import math
    import SunposLib as sunpos

    # rotate the panorama so its central column points north
    northAligned = ShiftCylindricalPanoImg(cylindrialPanoImg, yaw)

    print ('The month is:-------', month)
    second = 30
    # sample the sun position every 10 minutes between 05:00 and 19:50
    for hour in range(5, 20):
        for minute in range(0, 60, 10):
            [azimuth, sunele] = sunpos.calcSun(lat, lon, zone, year, month, day, hour, minute, second)
            print ('The hour, sunele and azimuth are:=======', hour, sunele, azimuth)
            [northAligned, px, py] = plot_SunPosOnCylindricalImage_noaa(northAligned, plotedCylindImg, azimuth, sunele)

    # export the annotated panorama as a new image file
    imgSunpath = Image.fromarray(northAligned)
    imgSunpath.save(plotedCylindImg)
    del cylindrialPanoImg, imgSunpath, northAligned
# Judge if the site is shaded or not based on the pysolar, may not work well
def Shaded_judgement(skyImg, skypixelLabel, glareSize, currentTime, lon, lat):
    '''
    Judge whether a site is exposed to sunlight by projecting the sun
    position (computed with pysolar) onto a classified hemispherical image.

    Parameters:
        skyImg: the classified hemispherical image, 2-D numpy array
        skypixelLabel: the pixel value of the sky in the classification result
        glareSize: half-size of the sun glare window, default is 4
        currentTime: timezone-aware datetime used for the solar position
        lon, lat: the coordinate of the site
    Returns:
        shade: 0 if the glare window is mostly open sky (not shaded), else 1
    '''
    from PIL import Image
    import numpy as np
    import os, os.path
    import math
    from pysolar.solar import get_altitude, get_azimuth, radiation

    # BUG FIX: numpy 2-D shape is (rows, cols); the original unpacked it as
    # [cols, rows], which only worked because hemispherical images are square.
    [rows, cols] = skyImg.shape

    sunele = get_altitude(lat, lon, currentTime)        # sun elevation angle, degrees
    azimuth = abs(get_azimuth(lat, lon, currentTime))   # pysolar azimuth: zero at south
    # adjusted sun azimuthal angle, the north is the zero
    azimuth = 180 + azimuth
    if azimuth > 360: azimuth = azimuth - 360
    # adjust the azimuth to the image azimuth, the east is the zero
    azimuth = 360 - azimuth + 90
    if azimuth > 360: azimuth = azimuth - 360

    sunele = sunele*np.pi/180.0
    azimuth = azimuth*np.pi/180.0

    # BASED ON THE AZIMUTH AND SUN ELEVATION, LOCATE THE PIXEL ON THE FISHEYE IMAGE
    R = int(0.5*rows)
    if sunele < 0: sunele = 0
    # equidistant projection: radius grows linearly with zenith angle
    r = (90 - sunele*180/np.pi)/90.0*R
    px = int(r*math.cos(azimuth) + int(0.5*cols)) - 1
    py = int(int(0.5*rows) - r*math.sin(azimuth)) - 1

    # clamp the sun-glare window to the image bounds
    # BUG FIX: x bounds are clamped against cols and y bounds against rows;
    # the original crossed them (boundXu vs rows, boundYu vs cols).
    boundXl = max(px - glareSize, 0)
    boundXu = min(px + glareSize, cols - 1)
    boundYl = max(py - glareSize, 0)
    boundYu = min(py + glareSize, rows - 1)

    # count sky pixels inside the glare window; sky label comes from the
    # upstream segmentation (e.g. pspnet uses 2 — it may differ per model)
    idx = np.where(skyImg[boundYl:boundYu, boundXl:boundXu] == skypixelLabel)
    if len(idx[0])/(4*glareSize*glareSize) > 0.5:
        # mostly open sky around the sun position -> not shaded
        shade = 0
    else:
        # mostly obstruction -> shaded
        shade = 1
    del skyImg
    return shade
# Judge if the site is shaded or not based on the hemispherical image, using noaa method
# azimuth and sunele are in degrees
def Shaded_judgement_noaa(skyImg, obstructionpixelLabel, glareSize, azimuth, sunele):
'''
# This function is used to judge if one site is exposed to sunlight or not
# by overlaying the sunposition on the skyImg
# Copyright (C) <NAME>, MIT Senseable City Lab
# First version Dec 23, 2017
# modified by Dec 28, 2017
# last modified by Feb 10, 2018
Parameters:
panoImgFile: the input classified hemispherical image, numpy array
obstructionpixelLabel: the pixel value of the obstruction in the segmented classification result
glareSize: The size of the sun glare, default is 4
azimuth, sunele: the azimuth and sun elevation angle
'''
from PIL import Image
import numpy as np
import os, os.path
import math
# read the hemispherical image
# skyImg = np.array(Image.open(panoImgFile))
[cols, rows] = skyImg.shape
# considering the different coordinate system, the sun azimuth calculated from sun pos
# need to convert to the same coordinate system with the hemispherical image
# the azimuth start from north, with clockwise; the azimuth_skyimg starts from east direction anti-clockwisely
azimuth_skyimg = -(azimuth - 90)
if azimuth_skyimg < 0: azimuth_skyimg = azimuth_skyimg + 360
sunele = sunele*np.pi/180.0
azimuth = azimuth_skyimg*np.pi/180.0
# BASED ON THE AZIMUTH AND SUN ELEVATION TO LOCATE THE CORRESPODING PIXELS ON THE FISHEYE IMAGE
R = int(0.5*rows)
# get the r in the polar coordinate
if sunele < 0: sunele = 0
# Using different coordinate, equidistance or equi-anglular hemispherical
# r = math.cos(sunele)*R
r = (90 - sunele*180/np.pi)/90.0*R
# get the coordinate of the point on the fisheye images
px = int(r*math.cos(azimuth) + int(0.5*cols)) - 1
py = int(int(0.5*rows) - r*math.sin(azimuth)) - 1
# the sun glare | |
None,
modifier_func2 = None,
means = 'geometric')
class MPO_meta:
    """Skeleton for a multi-property objective (MPO).

    Combines a (currently empty) list of component scores with either the
    geometric or arithmetic mean, looked up in the module-level mean2func.
    NOTE: this is a stub — no component scorers are populated yet.
    """

    def __init__(self, means):
        assert means in ['geometric', 'arithmetic']
        # resolve the aggregation function once at construction time
        self.mean_func = mean2func[means]

    def __call__(self, test_smiles):
        molecule = smiles_to_rdkit_mol(test_smiles)
        score_lst = []  # stub: component scores would be appended here
        return self.mean_func(score_lst)
def osimertinib_mpo(test_smiles):
    """Osimertinib MPO: geometric mean of TPSA, logP and two fingerprint
    similarity scores against osimertinib (FCFP4 and ECFP6)."""
    if 'osimertinib_fp_fcfc4' not in globals():
        # lazily cache the reference fingerprints at module level
        global osimertinib_fp_fcfc4, osimertinib_fp_ecfc6
        osimertinib_smiles = 'COc1cc(N(C)CCN(C)C)c(NC(=O)C=C)cc1Nc2nccc(n2)c3cn(C)c4ccccc34'
        osimertinib_fp_fcfc4 = smiles_2_fingerprint_FCFP4(osimertinib_smiles)
        osimertinib_fp_ecfc6 = smiles_2_fingerprint_ECFP6(osimertinib_smiles)

    mol = smiles_to_rdkit_mol(test_smiles)
    query_fcfc4 = smiles_2_fingerprint_FCFP4(test_smiles)
    query_ecfc6 = smiles_2_fingerprint_ECFP6(test_smiles)

    # each component is squashed into [0, 1] by its modifier
    tpsa_score = MaxGaussianModifier(mu=100, sigma=10)(Descriptors.TPSA(mol))
    logp_score = MinGaussianModifier(mu=1, sigma=1)(Descriptors.MolLogP(mol))
    similarity_v1 = ClippedScoreModifier(upper_x=0.8)(
        DataStructs.TanimotoSimilarity(osimertinib_fp_fcfc4, query_fcfc4))
    similarity_v2 = MinGaussianModifier(mu=0.85, sigma=0.1)(
        DataStructs.TanimotoSimilarity(osimertinib_fp_ecfc6, query_ecfc6))

    return gmean([tpsa_score, logp_score, similarity_v1, similarity_v2])
def fexofenadine_mpo(test_smiles):
    """Fexofenadine MPO: geometric mean of clipped AP-fingerprint similarity,
    TPSA (max-Gaussian, mu=90) and logP (min-Gaussian, mu=4) scores."""
    if 'fexofenadine_fp' not in globals():
        # cache the reference fingerprint once at module level
        global fexofenadine_fp
        fexofenadine_smiles = 'CC(C)(C(=O)O)c1ccc(cc1)C(O)CCCN2CCC(CC2)C(O)(c3ccccc3)c4ccccc4'
        fexofenadine_fp = smiles_2_fingerprint_AP(fexofenadine_smiles)

    mol = smiles_to_rdkit_mol(test_smiles)
    query_fp = smiles_2_fingerprint_AP(test_smiles)

    tpsa_score = MaxGaussianModifier(mu=90, sigma=10)(Descriptors.TPSA(mol))
    logp_score = MinGaussianModifier(mu=4, sigma=1)(Descriptors.MolLogP(mol))
    sim_score = ClippedScoreModifier(upper_x=0.8)(
        DataStructs.TanimotoSimilarity(query_fp, fexofenadine_fp))

    return gmean([tpsa_score, logp_score, sim_score])
def ranolazine_mpo(test_smiles):
    """Ranolazine MPO: geometric mean of clipped AP similarity, TPSA, logP
    and fluorine-count (target one F atom) scores."""
    if 'ranolazine_fp' not in globals():
        # cache the reference fingerprint and the F-atom counter
        global ranolazine_fp, fluorine_counter
        ranolazine_smiles = 'COc1ccccc1OCC(O)CN2CCN(CC(=O)Nc3c(C)cccc3C)CC2'
        ranolazine_fp = smiles_2_fingerprint_AP(ranolazine_smiles)
        fluorine_counter = AtomCounter('F')

    mol = smiles_to_rdkit_mol(test_smiles)
    query_fp = smiles_2_fingerprint_AP(test_smiles)

    tpsa_score = MaxGaussianModifier(mu=95, sigma=20)(Descriptors.TPSA(mol))
    logp_score = MaxGaussianModifier(mu=7, sigma=1)(Descriptors.MolLogP(mol))
    sim_score = ClippedScoreModifier(upper_x=0.7)(
        DataStructs.TanimotoSimilarity(query_fp, ranolazine_fp))
    fluorine_score = GaussianModifier(mu=1, sigma=1.0)(fluorine_counter(mol))

    return gmean([tpsa_score, logp_score, sim_score, fluorine_score])
def perindopril_mpo(test_smiles):
    """Perindopril MPO: geometric mean of raw ECFP4 similarity and a
    Gaussian-scored aromatic-ring count (target: 2 rings).
    Note: the similarity term is deliberately unmodified."""
    if 'perindopril_fp' not in globals():
        global perindopril_fp, num_aromatic_rings
        perindopril_smiles = 'O=C(OCC)C(NC(C(=O)N1C(C(=O)O)CC2CCCCC12)C)CCC'
        perindopril_fp = smiles_2_fingerprint_ECFP4(perindopril_smiles)

        def num_aromatic_rings(mol):
            # thin wrapper kept as a module-level global for reuse
            return rdMolDescriptors.CalcNumAromaticRings(mol)

    mol = smiles_to_rdkit_mol(test_smiles)
    query_fp = smiles_2_fingerprint_ECFP4(test_smiles)

    sim_score = DataStructs.TanimotoSimilarity(query_fp, perindopril_fp)
    ring_score = GaussianModifier(mu=2, sigma=0.5)(num_aromatic_rings(mol))

    return gmean([sim_score, ring_score])
def amlodipine_mpo(test_smiles):
    """Amlodipine MPO: geometric mean of raw ECFP4 similarity and a
    Gaussian-scored total-ring count (target: 3 rings).
    Note: the similarity term is deliberately unmodified."""
    if 'amlodipine_fp' not in globals():
        global amlodipine_fp, num_rings
        amlodipine_smiles = 'Clc1ccccc1C2C(=C(/N/C(=C2/C(=O)OCC)COCCN)C)\C(=O)OC'
        amlodipine_fp = smiles_2_fingerprint_ECFP4(amlodipine_smiles)

        def num_rings(mol):
            # thin wrapper kept as a module-level global for reuse
            return rdMolDescriptors.CalcNumRings(mol)

    mol = smiles_to_rdkit_mol(test_smiles)
    query_fp = smiles_2_fingerprint_ECFP4(test_smiles)

    sim_score = DataStructs.TanimotoSimilarity(query_fp, amlodipine_fp)
    ring_score = GaussianModifier(mu=3, sigma=0.5)(num_rings(mol))

    return gmean([sim_score, ring_score])
def zaleplon_mpo(test_smiles):
    """Zaleplon MPO: geometric mean of ECFP4 similarity to zaleplon and the
    isomer score for the molecular formula C19H17N3O2."""
    if 'zaleplon_fp' not in globals():
        global zaleplon_fp, isomer_scoring_C19H17N3O2
        zaleplon_smiles = 'O=C(C)N(CC)C1=CC=CC(C2=CC=NC3=C(C=NN23)C#N)=C1'
        zaleplon_fp = smiles_2_fingerprint_ECFP4(zaleplon_smiles)
        # the target here is a molecular formula (the parameter name is a misnomer)
        isomer_scoring_C19H17N3O2 = Isomer_scoring(target_smiles = 'C19H17N3O2')

    query_fp = smiles_2_fingerprint_ECFP4(test_smiles)
    sim_score = DataStructs.TanimotoSimilarity(query_fp, zaleplon_fp)
    isomer_score = isomer_scoring_C19H17N3O2(test_smiles)
    return gmean([sim_score, isomer_score])
def sitagliptin_mpo(test_smiles):
    """Sitagliptin MPO (GuacaMol 'sitagliptin replacement'): reward molecules
    DISSIMILAR to sitagliptin that still match its logP, TPSA and the isomer
    formula C16H15F6N5O.

    Returns the geometric mean of four modifier-scaled scores, each in [0, 1].
    """
    if 'sitagliptin_fp_ecfp4' not in globals().keys():
        # lazily build the reference fingerprint and the score modifiers once
        global sitagliptin_fp_ecfp4, sitagliptin_logp_modifier, sitagliptin_tpsa_modifier, \
               isomers_scoring_C16H15F6N5O, sitagliptin_similar_modifier
        sitagliptin_smiles = 'Fc1cc(c(F)cc1F)CC(N)CC(=O)N3Cc2nnc(n2CC3)C(F)(F)F'
        sitagliptin_fp_ecfp4 = smiles_2_fingerprint_ECFP4(sitagliptin_smiles)
        sitagliptin_mol = Chem.MolFromSmiles(sitagliptin_smiles)
        sitagliptin_logp = Descriptors.MolLogP(sitagliptin_mol)
        sitagliptin_tpsa = Descriptors.TPSA(sitagliptin_mol)
        sitagliptin_logp_modifier = GaussianModifier(mu=sitagliptin_logp, sigma=0.2)
        sitagliptin_tpsa_modifier = GaussianModifier(mu=sitagliptin_tpsa, sigma=5)
        isomers_scoring_C16H15F6N5O = Isomer_scoring('C16H15F6N5O')
        # mu=0 rewards LOW similarity (a "replacement" must be dissimilar)
        sitagliptin_similar_modifier = GaussianModifier(mu=0, sigma=0.1)

    molecule = Chem.MolFromSmiles(test_smiles)
    fp_ecfp4 = smiles_2_fingerprint_ECFP4(test_smiles)
    # BUG FIX: the modifiers above were constructed but never applied — raw
    # (unbounded) logP/TPSA values and the unmodified similarity were fed
    # straight into the geometric mean. Scale every component into [0, 1].
    logp_score = sitagliptin_logp_modifier(Descriptors.MolLogP(molecule))
    tpsa_score = sitagliptin_tpsa_modifier(Descriptors.TPSA(molecule))
    isomer_score = isomers_scoring_C16H15F6N5O(test_smiles)
    similarity_value = sitagliptin_similar_modifier(
        DataStructs.TanimotoSimilarity(fp_ecfp4, sitagliptin_fp_ecfp4))
    return gmean([similarity_value, logp_score, tpsa_score, isomer_score])
def get_PHCO_fingerprint(mol):
    """Generate the Gobbi 2D pharmacophore fingerprint for an RDKit mol.

    The rdkit Pharm2D modules are imported lazily and cached as globals so
    repeated calls skip the import machinery.
    """
    if 'Gobbi_Pharm2D' not in globals():
        global Gobbi_Pharm2D, Generate
        from rdkit.Chem.Pharm2D import Generate, Gobbi_Pharm2D
    return Generate.Gen2DFingerprint(mol, Gobbi_Pharm2D.factory)
class SMARTS_scoring:
    """Binary SMARTS-match scorer.

    With inverse=False the score is 1.0 when the pattern is present;
    with inverse=True the score is 1.0 when the pattern is absent.
    """

    def __init__(self, target_smarts, inverse):
        self.target_mol = Chem.MolFromSmarts(target_smarts)
        self.inverse = inverse

    def __call__(self, mol):
        has_match = len(mol.GetSubstructMatches(self.target_mol)) > 0
        if has_match:
            return 0.0 if self.inverse else 1.0
        return 1.0 if self.inverse else 0.0
def deco_hop(test_smiles):
    """Deco-hop score: arithmetic mean of clipped pharmacophore similarity,
    two decoration-avoidance SMARTS scores and one scaffold-keeping score."""
    if 'pharmacophor_fp' not in globals():
        # cache the reference fingerprint and the three SMARTS scorers
        global pharmacophor_fp, deco1_smarts_scoring, deco2_smarts_scoring, scaffold_smarts_scoring
        pharmacophor_smiles = 'CCCOc1cc2ncnc(Nc3ccc4ncsc4c3)c2cc1S(=O)(=O)C(C)(C)C'
        pharmacophor_fp = get_PHCO_fingerprint(smiles_to_rdkit_mol(pharmacophor_smiles))
        deco1_smarts_scoring = SMARTS_scoring(target_smarts = 'CS([#6])(=O)=O', inverse = True)
        deco2_smarts_scoring = SMARTS_scoring(target_smarts = '[#7]-c1ccc2ncsc2c1', inverse = True)
        scaffold_smarts_scoring = SMARTS_scoring(target_smarts = '[#7]-c1n[c;h1]nc2[c;h1]c(-[#8])[c;h0][c;h1]c12', inverse = False)

    mol = smiles_to_rdkit_mol(test_smiles)
    sim_score = ClippedScoreModifier(upper_x=0.85)(
        DataStructs.TanimotoSimilarity(get_PHCO_fingerprint(mol), pharmacophor_fp))
    components = [
        sim_score,
        deco1_smarts_scoring(mol),
        deco2_smarts_scoring(mol),
        scaffold_smarts_scoring(mol),
    ]
    return np.mean(components)
def scaffold_hop(test_smiles):
    """Scaffold-hop score: arithmetic mean of clipped pharmacophore similarity,
    a decoration-keeping SMARTS score and a scaffold-avoidance SMARTS score."""
    required = ('pharmacophor_fp', 'scaffold_smarts_scoring', 'deco_smarts_scoring')
    if any(key not in globals() for key in required):
        # (re)build the shared globals; note this overwrites the scorers that
        # deco_hop installs, with inverse flags flipped for this benchmark
        global pharmacophor_fp, deco_smarts_scoring, scaffold_smarts_scoring
        pharmacophor_smiles = 'CCCOc1cc2ncnc(Nc3ccc4ncsc4c3)c2cc1S(=O)(=O)C(C)(C)C'
        pharmacophor_fp = get_PHCO_fingerprint(smiles_to_rdkit_mol(pharmacophor_smiles))
        deco_smarts_scoring = SMARTS_scoring(
            target_smarts = '[#6]-[#6]-[#6]-[#8]-[#6]~[#6]~[#6]~[#6]~[#6]-[#7]-c1ccc2ncsc2c1',
            inverse=False)
        scaffold_smarts_scoring = SMARTS_scoring(
            target_smarts = '[#7]-c1n[c;h1]nc2[c;h1]c(-[#8])[c;h0][c;h1]c12',
            inverse=True)

    mol = smiles_to_rdkit_mol(test_smiles)
    sim_score = ClippedScoreModifier(upper_x=0.75)(
        DataStructs.TanimotoSimilarity(get_PHCO_fingerprint(mol), pharmacophor_fp))
    components = [sim_score, deco_smarts_scoring(mol), scaffold_smarts_scoring(mol)]
    return np.mean(components)
def valsartan_smarts(test_smiles):
    """Valsartan-SMARTS MPO: require the valsartan SMARTS substructure while
    matching sitagliptin's logP, TPSA and BertzCT via Gaussian modifiers;
    combined by geometric mean (so a missing substructure zeroes the score)."""
    if 'valsartan_logp_modifier' not in globals():
        global valsartan_mol, valsartan_logp_modifier, valsartan_tpsa_modifier, valsartan_bertz_modifier
        valsartan_pattern = 'CN(C=O)Cc1ccc(c2ccccc2)cc1' ### smarts
        valsartan_mol = Chem.MolFromSmarts(valsartan_pattern)
        sitagliptin_smiles = 'NC(CC(=O)N1CCn2c(nnc2C(F)(F)F)C1)Cc1cc(F)c(F)cc1F' ### other mol
        reference = Chem.MolFromSmiles(sitagliptin_smiles)
        # center each property modifier on sitagliptin's value
        valsartan_logp_modifier = GaussianModifier(mu=Descriptors.MolLogP(reference), sigma=0.2)
        valsartan_tpsa_modifier = GaussianModifier(mu=Descriptors.TPSA(reference), sigma=5)
        valsartan_bertz_modifier = GaussianModifier(mu=Descriptors.BertzCT(reference), sigma=30)

    mol = smiles_to_rdkit_mol(test_smiles)
    smarts_score = 1.0 if len(mol.GetSubstructMatches(valsartan_mol)) > 0 else 0.0
    logp_score = valsartan_logp_modifier(Descriptors.MolLogP(mol))
    tpsa_score = valsartan_tpsa_modifier(Descriptors.TPSA(mol))
    bertz_score = valsartan_bertz_modifier(Descriptors.BertzCT(mol))
    return gmean([smarts_score, tpsa_score, logp_score, bertz_score])
###########################################################################
### END of Guacamol
###########################################################################
'''
Synthesizability from a full retrosynthetic analysis
Including:
1. MIT ASKCOS
ASKCOS (https://askcos.mit.edu) is an open-source software
framework that integrates efforts to generalize known chemistry
to new substrates by learning to apply retrosynthetic transformations,
to identify suitable reaction conditions, and to evaluate whether
reactions are likely to be successful. The data-driven models are trained
with USPTO and Reaxys databases.
Reference:
https://doi.org/10.1021/acs.jcim.0c00174
2. IBM_RXN
	IBM RXN (https://rxn.res.ibm.com) is an AI platform integrating
forward reaction prediction and retrosynthetic analysis. The
backend of the IBM RXN retrosynthetic analysis is Molecular
Transformer model (see reference). The model was mainly trained
with USPTO, Pistachio databases.
Reference:
https://doi.org/10.1021/acscentsci.9b00576
'''
def tree_analysis(current):
    """
    Analyze the result of the ASKCOS tree builder.

    Computes: number of synthesis steps, the product of step plausibilities
    (Π plausibility), whether a route was found, and the starting-material
    price. On a celery error every value is -1.

    Returns:
        num_path: number of retrosynthetic paths found
        status: dict of {depth: child count}, same convention as ASKCOS
        num_step: number of steps (11 when no route exists)
        p_score: Π plausibility (0 when not synthesizable)
        synthesizability: 1 if a route was found, else 0
        price: price to synthesize the query compound (-1 when no route)
    """
    if 'error' in current:
        return -1, {}, 11, -1, -1, -1
    if 'price' in current:
        # the compound itself is purchasable
        return 0, {}, 0, 1, 1, current['price']

    num_path = len(current['trees'])
    if num_path != 0:
        root = current['trees'][0]
        if root['ppg'] != 0:
            # root is directly purchasable
            return 0, {}, 0, 1, 1, root['ppg']
        frontier = [root]
    else:
        frontier = []

    depth = 0
    p_score = 1
    status = {0: 1}
    price = 0
    # walk the tree level by level; half-integer depths are reaction nodes
    # (accumulate plausibility), integer depths are chemical nodes (accumulate price)
    while True:
        depth += 0.5
        children = []
        for node in frontier:
            children.extend(node['children'])
        if not children:
            break
        if depth % 1 != 0:
            for reaction in children:
                p_score = p_score * reaction['plausibility']
        else:
            for chemical in children:
                price += chemical['ppg']
        status[depth] = len(children)
        frontier = children

    synthesizability = 1 if len(status) > 1 else 0
    if int(depth - 0.5) == 0:
        # no expansion happened: report "no route" sentinels
        depth = 11
        price = -1
    else:
        depth = int(depth - 0.5)
    return num_path, status, depth, p_score * synthesizability, synthesizability, price
def askcos(smiles, host_ip, output='plausibility', save_json=False, file_name='tree_builder_result.json', num_trials=5,
           max_depth=9, max_branching=25, expansion_time=60, max_ppg=100, template_count=1000, max_cum_prob=0.999,
           chemical_property_logic='none', max_chemprop_c=0, max_chemprop_n=0, max_chemprop_o=0, max_chemprop_h=0,
           chemical_popularity_logic='none', min_chempop_reactants=5, min_chempop_products=5, filter_threshold=0.1, return_first='true'):
    """
    The ASKCOS retrosynthetic analysis oracle function.
    Please refer https://github.com/connorcoley/ASKCOS to run the ASKCOS with docker on a server to receive requests.
    """
    if output not in ['num_step', 'plausibility', 'synthesizability', 'price']:
        raise NameError("This output value is not implemented. Please select one from 'num_step', 'plausibility', 'synthesizability', 'price'.")
    import json, requests

    # first ask the price endpoint: a non-zero price means purchasable
    resp = requests.get(host_ip + '/api/price/', params={'smiles': smiles}, verify=False)
    if resp.json()['price'] == 0:
        # not purchasable — run the tree builder with the full parameter set
        params = {
            'smiles': smiles,
            # optional
            'max_depth': max_depth,
            'max_branching': max_branching,
            'expansion_time': expansion_time,
            'max_ppg': max_ppg,
            'template_count': template_count,
            'max_cum_prob': max_cum_prob,
            'chemical_property_logic': chemical_property_logic,
            'max_chemprop_c': max_chemprop_c,
            'max_chemprop_n': max_chemprop_n,
            'max_chemprop_o': max_chemprop_o,
            'max_chemprop_h': max_chemprop_h,
            'chemical_popularity_logic': chemical_popularity_logic,
            'min_chempop_reactants': min_chempop_reactants,
            'min_chempop_products': min_chempop_products,
            'filter_threshold': filter_threshold,
            'return_first': return_first
        }
        # retry the request up to num_trials times on a celery/server error
        for trial in range(num_trials):
            print('Trying to send the request, for the %i times now' % (trial + 1))
            resp = requests.get(host_ip + '/api/treebuilder/', params=params, verify=False)
            if 'error' not in resp.json().keys():
                break

    if save_json:
        with open(file_name, 'w') as f_data:
            json.dump(resp.json(), f_data)

    num_path, status, depth, p_score, synthesizability, price = tree_analysis(resp.json())
    # output was validated above, so this lookup always succeeds
    return {'plausibility': p_score,
            'num_step': depth,
            'synthesizability': synthesizability,
            'price': price}[output]
def ibm_rxn(smiles, api_key, output='confidence', sleep_time=30):
"""
This function is modified from Dr. <NAME>'s code
"""
try:
from rxn4chemistry import RXN4ChemistryWrapper
except:
print_sys("Please install rxn4chemistry | |
import logging
import os
import pathlib
import sys
import time
from queue import Empty, Queue
from threading import Event
from typing import Optional, Sequence
import numpy as np
import torch
from google.cloud import container_v1 as container
from rich.progress import (
BarColumn,
Progress,
ProgressColumn,
Task,
Text,
TimeRemainingColumn,
)
from tritonclient import grpc as triton
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
from hermes import quiver as qv # noqa
from hermes.cloudbreak.clouds import google as cb # noqa
from hermes.typeo import typeo # noqa
class IdentityModel(torch.nn.Module):
    """Simple model which performs an identity transformation by multiplying
    the input with an identity matrix.

    Args:
        size:
            The size of the dimension being transformed
    """

    def __init__(self, size: int = 10):
        super().__init__()
        # square identity weight; x @ I == x
        self.W = torch.eye(size)

    def forward(self, x):
        return x @ self.W
class Throttle:
    """Context manager that paces repeated calls toward ``target_rate`` per
    second.

    ``alpha`` is the exponential-smoothing factor applied to the sleep-time
    correction term; state is cleared on exit so the object can be reused.
    """

    def __init__(self, target_rate: float, alpha: float = 0.9):
        self.target_rate = target_rate
        self.alpha = alpha
        self.unset()

    def unset(self):
        # reset all counters; rate becomes undefined until __enter__
        self._n = 0
        self._delta = 0
        self._start_time = None
        self._last_time = None

    @property
    def rate(self):
        """Observed rate in calls/second, or None before the first use."""
        if self._start_time is None:
            return None
        elapsed = time.time() - self._start_time
        return self._n / elapsed

    @property
    def sleep_time(self):
        """Target inter-call interval, adjusted by the smoothed correction."""
        return 1 / self.target_rate - self._delta

    def update(self):
        self._last_time = time.time()
        self._n += 1
        # EMA of the gap between the observed and the target interval
        error = 1 / self.rate - 1 / self.target_rate
        self._delta += (1 - self.alpha) * error

    def __enter__(self):
        now = time.time()
        self._start_time = now
        self._last_time = now
        return self

    def __exit__(self, *exc_args):
        self.unset()

    def throttle(self):
        # busy-wait (fine-grained sleep) until the adjusted interval elapses
        wait = self.sleep_time
        while time.time() - self._last_time < wait:
            time.sleep(1e-6)
        self.update()
class ThroughputColumn(ProgressColumn):
    """Progress column that renders throughput in inferences per second."""

    def render(self, task: "Task") -> Text:
        """Show data throughput, falling back to '?' while speed is unknown."""
        speed = task.finished_speed or task.speed
        label = "?" if speed is None else f"{speed:0.1f} inf/s"
        return Text(label, style="progress.data.speed")
def export(
    model_repository_bucket: str,
    model_name: str,
    streams_per_gpu: int,
    num_models: int = 2,
    instances_per_gpu: int = 4,
    credentials: Optional[str] = None,
) -> qv.ModelRepository:
    """Export identity models to a Google Cloud Storage model repository

    Instantiates a simple identity model and creates a cloud-based
    model repository in Google Cloud to which to export it once per
    ensemble member, wiring every member behind streaming inputs.

    Args:
        model_repository_bucket:
            The name of the cloud storage bucket to which
            to export the model. Must be _globally_ unique
        model_name:
            The name to assign to the model in the repository
        streams_per_gpu:
            The number of snapshot states to maintain on each GPU
        num_models:
            The number of models to use in the ensemble
        instances_per_gpu:
            Number of concurrent inference execution instances
            to host per GPU
        credentials:
            The path to a JSON file containing user-managed
            service account credentials or `None`, in which case
            such a path should be contained in the environment
            variable `GOOGLE_APPLICATION_CREDENTIALS`
    Returns:
        The `ModelRepository` object representing the actual
        repository location
    """
    # initialize a simple identity model in-memory
    # (docstring and log messages previously claimed ResNet18, but the
    # code has always exported IdentityModel)
    nn = IdentityModel(size=100)
    logging.info("Instantiated identity model")
    # instantiate a model repository in a cloud bucket
    model_repository_bucket = "gs://" + model_repository_bucket
    logging.info(f"Creating cloud model repository {model_repository_bucket}")
    model_repository = qv.ModelRepository(
        model_repository_bucket, credentials=credentials
    )
    # create an ensemble model in our model repository
    # which we'll add the streaming models to
    ensemble = model_repository.add("ensemble", platform=qv.Platform.ENSEMBLE)
    inputs = []
    for i in range(num_models):
        # create an entry for a new model in the repo
        name = f"{model_name}_{i}"
        model = model_repository.add(name, platform=qv.Platform.ONNX)
        # set some config parameters
        model.config.max_batch_size = 1
        model.config.add_instance_group(count=instances_per_gpu)
        # export the version of this model corresponding to this
        # particular set of weights. Specify the shape
        # of the inputs to the model and the names of the outputs
        logging.info("Exporting model to repository")
        export_path = model.export_version(
            nn, input_shapes={"x": (None, 1, 100)}, output_names="y"
        )
        logging.info(
            f"Exported model to {model_repository_bucket}/{export_path}"
        )
        # grab the input tensor for this model to
        # expose as a streaming input at the end
        inputs.append(model.inputs["x"])
        # add the model output as an output on the
        # entire ensemble and give it a unique key
        ensemble.add_output(model.outputs["y"], key=f"y_{i}")
    # now expose a streaming input for all
    # the models at the front of the ensemble
    ensemble.add_streaming_inputs(
        inputs, stream_size=10, streams_per_gpu=streams_per_gpu
    )
    # export a "version" of this ensemble, which
    # will just create a version directory and
    # write an empty file in it, as well as write the config
    ensemble.export_version(None)
    # return the repository so we can delete it when we're done
    return model_repository
def build_cluster(
    cluster_name: str,
    cluster_zone: str,
    num_nodes: int = 2,
    gpus_per_node: int = 4,
    vcpus_per_node: int = 16,
    gpu_type: str = "t4",
    credentials: Optional[str] = None,
) -> cb.Cluster:
    """Start a GKE cluster and attach a GPU node pool to it

    Creates the cluster with a single vanilla management node, installs
    GPU drivers on its nodes, then adds a pool of `num_nodes` GPU-enabled
    nodes for inference. If `credentials` is left as `None`, the path to
    a service account JSON will be looked for using the environment
    variable `GOOGLE_APPLICATION_CREDENTIALS`.

    Args:
        cluster_name:
            The name to assign to the new cluster
        cluster_zone:
            The region in which to build the cluster
        num_nodes:
            The number of GPU-enabled nodes to attach to
            the cluster once it's started
        gpus_per_node:
            The number of GPUs to attach to each node
            created in the node pool
        vcpus_per_node:
            The number of VCPUs to attach to each node
            created in the node pool
        gpu_type:
            The type of GPU to use for inference
        credentials:
            Either a string specifying a path to a user-managed
            Google Cloud service account key, or `None`, in which
            case such a string should be attached to the
            environment variable `GOOGLE_APPLICATION_CREDENTIALS`
    Returns:
        The `Cluster` object representing the new cluster
    """
    # The manager owns zone + credentials; every cluster operation goes
    # through it
    gke = cb.ClusterManager(zone=cluster_zone, credentials=credentials)
    # Management-only default pool: a single plain node, no GPUs
    default_pool = container.NodePool(
        name="default-pool",
        initial_node_count=1,
        config=container.NodeConfig(),
    )
    # Create the cluster from this description and block until it's up
    cluster = gke.add(
        container.Cluster(name=cluster_name, node_pools=[default_pool])
    )
    cluster.wait_for_ready()
    # Deploy the daemon set that installs and exposes GPU drivers
    # to containers on each node
    cluster.deploy_gpu_drivers()
    # Now attach the GPU-enabled nodes that will actually host inference
    gpu_pool = cluster.add(
        container.NodePool(
            name=f"tritonserver-{gpu_type}-pool",
            initial_node_count=num_nodes,
            config=cb.create_gpu_node_pool_config(
                vcpus=vcpus_per_node, gpus=gpus_per_node, gpu_type=gpu_type
            ),
        )
    )
    gpu_pool.wait_for_ready()
    # Hand the cluster back so the caller can tear it down later
    return cluster
def do_inference(
num_updates: int,
server_url: str,
model_name: str,
num_models: int,
sequence_ids: Sequence[int],
model_version: int = 1,
request_rate: float = 50,
) -> np.ndarray:
"""Perform asynchronous inference on the provided dataset
Use an inference service hosted at `server_url` to perform
inference on some data in batches.
Args:
num_samples:
The number of samples to do inference on
server_url:
The URL at which the inference service is
awaiting requests
model_name:
The name of the model on the inference service
to load and use for inference
num_models:
The number of output models to pull from the ensemble
sequence_ids:
The ids identifying sequences from which to make requests
model_version:
Which version of the specified model to use for inference
request_rate:
How quickly to send requests to the server. Tune to your
network speed
Returns:
An array representing the inference outputs for
each sample in the dataset.
"""
# instantiate a client that points to the appropriate address
client = triton.InferenceServerClient(server_url)
if not client.is_server_live():
raise RuntimeError("Server isn't ready!")
# manually load in the model, this way we can do things
# like dynamically scale the amount of parallelism
client.load_model(model_name)
if not client.is_model_ready(model_name):
raise RuntimeError("Model isn't ready!")
# infer information about the model inputs
# by querying the server for metadata
metadata = client.get_model_metadata(model_name)
input = metadata.inputs[0]
input_shape = [i if i != -1 else 1 for i in input.shape]
input = triton.InferInput(input.name, input_shape, input.datatype)
# set things up to use threading for asynchronous
# request generation and handling. Include a progress
# bar to keep track of how long things are taking
q, e = Queue(), Event()
progbar = Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
ThroughputColumn(),
TimeRemainingColumn(),
)
N = len(sequence_ids) * num_updates
submit_task_id = progbar.add_task("Submitting requests", total=N)
infer_task_id = progbar.add_task("Collecting results", total=N)
# asynchronous requests require a callback function
# that handles the response from the server in a
| |
<gh_stars>1-10
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""SX1276_77_78_79: 137 MHz to 1020 MHz Low Power Long Range Transceiver featuring the LoRa (TM) long range modem"""
__author__ = "ChISL"
__copyright__ = "TBD"
__credits__ = ["Semtech"]
__license__ = "TBD"
__version__ = "0.1"
__maintainer__ = "https://chisl.io"
__email__ = "<EMAIL>"
__status__ = "Test"
from SX1276_77_78_79_constants import *
# name: SX1276_77_78_79
# description: 137 MHz to 1020 MHz Low Power Long Range Transceiver featuring the LoRa (TM) long range modem
# manuf: Semtech
# version: 0.1
# url: http://www.semtech.com/images/datasheet/sx1276_77_78_79.pdf
# date: 2016-08-01
# Derive from this class and implement read and write
class SX1276_77_78_79_Base:
"""137 MHz to 1020 MHz Low Power Long Range Transceiver featuring the LoRa (TM) long range modem"""
# Register Fifo
# LoRaTM base-band FIFO data input/output.
# FIFO is cleared an not accessible when device is in SLEEP mode
def setFifo(self, val):
    """Write the 8-bit LoRa base-band FIFO data register.

    The FIFO is cleared and not accessible while the device is in SLEEP mode.
    """
    self.write(REG.Fifo, val, 8)
def getFifo(self):
    """Read the 8-bit LoRa base-band FIFO data register."""
    return self.read(REG.Fifo, 8)
# Bits Fifo
# Register OpMode
def setOpMode(self, val):
    """Write the 8-bit OpMode register (LongRangeMode, AccessSharedReg,
    LowFrequencyModeOn and device Mode bits)."""
    self.write(REG.OpMode, val, 8)
def getOpMode(self):
    """Read the 8-bit OpMode register."""
    return self.read(REG.OpMode, 8)
# Bits LongRangeMode
# 0 = FSK/OOK Mode
# 1 = LoRaTM Mode
# This bit can be modified only in Sleep mode. A write operation on other device modes is ignored.
# Bits AccessSharedReg
# This bit operates when device is in Lora mode; if set it allows access to FSK registers page located in address space (0x0D:0x3F) while in LoRa mode
# 0 = Access LoRa registers page 0x0D: 0x3F
# 1 = Access FSK registers page (in mode LoRa) 0x0D: 0x3F
# Bits reserved_0
# reserved
# Bits LowFrequencyModeOn
# Access Low Frequency Mode registers
# Bits Mode
# Device modes
# Register Fr
# RF carrier frequency
# Resolution is 61.035 Hz if F(XOSC) = 32 MHz.
# Default value is 0x6c8000 = 434 MHz.
# Register values must be modified only when device is in SLEEP or STAND-BY mode.
def setFr(self, val):
    """Write the 24-bit RF carrier frequency register Fr.

    Resolution is 61.035 Hz with F(XOSC) = 32 MHz; modify only while the
    device is in SLEEP or STAND-BY mode.
    """
    self.write(REG.Fr, val, 24)
def getFr(self):
    """Read the 24-bit RF carrier frequency register Fr."""
    return self.read(REG.Fr, 24)
# Bits Fr
# Register PaConfig
def setPaConfig(self, val):
    """Write the 8-bit PaConfig register (PaSelect, MaxPower, OutputPower)."""
    self.write(REG.PaConfig, val, 8)
def getPaConfig(self):
    """Read the 8-bit PaConfig register."""
    return self.read(REG.PaConfig, 8)
# Bits PaSelect
# Selects PA output pin
# Bits MaxPower
# Select max output power: Pmax=10.8+0.6*MaxPower [dBm]
# Bits OutputPower
# Pout=Pmax-(15-OutputPower) if PaSelect = 0 (RFO pin)
# Pout=17-(15-OutputPower) if PaSelect = 1 (PA_BOOST pin)
# Register PaRamp
def setPaRamp(self, val):
    """Write the 8-bit PaRamp register (PA ramp up/down time)."""
    self.write(REG.PaRamp, val, 8)
def getPaRamp(self):
    """Read the 8-bit PaRamp register."""
    return self.read(REG.PaRamp, 8)
# Bits unused_0
# unused
# Bits reserved_1
# Bits PaRamp
# Rise/Fall time of ramp up/down in FSK 0000 = 3.4 ms
# Register Ocp
def setOcp(self, val):
    """Write the 8-bit Ocp register (PA overload current protection
    enable and trim bits)."""
    self.write(REG.Ocp, val, 8)
def getOcp(self):
    """Read the 8-bit Ocp register."""
    return self.read(REG.Ocp, 8)
# Bits unused_0
# Bits OcpOn
# Enables overload current protection (OCP) for PA:
# 0 = OCP disabled
# 1 = OCP enabled
# Bits OcpTrim
# Trimming of OCP current:
# Imax = 45+5*OcpTrim [mA] if OcpTrim <= 15 (120 mA)
# Imax = -30+10*OcpTrim [mA] if 15 < OcpTrim <= 27 (130 to 240 mA)
# Imax = 240mA for higher settings
# Default Imax = 100mA
# Register Lna
def setLna(self, val):
    """Write the 8-bit Lna register (LNA gain and LF/HF boost settings)."""
    self.write(REG.Lna, val, 8)
def getLna(self):
    """Read the 8-bit Lna register."""
    return self.read(REG.Lna, 8)
# Bits LnaGain
# LNA gain setting:
# b000 not used
# b111 not used
# Bits LnaBoostLf
# Low Frequency (RFI_LF) LNA current adjustment
# 00 = Default LNA current
# Other = Reserved
# Bits reserved_0
# Bits LnaBoostHf
# High Frequency (RFI_HF) LNA current adjustment
# 0 = Default; -- Default LNA current
# 1 = Boost_on; -- Boost on, 150% LNA current
# Register FifoAddrPtr
# SPI interface address pointer in FIFO data buffer.
def setFifoAddrPtr(self, val):
    """Write the 8-bit SPI address pointer into the FIFO data buffer."""
    self.write(REG.FifoAddrPtr, val, 8)
def getFifoAddrPtr(self):
    """Read the 8-bit SPI address pointer into the FIFO data buffer."""
    return self.read(REG.FifoAddrPtr, 8)
# Bits FifoAddrPtr
# Register FifoTxBaseAddr
# write base address in FIFO data buffer for TX modulator
def setFifoTxBaseAddr(self, val):
    """Write the 8-bit TX modulator base address in the FIFO data buffer."""
    self.write(REG.FifoTxBaseAddr, val, 8)
def getFifoTxBaseAddr(self):
    """Read the 8-bit TX modulator base address in the FIFO data buffer."""
    return self.read(REG.FifoTxBaseAddr, 8)
# Bits FifoTxBaseAddr
# Register FifoRxBaseAddr
# read base address in FIFO data buffer for RX demodulator
def setFifoRxBaseAddr(self, val):
    """Write the 8-bit RX demodulator base address in the FIFO data buffer."""
    self.write(REG.FifoRxBaseAddr, val, 8)
def getFifoRxBaseAddr(self):
    """Read the 8-bit RX demodulator base address in the FIFO data buffer."""
    return self.read(REG.FifoRxBaseAddr, 8)
# Bits FifoRxBaseAddr
# Register FifoRxCurrentAddr
# Start address (in data buffer) of last packet received
def setFifoRxCurrentAddr(self, val):
    """Write the 8-bit FifoRxCurrentAddr register (start address in the
    data buffer of the last packet received)."""
    self.write(REG.FifoRxCurrentAddr, val, 8)
def getFifoRxCurrentAddr(self):
    """Read the 8-bit start address (in the data buffer) of the last
    packet received."""
    return self.read(REG.FifoRxCurrentAddr, 8)
# Bits FifoRxCurrentAddr
# Register IrqFlagsMask
def setIrqFlagsMask(self, val):
    """Write the 8-bit IrqFlagsMask register; a set bit masks the
    corresponding IRQ in RegIrqFlags."""
    self.write(REG.IrqFlagsMask, val, 8)
def getIrqFlagsMask(self):
    """Read the 8-bit IrqFlagsMask register."""
    return self.read(REG.IrqFlagsMask, 8)
# Bits RxTimeoutMask
# Timeout interrupt mask: setting this bit masks the corresponding IRQ in RegIrqFlags
# Bits RxDoneMask
# Packet reception complete interrupt mask: setting this bit masks the
# corresponding IRQ in RegIrqFlags
# Bits PayloadCrcErrorMask
# Payload CRC error interrupt mask: setting this bit masks the
# corresponding IRQ in RegIrqFlags
# Bits ValidHeaderMask
# Valid header received in Rx mask: setting this bit masks the
# corresponding IRQ in RegIrqFlags
# Bits TxDoneMask
# FIFO Payload transmission complete interrupt mask: setting this bit masks
# the corresponding IRQ in RegIrqFlags
# Bits CadDoneMask
# CAD complete interrupt mask: setting this bit masks the corresponding
# IRQ in RegIrqFlags
# Bits FhssChangeChannelMask
# FHSS change channel interrupt mask: setting this bit masks the
# corresponding IRQ in RegIrqFlags
# Bits CadDetectedMask
# Cad Detected Interrupt Mask: setting this bit masks the corresponding
# IRQ in RegIrqFlags
# Register IrqFlags
def setIrqFlags(self, val):
    """Write the 8-bit IrqFlags register; writing a 1 to a flag bit
    clears that IRQ."""
    self.write(REG.IrqFlags, val, 8)
def getIrqFlags(self):
    """Read the 8-bit IrqFlags register."""
    return self.read(REG.IrqFlags, 8)
# Bits RxTimeout
# Timeout interrupt: writing a 1 clears the IRQ
# Bits RxDone
# Packet reception complete interrupt: writing a 1 clears the IRQ
# Bits PayloadCrcError
# Payload CRC error interrupt: writing a 1 clears the IRQ
# Bits ValidHeader
# Valid header received in Rx: writing a 1 clears the IRQ
# Bits TxDone
# FIFO Payload transmission complete interrupt: writing a 1 clears the IRQ
# Bits CadDone
# CAD complete: write to clear: writing a 1 clears the IRQ
# Bits FhssChangeChannel
# FHSS change channel interrupt: writing a 1 clears the IRQ
# Bits CadDetected
# Valid Lora signal detected during CAD operation: writing a 1 clears the IRQ
# Register RxNbBytes
# Number of payload bytes of latest packet received
def setRxNbBytes(self, val):
    """Write the 8-bit RxNbBytes register (payload byte count of the
    latest packet received)."""
    self.write(REG.RxNbBytes, val, 8)
def getRxNbBytes(self):
    """Read the 8-bit number of payload bytes of the latest packet received."""
    return self.read(REG.RxNbBytes, 8)
# Bits RxNbBytes
# Register RxHeaderCntValue
# Number of valid headers received since last transition into Rx mode.
# Header and packet counters are reseted in Sleep mode.
def setRxHeaderCntValue(self, val):
    """Write the 16-bit valid-header counter (reset on entry to Sleep mode)."""
    self.write(REG.RxHeaderCntValue, val, 16)
def getRxHeaderCntValue(self):
    """Read the 16-bit count of valid headers received since the last
    transition into Rx mode."""
    return self.read(REG.RxHeaderCntValue, 16)
# Bits RxHeaderCntValue
# Register RxPacketCntValue
# Number of valid packets received since last transition into Rx mode.
# Header and packet counters are reseted in Sleep mode.
def setRxPacketCntValue(self, val):
    """Write the 16-bit valid-packet counter (reset on entry to Sleep mode)."""
    self.write(REG.RxPacketCntValue, val, 16)
def getRxPacketCntValue(self):
    """Read the 16-bit count of valid packets received since the last
    transition into Rx mode."""
    return self.read(REG.RxPacketCntValue, 16)
# Bits RxPacketCntValue
# Register ModemStat
def setModemStat(self, val):
    """Write the 8-bit ModemStat register (modem/RX status bits)."""
    self.write(REG.ModemStat, val, 8)
def getModemStat(self):
    """Read the 8-bit ModemStat register."""
    return self.read(REG.ModemStat, 8)
# Bits RxCodingRate
# Coding rate of last header received
# Bits ModemClear
# Modem clear
# Bits HeaderInfoValid
# Header info valid
# Bits RxOngoing
# RX on-going
# Bits SignalSynchronized
# Signal synchronized
# Bits SignalDetected
# Signal detected
# Register PktSnrValue
# Estimation of SNR on last packet received.In two's compliment format mutiplied by 4.
# SNR[dB] = PacketSnr[twos complement-] / 4
def setPktSnrValue(self, val):
    """Write the 8-bit PktSnrValue register (SNR estimate of the last
    packet, two's complement, scaled by 4)."""
    self.write(REG.PktSnrValue, val, 8)
def getPktSnrValue(self):
    """Read the 8-bit packet SNR estimate; SNR[dB] = value (two's
    complement) / 4."""
    return self.read(REG.PktSnrValue, 8)
# Bits PktSnrValue
# Register PktRssiValue
# RSSI of the latest packet received (dBm):
# RSSI[dBm] = -157 + Rssi (using HF output port, SNR >= 0) or
# RSSI[dBm] = -164 + Rssi (using LF output port, SNR >= 0)
# (see section 5.5.5 for details)
def setPktRssiValue(self, val):
    """Write the 8-bit PktRssiValue register (RSSI of the latest packet)."""
    self.write(REG.PktRssiValue, val, 8)
def getPktRssiValue(self):
    """Read the 8-bit RSSI of the latest packet; RSSI[dBm] = -157 + value
    (HF port) or -164 + value (LF port), for SNR >= 0."""
    return self.read(REG.PktRssiValue, 8)
# Bits PktRssiValue
# Register RssiValue
# Current RSSI value (dBm)
# RSSI[dBm] = -157 + Rssi (using HF output port) or
# RSSI[dBm] = -164 + Rssi (using LF output port)
# (see section 5.5.5 for details)
def setRssiValue(self, val):
    """Write the 8-bit RssiValue register (current RSSI reading)."""
    self.write(REG.RssiValue, val, 8)
def getRssiValue(self):
    """Read the 8-bit current RSSI; RSSI[dBm] = -157 + value (HF port)
    or -164 + value (LF port)."""
    return self.read(REG.RssiValue, 8)
# Bits RssiValue
# Register HopChannel
def setHopChannel(self, val):
    """Write the 8-bit HopChannel register (PllTimeout, CrcOnPayload and
    current FHSS channel bits)."""
    self.write(REG.HopChannel, val, 8)
def getHopChannel(self):
    """Read the 8-bit HopChannel register."""
    return self.read(REG.HopChannel, 8)
# Bits PllTimeout
# PLL failed to lock while attempting a TX/RX/CAD operation 1 = PLL did not lock
# 0 = PLL did lock
# Bits CrcOnPayload
# CRC Information extracted from the received packet header (Explicit header mode only)
# 0 = Header indicates CRC off
# 1 = Header indicates CRC on
# Bits FhssPresentChannel
# Current value of frequency hopping channel in use.
# Register ModemConfig1
def setModemConfig1(self, val):
    """Write the 8-bit ModemConfig1 register (signal bandwidth and error
    coding rate configuration)."""
    self.write(REG.ModemConfig1, val, 8)
def getModemConfig1(self):
    """Read the 8-bit ModemConfig1 register."""
    return self.read(REG.ModemConfig1, 8)
# Bits BW
# Signal bandwidth:
# In the lower band (169MHz), signal bandwidths 8&9 are not supported)
# other values = reserved
# Bits CodingRate
# Error coding rate
# In implicit header mode should be set on receiver to determine
# expected coding | |
import argparse
import datetime
import json
import logging
import math
import os
import re
import time
import numpy as np
import pandas as pd
import requests
import xmltodict
from dateutil.parser import parse
from fvhdms import (
save_df, get_default_argumentparser, parse_args,
user_agent, dataframe_into_influxdb,
parse_times
)
USER_AGENT = user_agent('0.0.2', subdir='FmiAPI')
TIME_FMT = '%Y-%m-%dT%H:%MZ'
STATIONS_URL = 'https://opendata.fmi.fi/wfs/fin?service=WFS&version=2.0.0&request=GetFeature&storedquery_id=fmi::ef::stations&networkid='
"""
# Example request URL
https://opendata.fmi.fi/wfs?request=getFeature&storedquery_id=urban::observations::airquality::hourly::multipointcoverage&geoId=-106948
# List available weathern stations
python fmiapi.py -st 20200510T08:00:00Z -et 20200510T20:00:00Z --storedquery fmi::observations::weather::multipointcoverage -i fmisid --stationids 100971 101004 --list hels
name fmisid latlon type region
17 Helsinki Harmaja 100996 60.105120,24.975390 Automaattinen sääasema Helsinki
18 Helsinki Helsingin majakka 101003 59.948981,24.926311 Automaattinen sääasema Helsinki
19 Helsinki Kaisaniemi 100971 60.175230,24.944590 Automaattinen sääasema Helsinki
20 Helsinki Kaivopuisto 132310 60.153630,24.956220 Mareografiasema Helsinki
21 Helsinki Kumpula 101004 60.203071,24.961305 Aut,Sad,Aur,Ilm,Rad,Rev,Tut Helsinki
22 Helsinki Malmi lentokenttä 101009 60.252990,25.045490 Automaattinen sääasema Helsinki
23 Helsinki Vuosaari Käärmeniementie 103943 60.219350,25.172675 Aut,Tes Helsinki
24 Helsinki Vuosaari satama 151028 60.208670,25.195900 Automaattinen sääasema Helsinki
177 Vantaa Helsinki-Vantaan lentoasema 100968 60.326700,24.956750 Aut,Sää,Aur,Tes Vantaa
# List available air quality stations (networkid=151)
python fmiapi.py -st 20200510T08:00:00Z -et 20200510T20:00:00Z --storedquery fmi::observations::weather::multipointcoverage -i fmisid --stationids 100971 101004 --list hels --stationtype 151
name fmisid latlon type region
7 Helsinki Kallio 2 100662 60.187390,24.950600 Kolmannen osapuolen ilmanlaadun havaintoasema Helsinki
8 Helsinki Länsisatama 4 106948 60.155210,24.921780 Kolmannen osapuolen ilmanlaadun havaintoasema Helsinki
9 Helsink<NAME> 100742 60.169640,24.939240 Kolmannen osapuolen ilmanlaadun havaintoasema Helsinki
10 Helsinki Mäkelänkatu 100762 60.196440,24.951980 Kolmannen osapuolen ilmanlaadun havaintoasema Helsinki
11 Helsinki Paloheinä 107165 60.250040,24.939420 Kolmannen osapuolen ilmanlaadun havaintoasema Helsinki
12 Helsinki Pirkkola 106950 60.234220,24.922320 Kolmannen osapuolen ilmanlaadun havaintoasema Helsinki
13 Helsinki Vartiokylä Huivipolku 100803 60.223930,25.102440 Kolmannen osapuolen ilmanlaadun havaintoasema Helsinki
27 Järvenpää Helsingintie 103154 60.471320,25.089670 Kolmannen osapuolen ilmanlaadun havaintoasema Järvenpää
All stations:
https://ilmatieteenlaitos.fi/havaintoasemat
Example request for weather observations:
http://opendata.fmi.fi/wfs?request=GetFeature&storedquery_id=fmi::observations::weather::multipointcoverage&fmisid=100971&timestep=10
http://opendata.fmi.fi/wfs?request=GetFeature&storedquery_id=fmi::observations::weather::multipointcoverage&geoid=-16000150&timestep=10
"""
def parse_fmi_args() -> argparse.Namespace:
    """Parse command line arguments for the FMI API fetcher.

    Extends the fvhdms default argument parser with FMI-specific options.
    """
    parser = get_default_argumentparser()
    parser.add_argument('--cachefile', action='store_true', help='Store response data locally as a file')
    parser.add_argument("--timestep", dest="timestep", choices=['10', '60'],
                        default='60', help="timestep parameter value in FMI URL")
    parser.add_argument('--wait', type=float, default=1,
                        help='Time to wait (in seconds) between requests')
    parser.add_argument('--storedquery', help='Stored query. Must be multipointcoverage type',
                        default='urban::observations::airquality::hourly::multipointcoverage')
    parser.add_argument('--stationids', required=True, nargs='+', default=[],
                        help='FMISID, see possible values with --list [search_re] argument')
    parser.add_argument('-i', '--idfield', required=True, default='geoid', choices=['geoid', 'fmisid'],
                        help='Id parameter name')
    parser.add_argument('--extraparams', nargs='+', default=[],
                        help='Additional parameters to output json in "key1=val1 [key2=val2 key3=val3 ...]" format')
    parser.add_argument('-n', '--nocache', action='store_true', help='Do not use cached xml data')
    parser.add_argument('--list', nargs='?', const='',
                        help='List available stations with optional regex')
    # Fixed help text: the closing parenthesis after 151 was missing
    parser.add_argument('--stationtype', default='121', choices=['121', '151'],
                        help='Weather station (121) or airquality station (151)')
    args = parse_args(parser)
    return args
def get_fmi_api_url(args: dict, geoid: str, storedquery: str,
                    starttime: datetime.datetime, endtime: datetime.datetime) -> str:
    """Build the FMI WFS GetFeature request URL for one station and time window.

    :param dict args: parsed CLI arguments; uses 'idfield' and 'timestep'
    :param str geoid: station id (geoid or fmisid, see --idfield)
    :param str storedquery: FMI stored query id (multipointcoverage type)
    :param datetime.datetime starttime: window start
    :param datetime.datetime endtime: window end
    :return: complete request URL
    """
    s_str = starttime.strftime(TIME_FMT)
    e_str = endtime.strftime(TIME_FMT)
    idfield = args['idfield']
    timestep = args['timestep']
    # geoids are negative in the FMI API: prepend the minus sign unless the
    # caller already supplied it (fmisid values are used verbatim)
    if idfield == 'fmisid' or geoid.startswith('-'):
        prefix = ''
    else:
        prefix = '-'
    # Fixed: the timestep parameter was emitted as '×tep=' (mojibake of
    # '&times' + 'tep'), which produced a broken query string
    url = f'https://opendata.fmi.fi/wfs?' \
          f'request=getFeature&storedquery_id={storedquery}&' \
          f'{idfield}={prefix}{geoid}&startTime={s_str}&endTime={e_str}&timestep={timestep}'
    logging.info(f'Fetching data from: {url}')
    return url
def get_data_from_fmi_fi(args: dict, geoid: str, storedquery: str,
                         starttime: datetime.datetime, endtime: datetime.datetime) -> str:
    """Fetch one FMI API response and cache it into a local XML file.

    Returns the cache file name. An existing cache file is reused unless
    --nocache was given; error responses are still written so they can be
    inspected.
    """
    s_str = starttime.strftime(TIME_FMT)
    e_str = endtime.strftime(TIME_FMT)
    url = get_fmi_api_url(args, geoid, storedquery, starttime, endtime)
    # Colons are stripped from the timestamps to keep the name filesystem-safe
    fname = 'fmi_{}_{}-{}.xml'.format(geoid, s_str.replace(':', ''), e_str.replace(':', ''))
    if os.path.isfile(fname) and args['nocache'] is False:
        logging.info(f'Cache file already exists: {fname}')
        return fname
    # TODO: do error handling here
    res = requests.get(url)
    if res.status_code != 200:
        logging.error(f'FMI API returned {res.status_code}! Check file {fname} for errors.')
    logging.info(f'Saving to cache file: {fname}')
    with open(fname, 'wt') as f:
        f.write(res.text)
    return fname
def fmi_xml_to_dict(fname):
    """Parse a cached FMI XML response file into a nested dict.

    :param str fname: path to the XML file on disk
    :return: dict produced by xmltodict.parse
    """
    # Pin the encoding: the FMI service serves UTF-8 XML, and relying on the
    # platform default locale encoding can mangle non-ASCII station names.
    with open(fname, 'rt', encoding='utf-8') as f:
        return xmltodict.parse(f.read())
def get_fmi_data_week_max(args: dict, geoid: str, storedquery: str,
                          starttime: datetime.datetime, endtime: datetime.datetime) -> tuple:
    """Fetch and parse at most one week of FMI multipointcoverage data.

    :return: (name, lat, lon, timestamp_lines, data_lines) where
             timestamp_lines is a list of unix epoch ints and data_lines a
             list of (field_name, value) pair lists (value is None for NaN)
    :raises KeyError: when the response lacks the expected
                      GridSeriesObservation structure, including when FMI
                      returned an ExceptionReport
    """
    fmi_xml = get_data_from_fmi_fi(args, geoid, storedquery, starttime, endtime)
    d = fmi_xml_to_dict(fmi_xml)
    # TODO: remove fmi_xml
    # Base element for all interesting data
    try:
        base = d["wfs:FeatureCollection"]["wfs:member"]["omso:GridSeriesObservation"]
    except KeyError:
        if 'ExceptionReport' in d:
            msg = 'FMI sent us an exception:\n'
            msg += '\n'.join(d['ExceptionReport']['Exception']['ExceptionText'])
            logging.warning(msg)
        # Fixed: re-raise in every case. Previously the ExceptionReport
        # branch fell through and crashed below with NameError because
        # `base` was never bound; the caller catches KeyError and skips
        # this time window.
        raise
    # Name & location
    base_position = base["om:featureOfInterest"]["sams:SF_SpatialSamplingFeature"]["sams:shape"]["gml:MultiPoint"][
        "gml:pointMember"]["gml:Point"]
    name = base_position["gml:name"]
    lat, lon = [float(x) for x in base_position["gml:pos"].split(' ')]
    # Timestamps: the 3rd whitespace column of each positions row is the epoch
    raw_ts = base["om:result"]["gmlcov:MultiPointCoverage"]["gml:domainSet"]["gmlcov:SimpleMultiPoint"][
        "gmlcov:positions"]
    # Datalines, values are space separated
    raw_dl = base["om:result"]["gmlcov:MultiPointCoverage"]["gml:rangeSet"]["gml:DataBlock"][
        "gml:doubleOrNilReasonTupleList"]
    # Data types, list of swe:field elements
    raw_dt = base["om:result"]["gmlcov:MultiPointCoverage"]["gmlcov:rangeType"]["swe:DataRecord"]['swe:field']
    data_names = [x['@name'] for x in raw_dt]
    timestamp_lines = [int(a.split()[2]) for a in raw_ts.strip().splitlines()]
    raw_data_lines = raw_dl.splitlines()
    data_lines = []
    for raw_data_line in raw_data_lines:
        # Map NaN markers to None; other values remain strings at this stage
        data_values = [x if not math.isnan(float(x)) else None for x in raw_data_line.strip().split(' ')]
        # Create list of key value pairs
        keyvalues = list(zip(data_names, data_values))
        data_lines.append(keyvalues)
    return name, lat, lon, timestamp_lines, data_lines
def get_fmi_data(args: dict, geoid: str, storedquery: str,
                 starttime: datetime.datetime, endtime: datetime.datetime) -> dict:
    """Fetch the full requested period in one-week windows and merge results.

    Returns a dict with station metadata, a GeoJSON-style location and the
    parsed data lines; optional --extraparams key=value pairs are merged in.
    """
    week = datetime.timedelta(hours=7 * 24)
    name, lat, lon = None, None, None
    timestamp_lines = []
    data_lines = []
    window_start = starttime
    while window_start <= endtime:
        window_end = min(window_start + week, endtime)
        logging.debug(f'Getting time period {window_start} - {window_end}')
        try:
            (name, lat, lon,
             week_timestamps, week_data) = get_fmi_data_week_max(args, geoid, storedquery,
                                                                 window_start, window_end)
        except KeyError as err:
            logging.warning(f'Got KeyError with missing key {err}, ignoring this data')
            window_start = window_start + week
            continue
        timestamp_lines += week_timestamps
        data_lines += week_data
        window_start = window_start + week
        logging.debug('Sleeping')
        time.sleep(args['wait'])
    parsed_lines = []
    for epoch, line in zip(timestamp_lines, data_lines):
        ts = datetime.datetime.utcfromtimestamp(epoch)
        # Convert string values to floats; missing values become NaN
        values = [[key, float(val) if val is not None else np.nan] for key, val in line]
        parsed_lines.append({
            'time': ts.isoformat() + 'Z',
            'data': values,
        })
    data = {
        'devid': str(geoid),
        'name': name,
        'location': {'type': 'Point', 'coordinates': [lon, lat]},
        'datalines': parsed_lines,
    }
    if args['extraparams']:
        data.update(dict([x.split('=') for x in args['extraparams']]))
    return data
def get_multi_fmi_data(args: dict, start_time: datetime.datetime, end_time: datetime.datetime) -> pd.DataFrame:
    """Loop all FMI measuring station ids and get their data from FMI API

    :param dict args: Arguments
    :param datetime.datetime start_time: Data period start
    :param datetime.datetime end_time: Data period end
    :return: pd.DataFrame containing all of the data
    """
    storedquery = args['storedquery']
    frames = []
    for stationid in args['stationids']:
        station = get_fmi_data(args, stationid, storedquery, start_time, end_time)
        index = []
        columns = {'dev-id': []}
        for dl in station['datalines']:
            index.append(parse(dl['time']))
            columns['dev-id'].append(stationid)
            # Accumulate every measured field into its own column list
            for key, value in dl['data']:
                columns.setdefault(key, []).append(value)
        frame = pd.DataFrame(columns, index=index)
        frame.index.name = 'time'
        frames.append(frame)
    combined = pd.concat(frames) if len(frames) > 1 else frames[0]
    return combined.sort_index()
def list_fmi_stations(args: dict):
    """Fetch the FMI station register and print it as a table.

    Optionally filters rows by the regex given with --list (matched
    case-insensitively against station name and region).
    """
    search_re = args.get('list')
    url = STATIONS_URL + args['stationtype']
    # Fixed: removed leftover debug lines (`print(url); exit()`) that made
    # everything below unreachable.
    res = requests.get(url)
    data = xmltodict.parse(res.text)
    s_list = data['wfs:FeatureCollection']['wfs:member']
    cols = {
        'fmisid': [],
        'latlon': [],
        'type': [],
        'wmo': [],
    }
    # gml:name entries a station may carry; missing ones are padded with
    # None so every column stays the same length
    gml_names = {'name', 'geoid', 'wmo', 'region', 'country'}
    for l in s_list:
        tmp_names = gml_names.copy()
        obj = l['ef:EnvironmentalMonitoringFacility']
        cols['fmisid'].append(obj['gml:identifier']['#text'])
        cols['latlon'].append(obj['ef:representativePoint']['gml:Point']['gml:pos'].replace(' ', ','))
        types = []
        if isinstance(obj['ef:belongsTo'], list):
            for network in obj['ef:belongsTo']:
                types.append(network['@xlink:title'][:3])
        else:
            types.append(obj['ef:belongsTo']['@xlink:title'])
        cols['type'].append(','.join(types))
        for name in obj['gml:name']:
            col_name = name['@codeSpace'].split('/')[-1]
            tmp_names.remove(col_name)
            if col_name in cols:
                cols[col_name].append(name['#text'])
            else:
                cols[col_name] = [name['#text']]
        for col_name in tmp_names:
            cols[col_name].append(None)
    # Show the full stations table without pandas' screen fitting logic
    pd.set_option('display.max_rows', 300)
    pd.set_option('display.max_columns', 10)
    pd.set_option('display.width', 1000)
    # Create DataFrame
    df = pd.DataFrame(data=cols)
    # Drop unnecessary columns
    df = df.drop(['country', 'wmo', 'geoid'], axis=1)
    df = df.sort_values(by=['name']).reset_index(drop=True)
    cols = list(df.columns)
    cols_to_move = ['name']  # Move name to the beginning
    [cols.remove(x) for x in cols_to_move if x in cols]
    cols = cols_to_move + cols
    df = df[cols]
    # Print full DataFrame or just rows which match to regex
    if search_re is None:
        print(df)
    else:
        print(df[(df['name'].str.contains(search_re, flags=re.IGNORECASE)) | (
                df['region'].str.contains(search_re, flags=re.IGNORECASE))])
def main():
    """Entry point: parse args, fetch FMI data and push it to InfluxDB."""
    args = vars(parse_fmi_args())
    # --list short-circuits everything else: print the station table and quit
    if args['list'] is not None:
        list_fmi_stations(args)
        exit()
    start_time, end_time, time_length = parse_times(args)
    # Currently request only time periods from full hour to full hour
    start_time = start_time.replace(minute=0, second=0, microsecond=0)
    end_time = end_time.replace(minute=0, second=0, microsecond=0)
    df = get_multi_fmi_data(args, start_time, end_time)
    print(df)
    save_df(args, df)  # Save to a file if --outfile argument is present
    # FIXME: This doesn't work (trying to drop all rows where all cols are nan but dev-id
    # df_id = df['dev-id']
    # df = df.drop(columns=['dev-id']).dropna(how='all')
    # df = df.join(df_id)
    # print(df)
    dataframe_into_influxdb(args, df, tag_columns=['dev-id'])
if __name__ == '__main__':
main()
"""
List stations one object example from:
https://opendata.fmi.fi/wfs/fin?service=WFS&version=2.0.0&request=GetFeature&storedquery_id=fmi::ef::stations&networkid=121&
{
"@gml:id": "<KEY>FFU.<KEY>",
"gml:identifier": {
"@codeSpace": "http://xml.fmi.fi/namespace/stationcode/fmisid",
"#text": "100908"
},
"gml:name": [
{
"@codeSpace": "http://xml.fmi.fi/namespace/locationcode/name",
"#text": "Parainen Ut\u00f6"
},
{
"@codeSpace": "http://xml.fmi.fi/namespace/locationcode/geoid",
"#text": "-16000054"
},
{
"@codeSpace": "http://xml.fmi.fi/namespace/locationcode/wmo",
"#text": "02981"
},
{
"@codeSpace": "http://xml.fmi.fi/namespace/location/region",
"#text": "Parainen"
},
{
"@codeSpace": "http://xml.fmi.fi/namespace/location/country",
"#text": "Suomi"
}
],
"ef:inspireId": {
"ins_base:Identifier": {
| |
user manual
'''
return c_SetUISlider(clientID, uiHandle, uiButtonID, position, operationMode)
def simxGetUIEventButton(clientID, uiHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    eventButtonID = ct.c_int()
    auxBuf = (ct.c_int*2)()
    ret = c_GetUIEventButton(clientID, uiHandle, ct.byref(eventButtonID), auxBuf, operationMode)
    # Copy the two auxiliary values out of the ctypes array into a plain list.
    aux = [auxBuf[i] for i in range(2)]
    return ret, eventButtonID.value, aux
def simxGetUIButtonProperty(clientID, uiHandle, uiButtonID, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    # Out-parameter the C binding fills with the button property flags.
    buttonProperty = ct.c_int()
    ret = c_GetUIButtonProperty(clientID, uiHandle, uiButtonID, ct.byref(buttonProperty), operationMode)
    return ret, buttonProperty.value
def simxSetUIButtonProperty(clientID, uiHandle, uiButtonID, prop, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_SetUIButtonProperty(clientID, uiHandle, uiButtonID, prop, operationMode)
def simxAddStatusbarMessage(clientID, message, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # The C binding expects bytes; Python-3 str messages are UTF-8 encoded first.
    if (sys.version_info[0] == 3) and (type(message) is str):
        message=message.encode('utf-8')
    return c_AddStatusbarMessage(clientID, message, operationMode)
def simxAuxiliaryConsoleOpen(clientID, title, maxLines, mode, position, size, textColor, backgroundColor, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual

    Opens an auxiliary console in the simulator and returns
    (returnCode, consoleHandle). position/size (int pairs) and
    textColor/backgroundColor (RGB float triples) may be None to use the
    simulator defaults.
    '''
    consoleHandle = ct.c_int()
    if (sys.version_info[0] == 3) and (type(title) is str):
        title = title.encode('utf-8')
    # Marshal the optional arguments into C arrays; None maps to a NULL
    # pointer.  (Idiom fix: use `is not None` rather than `!= None`, PEP 8.)
    c_position = (ct.c_int*2)(*position) if position is not None else None
    c_size = (ct.c_int*2)(*size) if size is not None else None
    c_textColor = (ct.c_float*3)(*textColor) if textColor is not None else None
    c_backgroundColor = (ct.c_float*3)(*backgroundColor) if backgroundColor is not None else None
    ret = c_AuxiliaryConsoleOpen(clientID, title, maxLines, mode, c_position, c_size, c_textColor, c_backgroundColor, ct.byref(consoleHandle), operationMode)
    return ret, consoleHandle.value
def simxAuxiliaryConsoleClose(clientID, consoleHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_AuxiliaryConsoleClose(clientID, consoleHandle, operationMode)
def simxAuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # The C binding expects bytes; Python-3 str text is UTF-8 encoded first.
    if (sys.version_info[0] == 3) and (type(txt) is str):
        txt=txt.encode('utf-8')
    return c_AuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode)
def simxAuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_AuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode)
def simxGetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    eulerBuf = (ct.c_float*3)()
    ret = c_GetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerBuf, operationMode)
    # Return the three Euler angles as a plain Python list.
    return ret, [eulerBuf[i] for i in range(3)]
def simxGetObjectPosition(clientID, objectHandle, relativeToObjectHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    posBuf = (ct.c_float*3)()
    ret = c_GetObjectPosition(clientID, objectHandle, relativeToObjectHandle, posBuf, operationMode)
    # Return the (x, y, z) coordinates as a plain Python list.
    return ret, [posBuf[i] for i in range(3)]
def simxSetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerAngles, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    # Marshal the three Euler angles into a C float array.
    anglesArray = (ct.c_float*3)(*eulerAngles)
    return c_SetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, anglesArray, operationMode)
def simxSetObjectPosition(clientID, objectHandle, relativeToObjectHandle, position, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    # Marshal the (x, y, z) position into a C float array.
    positionArray = (ct.c_float*3)(*position)
    return c_SetObjectPosition(clientID, objectHandle, relativeToObjectHandle, positionArray, operationMode)
def simxSetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_SetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode)
def simxSetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # The C binding expects bytes; encode any Python-3 str labels first.
    if sys.version_info[0] == 3:
        if type(upStateLabel) is str:
            upStateLabel=upStateLabel.encode('utf-8')
        if type(downStateLabel) is str:
            downStateLabel=downStateLabel.encode('utf-8')
    return c_SetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode)
def simxGetLastErrors(clientID, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    errors =[]
    errorCnt = ct.c_int()
    errorStrings = ct.POINTER(ct.c_char)()
    ret = c_GetLastErrors(clientID, ct.byref(errorCnt), ct.byref(errorStrings), operationMode)
    if ret == 0:
        s = 0
        # errorStrings points at errorCnt consecutive NUL-terminated C strings;
        # walk them one byte at a time and decode each into a Python string.
        for i in range(errorCnt.value):
            a = bytearray()
            while errorStrings[s] != b'\0':
                if sys.version_info[0] == 3:
                    # On Python 3 indexing yields a 1-byte bytes object.
                    a.append(int.from_bytes(errorStrings[s],'big'))
                else:
                    a.append(errorStrings[s])
                s += 1
            s += 1 #skip null
            if sys.version_info[0] == 3:
                errors.append(str(a,'utf-8'))
            else:
                errors.append(str(a))
    return ret, errors
def simxGetArrayParameter(clientID, paramIdentifier, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    valueBuf = (ct.c_float*3)()
    ret = c_GetArrayParameter(clientID, paramIdentifier, valueBuf, operationMode)
    # Return the three parameter values as a plain Python list.
    return ret, [valueBuf[i] for i in range(3)]
def simxSetArrayParameter(clientID, paramIdentifier, paramValues, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Marshal the three float values into a C array for the binding.
    c_paramValues = (ct.c_float*3)(*paramValues)
    return c_SetArrayParameter(clientID, paramIdentifier, c_paramValues, operationMode)
def simxGetBooleanParameter(clientID, paramIdentifier, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    paramValue = ct.c_ubyte()
    ret = c_GetBooleanParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode)
    # The C side reports 0/1; expose it as a Python bool.
    return ret, bool(paramValue.value != 0)
def simxSetBooleanParameter(clientID, paramIdentifier, paramValue, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_SetBooleanParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetIntegerParameter(clientID, paramIdentifier, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    # Out-parameter the C binding fills with the requested value.
    paramValue = ct.c_int()
    ret = c_GetIntegerParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode)
    return ret, paramValue.value
def simxSetIntegerParameter(clientID, paramIdentifier, paramValue, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_SetIntegerParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetFloatingParameter(clientID, paramIdentifier, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    # Out-parameter the C binding fills with the requested value.
    paramValue = ct.c_float()
    ret = c_GetFloatingParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode)
    return ret, paramValue.value
def simxSetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_SetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetStringParameter(clientID, paramIdentifier, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    paramValue = ct.POINTER(ct.c_char)()
    ret = c_GetStringParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode)
    a = bytearray()
    if ret == 0:
        i = 0
        # Copy the NUL-terminated C string byte by byte into the bytearray.
        while paramValue[i] != b'\0':
            if sys.version_info[0] == 3:
                # On Python 3 indexing yields a 1-byte bytes object.
                a.append(int.from_bytes(paramValue[i],'big'))
            else:
                a.append(paramValue[i])
            i=i+1
    # Decode to str on Python 3; Python 2 str(bytearray) is a byte string.
    if sys.version_info[0] == 3:
        a=str(a,'utf-8')
    else:
        a=str(a)
    return ret, a
def simxGetCollisionHandle(clientID, collisionObjectName, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    handle = ct.c_int()
    # The C binding expects bytes; encode a Python-3 str name first.
    if (sys.version_info[0] == 3) and (type(collisionObjectName) is str):
        collisionObjectName = collisionObjectName.encode('utf-8')
    ret = c_GetCollisionHandle(clientID, collisionObjectName, ct.byref(handle), operationMode)
    return ret, handle.value
def simxGetCollectionHandle(clientID, collectionName, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Out-parameter the C binding fills with the collection handle.
    handle = ct.c_int()
    # The C binding expects bytes; encode a Python-3 str name first.
    if (sys.version_info[0] == 3) and (type(collectionName) is str):
        collectionName=collectionName.encode('utf-8')
    return c_GetCollectionHandle(clientID, collectionName, ct.byref(handle), operationMode), handle.value
def simxGetDistanceHandle(clientID, distanceObjectName, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Out-parameter the C binding fills with the distance-object handle.
    handle = ct.c_int()
    # The C binding expects bytes; encode a Python-3 str name first.
    if (sys.version_info[0] == 3) and (type(distanceObjectName) is str):
        distanceObjectName=distanceObjectName.encode('utf-8')
    return c_GetDistanceHandle(clientID, distanceObjectName, ct.byref(handle), operationMode), handle.value
def simxReadCollision(clientID, collisionObjectHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    collisionState = ct.c_ubyte()
    ret = c_ReadCollision(clientID, collisionObjectHandle, ct.byref(collisionState), operationMode)
    # Report the collision flag as a Python bool.
    return ret, bool(collisionState.value != 0)
def simxReadDistance(clientID, distanceObjectHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    # Out-parameter the C binding fills with the measured distance.
    minimumDistance = ct.c_float()
    ret = c_ReadDistance(clientID, distanceObjectHandle, ct.byref(minimumDistance), operationMode)
    return ret, minimumDistance.value
def simxRemoveObject(clientID, objectHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_RemoveObject(clientID, objectHandle, operationMode)
def simxRemoveModel(clientID, objectHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_RemoveModel(clientID, objectHandle, operationMode)
def simxRemoveUI(clientID, uiHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_RemoveUI(clientID, uiHandle, operationMode)
def simxCloseScene(clientID, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_CloseScene(clientID, operationMode)
def simxGetObjects(clientID, objectType, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP
    user manual
    '''
    objectCount = ct.c_int()
    objectHandles = ct.POINTER(ct.c_int)()
    ret = c_GetObjects(clientID, objectType, ct.byref(objectCount), ct.byref(objectHandles), operationMode)
    # Only copy the handles out on success; on error return an empty list.
    handles = [objectHandles[i] for i in range(objectCount.value)] if ret == 0 else []
    return ret, handles
def simxDisplayDialog(clientID, titleText, mainText, dialogType, initialText, titleColors, dialogColors, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual

    Returns (returnCode, dialogHandle, uiHandle). titleColors/dialogColors
    are optional 6-float sequences; None keeps the simulator defaults.
    '''
    # Marshal the optional colour arrays; None maps to a NULL pointer.
    # (Idiom fix: use `is not None` rather than `!= None`, PEP 8.)
    c_titleColors = (ct.c_float*6)(*titleColors) if titleColors is not None else None
    c_dialogColors = (ct.c_float*6)(*dialogColors) if dialogColors is not None else None
    c_dialogHandle = ct.c_int()
    c_uiHandle = ct.c_int()
    if sys.version_info[0] == 3:
        # The C layer expects bytes for every string argument.
        if type(titleText) is str:
            titleText=titleText.encode('utf-8')
        if type(mainText) is str:
            mainText=mainText.encode('utf-8')
        if type(initialText) is str:
            initialText=initialText.encode('utf-8')
    ret = c_DisplayDialog(clientID, titleText, mainText, dialogType, initialText, c_titleColors, c_dialogColors, ct.byref(c_dialogHandle), ct.byref(c_uiHandle), operationMode)
    return ret, c_dialogHandle.value, c_uiHandle.value
def simxEndDialog(clientID, dialogHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Thin pass-through to the C remote-API binding; returns the error code.
    return c_EndDialog(clientID, dialogHandle, operationMode)
def simxGetDialogInput(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in | |
<reponame>benw1/WINGS
#!/usr/bin/env python
"""
Contains the DataProduct class definition
Please note that this module is private. The DataProduct class is
available in the main ``wpipe`` namespace - use that instead.
"""
from .core import os, shutil, datetime, si
from .core import return_dict_of_attrs, initialize_args, wpipe_to_sqlintf_connection, in_session
from .core import clean_path, remove_path, split_path
from .OptOwner import OptOwner
__all__ = ['DataProduct']
def _in_session(**local_kw):
    """Module-specific wrapper around `in_session` (see wpipe.core).

    The session attribute name is derived from this module's filename,
    lower-cased and prefixed with an underscore.
    """
    attr_name = '_' + split_path(__file__)[1].lower()
    return in_session(attr_name, **local_kw)
class DataProduct(OptOwner):
"""
Represents a dataproduct owned by a pipeline, an input or a config.
Call signatures::
DataProduct(dpowner, filename, relativepath, group, data_type='',
subtype='', filtername='', ra=0, dec=0,
pointing_angle=0, options={})
DataProduct(keyid)
DataProduct(_dataproduct)
When __new__ is called, it queries the database for an existing
row in the `dataproducts` table via `sqlintf` using the given
signature. If the row exists, it retrieves its corresponding
`sqlintf.DataProduct` object, otherwise it creates a new row via a new
`sqlintf.DataProduct` instance. This `sqlintf.DataProduct` object is
then wrapped under the hidden attribute `DataProduct._dataproduct` in
the new instance of this `DataProduct` class generated by __new__.
All dataproducts are uniquely identified by their parent dpowner
(pipeline, input or configuration), their filename, and their group,
but alternatively, the constructor can take as sole argument either:
- the primary key id of the corresponding `dataproducts` table row
- the `sqlintf.DataProduct` object interfacing that table row
After the instantiation of __new__ is completed, if a dictionary of
options was given to the constructor, the __init__ method constructs
a set of Option objects owned by the dataproduct.
Parameters
----------
dpowner : Pipeline, Input or Configuration object
Parent Input or Configuration owning this dataproduct.
filename : string
Name of the file the dataproduct points to.
relativepath : string
Path of the directory in which the file the dataproduct points to
is located.
group : string
Group of the dataproduct ('raw', 'conf', 'log' or 'proc').
data_type : string
Type of the data - defaults to ''.
subtype : string
Subtype of the data - defaults to ''.
filtername : string
Name of the filter of the data - defaults to ''.
ra : int
Right ascension coordinate of the data - defaults to 0.
dec : int
Declination coordinate of the data - defaults to 0.
pointing_angle : int
Pointing angle coordinate of the data - defaults to 0.
options : dict
Dictionary of options to associate to the dataproduct.
keyid : int
Primary key id of the table row.
_dataproduct : sqlintf.DataProduct object exposing SQL interface
Corresponding sqlintf object interfacing the table row.
Attributes
----------
parents : Pipeline, Input or Configuration object
Points to attribute self.dpowner.
filename : string
Name of the file the dataproduct points to.
dp_id : int
Primary key id of the table row.
timestamp : datetime.datetime object
Timestamp of last access to table row.
relativepath : string
Path of the directory in which the file the dataproduct points to
is located.
path : string
Path where the file the dataproduct points to is located.
suffix : string
Extension of the file the dataproduct points to.
data_type : string
Type of the data.
subtype : string
Subtype of the data.
group : string
Group of the dataproduct ('raw', 'conf', 'log' or 'proc').
filtername : string
Name of the filter of the data.
ra : int
Right ascension coordinate of the data.
dec : int
Declination coordinate of the data.
pointing_angle : int
Pointing angle coordinate of the data.
dpowner_id : int
Primary key id of the table row of parent pipeline, input or
configuration.
config_id : int
Primary key id of the table row of parent configuration - raise an
AttributeError if the parent is not a Configuration object.
input_id : int
Primary key id of the table row of parent input - raise an
AttributeError if the parent is not an Input object.
pipeline_id : int
Primary key id of the table row of parent pipeline.
dpowner : Pipeline, Input or Configuration object
Pipeline, Input or Configuration object corresponding to parent
pipeline, input or configuration.
config : Configuration object
Configuration object corresponding to parent configuration - raise
an AttributeError if the parent is not a Configuration object.
input : Input object
Input object corresponding to parent input - raise an
AttributeError if the parent is not an Input object.
pipeline : Pipeline object
Pipeline object corresponding to parent pipeline.
target : Target object
Target object corresponding to parent target - raise
an AttributeError if the owner is not a Configuration object.
target_id : int
Primary key id of the table row of parent target - raise an
AttributeError if the owner is not a Configuration object.
optowner_id : int
Points to attribute dp_id.
options : core.DictLikeChildrenProxy object
Dictionary of Option objects owned by the target.
Notes
-----
A DataProduct object constructs from dataproduct owner, that can be
either a Pipeline, Input object or a Configuration object: this can be
achieved either by using the dataproduct generating object method of
such dataproduct owner object, or alternatively by using the
DataProduct class constructor giving it the dataproduct owner object
(Pipeline, Input or Configuration) as argument. In both cases, the
signature must also contain the filename of the data file as well as
its group:
>>> my_dp = my_pipe.dataproduct(filename, group)
or
>>> my_dp = my_input.dataproduct(filename, group)
or
>>> my_dp = my_config.dataproduct(filename, group)
or
>>> my_dp = wp.DataProduct(my_pipe, filename, group)
or
>>> my_dp = wp.DataProduct(my_input, filename, group)
or
>>> my_dp = wp.DataProduct(my_config, filename, group)
"""
    def __new__(cls, *args, **kwargs):
        """Retrieve or create the `sqlintf.DataProduct` row and wrap it.

        NOTE(review): the row is stored on the *class* attribute
        `cls._dataproduct` before `wpipe_to_sqlintf_connection` transfers it
        to the instance — presumably safe only when construction is not
        concurrent within one process; confirm against wpipe's usage model.
        """
        # checking if given argument is sqlintf object or existing id
        cls._dataproduct = args[0] if len(args) else None
        if not isinstance(cls._dataproduct, si.DataProduct):
            keyid = kwargs.get('id', cls._dataproduct)
            if isinstance(keyid, int):
                # Integer argument: fetch the existing row by primary key.
                with si.begin_session() as session:
                    cls._dataproduct = session.query(si.DataProduct).filter_by(id=keyid).one()
            else:
                # gathering construction arguments
                wpargs, args, kwargs = initialize_args(args, kwargs, nargs=9)
                # Map any DPOwner-subclass object found in wpargs (Pipeline,
                # Input or Configuration) onto the generic 'DPOwner' key.
                list(wpargs.__setitem__('DPOwner', wpargs[key]) for key in list(wpargs.keys())[::-1]
                     if (key in map(lambda obj: obj.__name__, si.DPOwner.__subclasses__())))
                dpowner = kwargs.get('dpowner', wpargs.get('DPOwner', None))
                filename = kwargs.get('filename', args[0])
                relativepath = clean_path(kwargs.get('relativepath', args[1]))
                group = kwargs.get('group', args[2])
                data_type = kwargs.get('data_type', '' if args[3] is None else args[3])
                subtype = kwargs.get('subtype', '' if args[4] is None else args[4])
                filtername = kwargs.get('filtername', '' if args[5] is None else args[5])
                ra = kwargs.get('ra', 0 if args[6] is None else args[6])
                dec = kwargs.get('dec', 0 if args[7] is None else args[7])
                pointing_angle = kwargs.get('pointing_angle', 0 if args[8] is None else args[8])
                # querying the database for existing row or create
                with si.begin_session() as session:
                    for retry in session.retrying_nested():
                        with retry:
                            this_nested = retry.retry_state.begin_nested()
                            # Lock the candidate row (SELECT ... FOR UPDATE) so
                            # concurrent constructors cannot insert a duplicate.
                            cls._dataproduct = this_nested.session.query(si.DataProduct).with_for_update(). \
                                filter_by(dpowner_id=dpowner.dpowner_id). \
                                filter_by(group=group). \
                                filter_by(filename=filename).one_or_none()
                            if cls._dataproduct is None:
                                # Derive the suffix from the file extension;
                                # extensions outside the known list become 'other'.
                                if '.' in filename:
                                    _suffix = filename.split('.')[-1]
                                else:
                                    _suffix = ' '
                                if _suffix not in ['fits', 'txt', 'head', 'cl',
                                                   'py', 'pyc', 'pl', 'phot', 'png', 'jpg', 'ps',
                                                   'gz', 'dat', 'lst', 'sh']:
                                    _suffix = 'other'
                                cls._dataproduct = si.DataProduct(filename=filename,
                                                                  relativepath=relativepath,
                                                                  suffix=_suffix,
                                                                  data_type=data_type,
                                                                  subtype=subtype,
                                                                  group=group,
                                                                  filtername=filtername,
                                                                  ra=ra,
                                                                  dec=dec,
                                                                  pointing_angle=pointing_angle)
                                dpowner._dpowner.dataproducts.append(cls._dataproduct)
                                this_nested.commit()
                            else:
                                # Row already exists: discard the savepoint.
                                this_nested.rollback()
                        retry.retry_state.commit()
        # verifying if instance already exists and return
        wpipe_to_sqlintf_connection(cls, 'DataProduct')
        return cls._inst
    @_in_session()
    def __init__(self, *args, **kwargs):
        """Hook up the OptOwner machinery and build any given options."""
        # The _optowner attribute must point at the sqlintf row before the
        # OptOwner constructor creates the owned Option objects.
        if not hasattr(self, '_optowner'):
            self._optowner = self._dataproduct
        super(DataProduct, self).__init__(kwargs.get('options', {}))
@classmethod
def select(cls, **kwargs):
"""
Returns a list of DataProduct objects fulfilling the kwargs filter.
Parameters
----------
kwargs
Refer to :class:`sqlintf.DataProduct` for parameters.
Returns
-------
out : list of DataProduct object
list of objects fulfilling the kwargs filter.
"""
with si.begin_session() as session:
cls._temp = session.query(si.DataProduct).filter_by(**kwargs)
return list(map(cls, cls._temp.all()))
    @property
    def parents(self):
        """
        :obj:`Pipeline`, :obj:`Input` or :obj:`Configuration`: Points to
        attribute self.dpowner (alias kept for API symmetry with other
        wpipe classes).
        """
        return self.dpowner
    @property
    @_in_session()
    def filename(self):
        """
        str: Name of the file the dataproduct points to.
        """
        # Refresh first so a rename done by another session is picked up.
        self._session.refresh(self._dataproduct)
        return self._dataproduct.filename
@filename.setter
@_in_session()
def filename(self, filename):
os.rename(self.relativepath + '/' + self._dataproduct.filename, self.relativepath + '/' + filename)
self._dataproduct.name = filename
self._dataproduct.timestamp = datetime.datetime.utcnow()
self._session.commit()
    @property
    def filesplitext(self):
        """
        tuple of str: (root, extension) pair of the dataproduct filename,
        as returned by os.path.splitext.
        """
        return os.path.splitext(self.filename)
    @property
    @_in_session()
    def dp_id(self):
        """
        int: Primary key id of the table row (read-only).
        """
        return self._dataproduct.id
    @property
    @_in_session()
    def relativepath(self):
        """
        str: Path of the directory in which the file the dataproduct points to
        is located.
        """
        # NOTE(review): unlike `filename`, no session refresh is done here —
        # presumably the path never changes after creation; confirm.
        return self._dataproduct.relativepath
@property
def path(self):
"""
str: Path | |
None
def read_buffer(self, buf, host_array, blocking=True, size=None, offset=0,
wait_for=None, need_event=False):
"""Copies from device buffer to host buffer.
Parameters:
buf: Buffer object.
host_array: numpy array.
blocking: if the read is blocking.
size: size in bytes to copy (None for entire numpy array).
offset: offset in the device buffer.
wait_for: list of the Event objects to wait.
need_event: return Event object or not.
Returns:
Event object or None if need_event == False.
"""
event = cl.ffi.new("cl_event[]", 1) if need_event else cl.NULL
wait_list, n_events = CL.get_wait_list(wait_for)
host_ptr, size = CL.extract_ptr_and_size(host_array, size)
n = self._lib.clEnqueueReadBuffer(
self.handle, buf.handle, blocking, offset, size, host_ptr,
n_events, wait_list, event)
if n:
raise CLRuntimeError("clEnqueueReadBuffer() failed with "
"error %s" % CL.get_error_description(n), n)
return Event(event[0]) if event != cl.NULL else None
def write_buffer(self, buf, host_array, blocking=True, size=None, offset=0,
wait_for=None, need_event=False):
"""Copies from host buffer to device buffer.
Parameters:
buf: Buffer object.
host_array: numpy array.
blocking: if the read is blocking.
size: size in bytes to copy (None for entire numpy array).
offset: offset in the device buffer.
wait_for: list of the Event objects to wait.
need_event: return Event object or not.
Returns:
Event object or None if need_event == False.
"""
event = cl.ffi.new("cl_event[]", 1) if need_event else cl.NULL
wait_list, n_events = CL.get_wait_list(wait_for)
host_ptr, size = CL.extract_ptr_and_size(host_array, size)
n = self._lib.clEnqueueWriteBuffer(
self.handle, buf.handle, blocking, offset, size, host_ptr,
n_events, wait_list, event)
if n:
raise CLRuntimeError("clEnqueueReadBuffer() failed with "
"error %s" % CL.get_error_description(n), n)
return Event(event[0]) if event != cl.NULL else None
def flush(self):
"""Flushes the queue.
"""
n = self._lib.clFlush(self.handle)
if n:
raise CLRuntimeError("clFlush() failed with error %s" %
CL.get_error_description(n), n)
def finish(self):
"""Waits for all previous commands issued to this queue to end.
"""
n = self._lib.clFinish(self.handle)
if n:
raise CLRuntimeError("clFinish() failed with error %s" %
CL.get_error_description(n), n)
    def release(self):
        # Releases the underlying command queue; idempotent, safe to call
        # more than once (handle is cleared after the first release).
        if self.handle is not None:
            self._lib.clReleaseCommandQueue(self.handle)
            self._handle = None
    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage collected.
        self.release()
class Buffer(CL):
    """Holds OpenCL buffer.
    Attributes:
        context: Context object associated with this buffer.
        flags: flags supplied for the creation of this buffer.
        host_array: host array reference, such as numpy array,
                    will be stored only if flags include CL_MEM_USE_HOST_PTR.
        size: size of the buffer in bytes.
    """
    def __init__(self, context, flags, host_array, size=None):
        super(Buffer, self).__init__()
        self._context = context
        self._flags = flags
        # Keep the host array alive only when OpenCL may keep using its memory.
        self._host_array = (host_array if flags & cl.CL_MEM_USE_HOST_PTR != 0
                            else None)
        host_ptr, size = CL.extract_ptr_and_size(host_array, size)
        # BUG FIX: self._size was never assigned, so the `size` property
        # raised AttributeError; remember the resolved byte size here.
        self._size = size
        err = cl.ffi.new("cl_int *")
        self._handle = self._lib.clCreateBuffer(
            context.handle, flags, size, host_ptr, err)
        if err[0]:
            self._handle = None
            raise CLRuntimeError("clCreateBuffer() failed with error %s" %
                                 CL.get_error_description(err[0]),
                                 err[0])
    @property
    def context(self):
        """
        Context object associated with this buffer.
        """
        return self._context
    @property
    def flags(self):
        """
        Flags supplied for the creation of this buffer.
        """
        return self._flags
    @property
    def host_array(self):
        """
        Host array reference, such as numpy array,
        will be stored only if flags include CL_MEM_USE_HOST_PTR.
        """
        return self._host_array
    @property
    def size(self):
        """
        Size of the buffer in bytes.
        """
        return self._size
    def release(self):
        # Releases the underlying cl_mem object; idempotent.
        if self.handle is not None:
            self._lib.clReleaseMemObject(self.handle)
            self._handle = None
    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage collected.
        self.release()
class skip(object):
    """A marker to skip setting arguments in Kernel.set_args.

    Passing in the class type makes set_args skip one argument;
    passing skip(n) makes set_args skip n arguments.
    """

    def __init__(self, number):
        self.number = number

    @property
    def number(self):
        """How many kernel arguments to skip (always >= 1)."""
        return self._number

    @number.setter
    def number(self, value):
        # Skipping zero or a negative count would be a caller bug.
        if value < 1:
            raise ValueError("number must be greater than 0")
        self._number = value
class Kernel(CL):
    """Holds OpenCL kernel.
    Attributes:
        program: Program object associated with this kernel.
        name: kernel name in the program.
    """
    def __init__(self, program, name):
        super(Kernel, self).__init__()
        self._program = program
        self._name = name
        err = cl.ffi.new("cl_int *")
        # The C API wants a NUL-terminated char[] for the kernel name.
        ss = cl.ffi.new("char[]", name.encode("utf-8"))
        self._handle = self._lib.clCreateKernel(program.handle, ss, err)
        if err[0]:
            self._handle = None
            raise CLRuntimeError("clCreateKernel() failed with error %s" %
                                 CL.get_error_description(err[0]),
                                 err[0])
    @property
    def program(self):
        """
        Program object associated with this kernel.
        """
        return self._program
    @property
    def name(self):
        """
        kernel name in the program.
        """
        return self._name
    @property
    def reference_count(self):
        # Number of outstanding references held on the cl_kernel object.
        buf = cl.ffi.new("cl_uint *")
        self._get_kernel_info(cl.CL_KERNEL_REFERENCE_COUNT, buf)
        return buf[0]
    @property
    def num_args(self):
        # Number of arguments declared by the kernel.
        # NOTE(review): the OpenCL spec defines CL_KERNEL_NUM_ARGS as a
        # cl_uint; a size_t buffer is larger than needed — confirm intended.
        buf = cl.ffi.new("size_t *")
        self._get_kernel_info(cl.CL_KERNEL_NUM_ARGS, buf)
        return buf[0]
    @property
    def attributes(self):
        # Kernel attribute string as written in the program source.
        buf = cl.ffi.new("char[]", 4096)
        self._get_kernel_info(cl.CL_KERNEL_ATTRIBUTES, buf)
        return cl.ffi.string(buf).decode("utf-8", "replace").strip()
    def set_arg(self, idx, vle, size=None):
        """Sets kernel argument.
        Parameters:
            idx: index of the kernel argument (zero-based).
            vle: kernel argument:
                 - for buffers should be an instance of Buffer,
                 - for scalars should be a numpy array slice
                   (k[0:1] for example),
                 - for NULL should be None,
                 - may be cffi pointer also, in such case size should be set.
            size: size of the vle (may be None for buffers and scalars).
        """
        if isinstance(vle, Buffer):
            # Device buffer: pass a pointer to its cl_mem handle.
            arg_value = cl.ffi.new("cl_mem[]", 1)
            arg_value[0] = vle.handle
            arg_size = cl.ffi.sizeof("cl_mem")
        elif hasattr(vle, "__array_interface__"):
            # numpy-like object: use its raw data pointer and byte size.
            arg_value = cl.ffi.cast("const void*",
                                    vle.__array_interface__["data"][0])
            arg_size = vle.nbytes if size is None else size
        elif vle is None:
            arg_value = cl.NULL
            arg_size = 0
        elif type(vle) == type(cl.NULL):  # cffi pointer
            arg_value = cl.ffi.cast("const void*", vle)
            if size is None:
                raise ValueError("size should be set in case of cffi pointer")
            arg_size = size
        else:
            raise ValueError("vle should be of type Buffer, "
                             "numpy array, cffi pointer or None "
                             "in Kernel::set_arg()")
        n = self._lib.clSetKernelArg(self.handle, idx, arg_size, arg_value)
        if n:
            raise CLRuntimeError("clSetKernelArg(%d, %s) failed with error "
                                 "%s" % (idx, repr(vle),
                                         CL.get_error_description(n)),
                                 n)
    def set_args(self, *args):
        """Sets all kernel arguments in order, honoring `skip` markers."""
        skip_until = 0
        for i, arg in enumerate(args):
            if arg is skip:
                # Bare class marker: leave this single argument untouched.
                continue
            if isinstance(arg, skip):
                # skip(n) instance: leave this and the next n-1 untouched.
                skip_until = i + arg.number
            if i < skip_until:
                continue
            if isinstance(arg, tuple) and len(arg) == 2:
                # (value, size) pair: forward the explicit size.
                self.set_arg(i, *arg)
            else:
                self.set_arg(i, arg)
    def release(self):
        # Releases the underlying cl_kernel; idempotent.
        if self.handle is not None:
            self._lib.clReleaseKernel(self.handle)
            self._handle = None
    def _get_kernel_info(self, code, buf):
        # Queries clGetKernelInfo into the caller-provided cffi buffer and
        # returns the number of bytes actually written.
        sz = cl.ffi.new("size_t *")
        err = self._lib.clGetKernelInfo(self.handle, code,
                                        cl.ffi.sizeof(buf), buf, sz)
        if err:
            raise CLRuntimeError("clGetKernelInfo() failed with error %s" %
                                 CL.get_error_description(err), err)
        return sz[0]
    def __del__(self):
        # Best-effort cleanup when the wrapper is garbage collected.
        self.release()
class Program(CL):
"""Holds OpenCL program.
Attributes:
context: Context object associated with this program.
devices: list of Device objects associated with this program.
build_logs: list of program build logs (same length as devices list).
src: program source.
include_dirs: list of include dirs.
options: additional build options.
binary: False if the program should be created from source; otherwise,
src is interpreted as precompiled binaries iterable.
"""
    def __init__(self, context, devices, src, include_dirs=(), options="",
                 binary=False):
        """Creates an OpenCL program from source or precompiled binaries.

        Parameters:
            context: Context object to attach the program to.
            devices: list of Device objects the program targets.
            src: source text, or (when binary=True) precompiled binaries
                 iterable.
            include_dirs: list of include directories for the compiler.
            options: additional build options string.
            binary: whether src holds precompiled binaries.
        """
        super(Program, self).__init__()
        self._context = context
        self._devices = devices
        # Source is kept UTF-8 encoded; None when building from binaries.
        self._src = src.encode("utf-8") if not binary else None
        self._include_dirs = list(include_dirs)
        self._options = options.strip().encode("utf-8")
        self._build_logs = []
        if not binary:
            self._create_program_from_source()
        else:
            self._create_program_from_binary(src)
    @property
    def context(self):
        """
        Context object associated with this program (read-only).
        """
        return self._context
    @property
    def devices(self):
        """
        List of Device objects associated with this program (read-only).
        """
        return self._devices
    @property
    def build_logs(self):
        """
        List of program build logs (same length as devices list); empty
        until a build has been attempted.
        """
        return self._build_logs
    @property
    def source(self):
        """
        Program source as UTF-8 encoded bytes, or None when the program
        was created from precompiled binaries.
        """
        return self._src
    @property
    def include_dirs(self):
        """
        List of include dirs (copied from the constructor argument).
        """
        return self._include_dirs
    @property
    def options(self):
        """
        Additional build options, stored stripped and UTF-8 encoded.
        """
        return self._options
    @property
    def reference_count(self):
        # Number of outstanding references held on the cl_program object.
        buf = cl.ffi.new("cl_uint *")
        self._get_program_info(cl.CL_PROGRAM_REFERENCE_COUNT, buf)
        return buf[0]
    @property
    def num_kernels(self):
        # Number of kernels declared in the built program.
        buf = cl.ffi.new("size_t *")
        self._get_program_info(cl.CL_PROGRAM_NUM_KERNELS, buf)
        return buf[0]
    @property
    def kernel_names(self):
        # CL_PROGRAM_KERNEL_NAMES is a semicolon-separated string; split it
        # into a Python list of names.
        buf = cl.ffi.new("char[]", 4096)
        self._get_program_info(cl.CL_PROGRAM_KERNEL_NAMES, buf)
        names = cl.ffi.string(buf).decode("utf-8", "replace")
        return names.split(';')
@property
def binaries(self):
sizes = cl.ffi.new("size_t[]", len(self.devices))
self._get_program_info(cl.CL_PROGRAM_BINARY_SIZES, sizes)
buf = cl.ffi.new("char *[]", len(self.devices))
bufr = [] # to hold the references to cffi arrays
for i in range(len(self.devices)):
bufr.append(cl.ffi.new("char[]", sizes[i]))
buf[i] = bufr[-1]
self._get_program_info(cl.CL_PROGRAM_BINARIES, buf)
bins = []
for i in range(len(self.devices)):
bins.append(bytes(cl.ffi.buffer(buf[i], sizes[i])[0:sizes[i]]))
del bufr
return bins
def get_kernel(self, name):
    """Create and return a Kernel object for the kernel named `name`."""
    kernel = Kernel(self, name)
    return kernel
def _get_program_info(self, code, buf):
    # Thin wrapper over clGetProgramInfo: fills `buf` with the property
    # selected by `code` and returns the number of bytes written.
    # Raises CLRuntimeError on any non-zero OpenCL error code.
    sz = cl.ffi.new("size_t *")
    err = self._lib.clGetProgramInfo(self.handle, code,
                                     cl.ffi.sizeof(buf), buf, sz)
    if err:
        raise CLRuntimeError("clGetProgramInfo() failed with error %s" %
                             CL.get_error_description(err), err)
    return sz[0]
def _get_build_logs(self, device_list):
    # Refresh self.build_logs in place with one entry per device in
    # device_list (same order). Devices whose log cannot be read get an
    # empty string, so the lengths always match.
    del self.build_logs[:]
    # NOTE(review): a fixed 64 KiB buffer truncates longer build logs —
    # confirm the limit is acceptable.
    log = cl.ffi.new("char[]", 65536)
    sz = cl.ffi.new("size_t *")
    for dev in device_list:
        e = self._lib.clGetProgramBuildInfo(
            self.handle, dev, cl.CL_PROGRAM_BUILD_LOG, cl.ffi.sizeof(log),
            log, sz)
        if e or sz[0] <= 0:
            self.build_logs.append("")
            continue
        self.build_logs.append(cl.ffi.string(log).decode("utf-8",
                                                         "replace"))
def _create_program_from_source(self):
err = cl.ffi.new("cl_int *")
srcptr = cl.ffi.new("char[]", self.source)
strings = cl.ffi.new("char*[]", 1)
strings[0] = srcptr
self._handle = self._lib.clCreateProgramWithSource(
self.context.handle, 1, strings, cl.NULL, err)
| |
import datetime

def now():
    # Timestamp helper used as a log prefix throughout the script.
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12; the
    # tz-aware replacement would change the printed format, so it is kept.
    return datetime.datetime.utcnow()

print(now(),'Script Start')
import requests
import json
import csv
from bs4 import BeautifulSoup
from html_table_extractor.extractor import Extractor
from html.parser import HTMLParser
import xml.etree.ElementTree as ET
import twython
import os
import collections

# Twitter credentials are injected via environment variables; a missing
# variable raises KeyError here and stops the script immediately.
consumer_key = os.environ['consumer_key']
consumer_secret = os.environ['consumer_secret']
access_token = os.environ['access_token']
access_token_secret = os.environ['access_token_secret']
from twython import Twython
twitter = Twython(
    consumer_key,
    consumer_secret,
    access_token,
    access_token_secret)
def tweet(message):
    # Post `message` to Twitter via the module-level client, then echo it
    # to the log (the print only runs if the API call succeeded).
    twitter.update_status(status=message)
    print("Tweeted: "+message)
def rem_dups(x):
    """Return a list of x's items with duplicates removed, keeping the
    first occurrence of each item in its original order."""
    seen = set()
    unique = []
    for item in x:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
# Load last run's cancellations so new ones can be detected later.
old_cancellations_data = []
with open('_data/cancellations.tsv','r', encoding='utf-8', newline='') as f:
    tsv_reader = csv.reader(f, delimiter="\t")
    for row in tsv_reader:
        # Some saved rows contain duplicated cells; collapse them.
        row = rem_dups(row)
        old_cancellations_data.append(row)
print(now(),'cancellations.tsv read')
# Drop the header row (raises ValueError if the file layout changed).
old_cancellations_data.remove(['Date','Event','Country','Cancellation Note','Website'])

# Cached state/county lookups from previous runs (avoids GeoNames calls).
states_list = []
with open('_data/raw/states.tsv','r', encoding='utf-8', newline='') as f:
    tsv_reader = csv.reader(f, delimiter="\t")
    for row in tsv_reader:
        states_list.append(row)
print(now(),'raw/states.tsv read')
states_list.remove(['Event','Country','State','County'])
def same_week(dateString):
    '''Return True if dateString (%Y-%m-%d) falls in the current ISO week.

    Compares both the ISO year and the ISO week number: comparing the week
    number alone would also match the same calendar week of *any* other
    year (e.g. a stale cancellation from last year).
    '''
    d1 = datetime.datetime.strptime(dateString,'%Y-%m-%d')
    d2 = datetime.datetime.today()
    # isocalendar() -> (ISO year, ISO week, ISO weekday); slicing works on
    # both the plain tuple (<3.9) and the named tuple (3.9+).
    return d1.isocalendar()[:2] == d2.isocalendar()[:2]
# Download the master events list and the two wiki pages we scrape, and
# archive a pretty-printed copy of the events JSON.
events = requests.get('https://images.parkrun.com/events.json').text
with open('_data/raw/events.json','wt', encoding='utf-8', newline='') as f:
    f.write(json.dumps(json.loads(events), indent=2))
print(now(),"raw/events.json saved")
technical_event_info = requests.get('https://wiki.parkrun.com/index.php/Technical_Event_Information').text
#with open('_data/raw/tei.html','wt', encoding='utf-8', newline='') as f:
#    f.write(technical_event_info)
#    print(now(),"raw/tei.html saved")
cancellations = requests.get('https://wiki.parkrun.com/index.php/Cancellations/Global').text
#with open('_data/raw/cancellations.html','wt', encoding='utf-8', newline='') as f:
#    f.write(cancellations)
#    print(now(),"raw/cancellations.html saved")
events = json.loads(events)['events']
# Extract the technical-event-info wiki table into a list of row lists.
soup = BeautifulSoup(technical_event_info, 'html.parser')
extractor = Extractor(soup)
extractor.parse()
tei_table = extractor.return_list()
#print(now(),tei_table)
# Events whose row contains 'AcceptingRegistrations' have not launched
# yet; collect their names so they can be filtered off the live map below.
upcoming_events_table = []
upcoming_events = []
for i in tei_table:
    out = []
    for j in i:
        j = j.strip()
        out.append(j)
    #print(now(),out)
    if 'AcceptingRegistrations' in out:
        upcoming_events.append(out[0])
        upcoming_events_table.append(out)
#print(now(),upcoming_events)
# Extract the global cancellations wiki table; pop() drops its footer and
# header rows. Columns: 0=date, 1=event, 3=country, 4=note.
soup = BeautifulSoup(cancellations, 'html.parser')
extractor = Extractor(soup)
extractor.parse()
cancellation_table = extractor.return_list()
cancellation_table.pop(-1)
cancellation_table.pop(0)
cancellations_data = []
cancellations_list = []
for i in range(len(cancellation_table)):
    try:
        for x in range(5):
            cancellation_table[i][x] = cancellation_table[i][x].strip()
    except IndexError:
        # NOTE(review): a short row aborts processing of ALL remaining
        # rows (break, not continue) — confirm this is intended.
        break
    if same_week(cancellation_table[i][0]) == True:
        #print(now(),cancellation_table[i])
        cancellations_data.append([cancellation_table[i][0],cancellation_table[i][1],cancellation_table[i][3],cancellation_table[i][4]])
        cancellations_list.append(cancellation_table[i][1])
def sortByIndex0(e):
    """Sort key: the first element of a row."""
    first, *_rest = e
    return first
def sortByIndex1(e):
    """Sort key: the second element of a row."""
    _first, second, *_rest = e
    return second
# Two stable sorts: by date first, then by event name — so rows end up
# grouped by event, and each event's rows are in date order.
cancellation_table.sort(key=sortByIndex0)
cancellation_table.sort(key=sortByIndex1)
with open('_data/all-cancellations.tsv','wt', encoding='utf-8', newline='') as f:
    tsv_writer = csv.writer(f, delimiter='\t')
    tsv_writer.writerow(['Date','Event','Region','Country','Cancellation Note'])
    for i in cancellation_table:
        tsv_writer.writerow(i)
print(now(),"all-cancellations.tsv saved")
cancellation_dates = []
new_states_list = []
x = 0  # processed-event counter (used by the progress logging below)
#upcoming_events.append('Central parkrun, Plymouth') #01/01/99 https://www.parkrun.org.uk/centralplymouth/
#upcoming_events.append('Church Mead parkrun') #01/01/99 https://www.parkrun.org.uk/churchmead/
#upcoming_events.append('Edgbaston Reservoir parkrun') #01/01/99 https://www.parkrun.org.uk/edgbastonreservoir/
#upcoming_events.append('Henlow Bridge Lakes parkrun') #01/01/99 https://www.parkrun.org.uk/henlowbridgelakes/
#upcoming_events.append('<NAME> parkrun') #01/01/99 https://www.parkrun.org.uk/penryncampus/
#upcoming_events.append('Roberts Park parkrun') #01/01/99 https://www.parkrun.org.uk/robertspark/
# Drop events that are still only accepting registrations (not yet live).
# Rebuild the list instead of calling remove() while iterating it: a
# remove() during iteration shifts the list and skips the element that
# follows each removed one, leaving some upcoming events in place.
events['features'] = [
    parkrun for parkrun in events['features']
    if parkrun['properties']['EventLongName'] not in upcoming_events
]
# Enrich every remaining event in place: status, this week's
# cancellations, website/country, state/county, and an HTML description.
for parkrun in events['features']:
    #print(now(),parkrun['properties']['EventLongName'])
    # Status combines event type (5k vs junior) with cancellation state.
    if 'junior' in parkrun['properties']['EventLongName']:
        if parkrun['properties']['EventLongName'] in cancellations_list:
            parkrun['properties']['Status'] = 'junior Cancellation'
        else:
            parkrun['properties']['Status'] = 'junior parkrunning'
    else:
        if parkrun['properties']['EventLongName'] in cancellations_list:
            parkrun['properties']['Status'] = '5k Cancellation'
        else:
            parkrun['properties']['Status'] = 'parkrunning'
    # Attach every cancellation row for this event that falls in the
    # current week, and remember the dates for the dates TSV.
    parkrun['properties']['Cancellations'] = []
    for cancellation in cancellation_table:
        if parkrun['properties']['EventLongName'] == cancellation[1] and same_week(cancellation[0]) == True:
            newcancellation = {'DateCancelled': cancellation[0], 'ReasonCancelled': cancellation[4]}
            parkrun['properties']['Cancellations'].append(newcancellation)
            cancellation_dates.append(cancellation[0])
if parkrun['properties']['countrycode'] == 3 :
parkrun['properties']['Website'] = 'https://www.parkrun.com.au/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Australia'
elif parkrun['properties']['countrycode'] == 4 :
parkrun['properties']['Website'] = 'https://www.parkrun.co.at/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Austria'
elif parkrun['properties']['countrycode'] == 14 :
parkrun['properties']['Website'] = 'https://www.parkrun.ca/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Canada'
elif parkrun['properties']['countrycode'] == 23 :
parkrun['properties']['Website'] = 'https://www.parkrun.dk/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Denmark'
elif parkrun['properties']['countrycode'] == 30 :
parkrun['properties']['Website'] = 'https://www.parkrun.fi/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Finland'
elif parkrun['properties']['countrycode'] == 31 :
parkrun['properties']['Website'] = 'https://www.parkrun.fr/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'France'
elif parkrun['properties']['countrycode'] == 32 :
parkrun['properties']['Website'] = 'https://www.parkrun.com.de/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Germany'
elif parkrun['properties']['countrycode'] == 42 :
parkrun['properties']['Website'] = 'https://www.parkrun.ie/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Ireland'
elif parkrun['properties']['countrycode'] == 44 :
parkrun['properties']['Website'] = 'https://www.parkrun.it/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Italy'
elif parkrun['properties']['countrycode'] == 46 :
parkrun['properties']['Website'] = 'https://www.parkrun.jp/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Japan'
elif parkrun['properties']['countrycode'] == 57 :
parkrun['properties']['Website'] = 'https://www.parkrun.my/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Malaysia'
elif parkrun['properties']['countrycode'] == 65 :
parkrun['properties']['Website'] = 'https://www.parkrun.co.nz/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'New Zealand'
elif parkrun['properties']['countrycode'] == 67 :
parkrun['properties']['Website'] = 'https://www.parkrun.no/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Norway'
elif parkrun['properties']['countrycode'] == 74 :
parkrun['properties']['Website'] = 'https://www.parkrun.pl/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Poland'
elif parkrun['properties']['countrycode'] == 79 :
parkrun['properties']['Website'] = 'https://www.parkrun.ru/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Russia'
elif parkrun['properties']['countrycode'] == 82 :
parkrun['properties']['Website'] = 'https://www.parkrun.sg/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Singapore'
elif parkrun['properties']['countrycode'] == 85 :
parkrun['properties']['Website'] = 'https://www.parkrun.co.za/'+parkrun['properties']['eventname']
if parkrun['properties']['EventLongName'] in ['Windhoek parkrun','Omeya parkrun','Swakopmund parkrun','Walvis Bay parkrun']:
parkrun['properties']['Country'] = 'Namibia'
elif parkrun['properties']['EventLongName'] in ['Mbabane parkrun']:
parkrun['properties']['Country'] = 'Eswatini'
else:
parkrun['properties']['Country'] = 'South Africa'
elif parkrun['properties']['countrycode'] == 88 :
parkrun['properties']['Website'] = 'https://www.parkrun.se/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Sweden'
elif parkrun['properties']['countrycode'] == 97 :
parkrun['properties']['Website'] = 'https://www.parkrun.org.uk/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'United Kingdom'
elif parkrun['properties']['countrycode'] == 98 :
parkrun['properties']['Website'] = 'https://www.parkrun.us/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'USA'
elif parkrun['properties']['countrycode'] == 64 :
parkrun['properties']['Website'] = 'https://www.parkrun.co.nl/'+parkrun['properties']['eventname']
parkrun['properties']['Country'] = 'Netherlands'
else: parkrun['properties']['Website'] = 'Unavailable'
new = True
for event in states_list:
if event[0] == parkrun['properties']['EventLongName']:
#print(now(),parkrun['properties']['EventShortName'],'already saved state')
new_states_list.append(event)
parkrun['properties']['State'] = event[2]
parkrun['properties']['County'] = event[3]
new = False
if new == True:
#print(now(),parkrun['properties']['EventShortName'],'not saved state')
GEONAME_USERNAME = '_josh_justjosh'
url = "http://api.geonames.org/countrySubdivision?lat="+str(parkrun['geometry']['coordinates'][1])+"&lng="+str(parkrun['geometry']['coordinates'][0])+"&radius=1.5&maxRows=1&level=2&username="+GEONAME_USERNAME
root = ET.fromstring(requests.get(url).text.strip())
try:
state = root.find('countrySubdivision').find('adminName1').text
except:
state = "-Unknown-"
print(now(),parkrun['properties']['EventLongName'],"- State not Found -",url)
try:
county = root.find('countrySubdivision').find('adminName2').text
except:
county = "-Unknown-"
print(now(),parkrun['properties']['EventLongName'],'- County not found -',url)
parkrun['properties']['State'] = state
parkrun['properties']['County'] = county
add = [parkrun['properties']['EventLongName'],parkrun['properties']['Country'],state,county]
new_states_list.append(add)
    # Build the HTML popup description for the map. Weeks with several
    # cancellations get one <td> per cancellation, so the Status and
    # Website cells span the same number of columns (colspan).
    parkrun['properties']['description']='<h4 style="margin: 0 0 8px;">'+parkrun['properties']['EventLongName']+'</h4><table><tr><th>Status:</th><td'
    if len(parkrun['properties']['Cancellations']) > 1:
        parkrun['properties']['description']+=' colspan='+str(len(parkrun['properties']['Cancellations']))+' '
    parkrun['properties']['description']+='>'+parkrun['properties']['Status']+'</td></tr>'
    if len(parkrun['properties']['Cancellations']) == 1:
        parkrun['properties']['description']+='<tr><th>Date Cancelled:</th><td>'+datetime.datetime.strptime(parkrun['properties']['Cancellations'][0]['DateCancelled'],'%Y-%m-%d').strftime('%A, %e %B %Y')+'</td></tr>'
        parkrun['properties']['description']+='<tr><th>Cancellation Note:</th><td>'+parkrun['properties']['Cancellations'][0]['ReasonCancelled']+'</td></tr>'
    elif len(parkrun['properties']['Cancellations']) > 1:
        parkrun['properties']['description']+='<tr><th>Date Cancelled:</th>'
        for i in parkrun['properties']['Cancellations']:
            parkrun['properties']['description']+='<td>'+datetime.datetime.strptime(i['DateCancelled'],'%Y-%m-%d').strftime('%A, %e %B %Y')+'</td>'
        parkrun['properties']['description']+='</tr><tr><th>Cancellation Note:</th>'
        for i in parkrun['properties']['Cancellations']:
            parkrun['properties']['description']+='<td>'+i['ReasonCancelled']+'</td>'
        parkrun['properties']['description']+='</tr>'
    if parkrun['properties']['Website'] != 'Unavailable':
        parkrun['properties']['description']+='<tr><th>Website:</th><td'
        if len(parkrun['properties']['Cancellations']) > 1:
            parkrun['properties']['description']+=' colspan='+str(len(parkrun['properties']['Cancellations']))+' '
        parkrun['properties']['description']+='><a href="'+parkrun['properties']['Website']+'">'+parkrun['properties']['Website'].replace('https://www.','')+'</a></td></tr>'
    else: print(now(),parkrun['properties']['EventShortName'],'- Website Not Generated')
    parkrun['properties']['description']+='</table>'
    x += 1
    #print(now(),x,"/",len(events['features']),'-',parkrun['properties']['EventShortName'],"processed")
    #if x == 1750:
    #    break
# Persist the enriched GeoJSON for the map.
with open('_data/events.json','w', encoding='utf-8') as f:
    f.write(json.dumps(events, indent=2))
print(now(),'events.json saved')
# Unique, sorted list of dates with at least one cancellation this week.
cancellation_dates = list(dict.fromkeys(cancellation_dates))
cancellation_dates.sort()
with open('_data/cancellation-dates.tsv','wt', encoding='utf-8', newline='') as f:
    tsv_writer = csv.writer(f, delimiter='\t')
    tsv_writer.writerow(['Dates'])
    for date in cancellation_dates:
        tsv_writer.writerow([date])
print(now(),"cancellation-dates.tsv saved")
# Flatten the GeoJSON features into sorted rows for the events table.
events_data = []
for event in events['features']:
    props = event['properties']
    lon = event['geometry']['coordinates'][0]
    lat = event['geometry']['coordinates'][1]
    events_data.append([
        props['EventLongName'],
        lat,
        lon,
        props['Country'],
        props['State'],
        props['County'],
        props['Status'],
        props['Cancellations'],
        props['Website'],
    ])
events_data.sort()
with open('_data/events-table.tsv','wt', encoding='utf-8', newline='') as f:
    tsv_writer = csv.writer(f, delimiter='\t')
    tsv_writer.writerow(['Event','Latitude','Longitude','Country','State','County','Status','Cancellations','Website'])
    for event in events_data:
        tsv_writer.writerow(event)
print(now(),"events-table.tsv saved")
# Per-country status tallies, built programmatically instead of 24
# hand-written copies of the same zeroed dict. Dict insertion order is
# preserved, so the countries TSV keeps exactly this row order (with the
# grand-total 'Total' row last).
def fresh_counts():
    """Return a new zeroed tally dict (one per country, plus totals)."""
    return {
        'parkrunning': 0,
        'junior parkrunning': 0,
        '5k Cancellations': 0,
        'junior Cancellations': 0,
        'Total': 0,
    }

COUNTRY_ORDER = [
    'Australia', 'Austria', 'Canada', 'Denmark', 'Eswatini', 'Finland',
    'France', 'Germany', 'Ireland', 'Italy', 'Japan', 'Malaysia',
    'Namibia', 'Netherlands', 'New Zealand', 'Norway', 'Poland', 'Russia',
    'Singapore', 'South Africa', 'Sweden', 'United Kingdom', 'USA',
    'Total',
]
# Each country gets its own dict object (no shared mutable state).
countries = {name: fresh_counts() for name in COUNTRY_ORDER}
totals = fresh_counts()
# Tally every event's status into its country's row; 'PtR' events are
# counted with the 5k cancellations. Unknown statuses are logged and
# skipped.
_STATUS_COLUMN = {
    'parkrunning': 'parkrunning',
    'junior parkrunning': 'junior parkrunning',
    '5k Cancellation': '5k Cancellations',
    'junior Cancellation': 'junior Cancellations',
    'PtR': '5k Cancellations',
}
for parkrun in events['features']:
    column = _STATUS_COLUMN.get(parkrun['properties']['Status'])
    if column is None:
        print(now(),"Error:",parkrun['properties']['EventLongName'])
        continue
    tally = countries[parkrun['properties']['Country']]
    tally[column] += 1
    tally['Total'] += 1
#print(now(),countries)
# Sum every column across countries (the pre-existing all-zero 'Total'
# row contributes nothing), then install the grand-total row.
for data in countries.values():
    for column in totals:
        totals[column] += data[column]
countries['Total'] = totals
# Write the per-country summary; zero counts become blank cells so the
# published table only shows non-zero figures.
with open('_data/countries-data.tsv','wt', encoding='utf-8', newline='') as f:
    tsv_writer = csv.writer(f, delimiter='\t')
    tsv_writer.writerow(['Country','parkrunning','junior parkrunning','5k Cancellations','junior Cancellations','Total'])
    for i,j in countries.items():
        out = [i]
        for k,l in j.items():
            if l != 0:
                out.append(l)
            else:
                out.append('')
        tsv_writer.writerow(out)
print(now(),"countries-data.tsv saved")
uk = {
'England': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Northern Ireland': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Scotland': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Wales': {
'parkrunning': 0,
'junior parkrunning':0,
'5k Cancellations':0,
'junior Cancellations':0,
'Total':0
},
'Other': | |
= core.Place()
p.set_place(heter_place)
self._default_executor = core.Executor(p)
# TODO(zhangminxu): support heterps pipeline training using exe.run
if "startup_program" in program._heter_pipeline_opt:
#print("get startup_program from _pipeline_opt")
program = program._heter_pipeline_opt["startup_program"]
if isinstance(program, Program) and \
len(program.global_block().ops) == 0:
if use_default_main_program:
error_info = "Now you are using default_main_program, "\
"but there are no operators in the program to be executed. "\
"Please ensure you create model correctly or you can pass "\
"the Program or the CompiledProgram manually."
else:
error_info = "There are no operators in the program to be executed. "\
"If you pass Program manually, please use fluid.program_guard "\
"to ensure the current Program is being used."
warnings.warn(error_info)
if scope is None:
scope = global_scope()
# use_prune can be overrided by putting optimize_ops in fetch_list
_origin_fetch_list = fetch_list
_origin_program = program
fetch_list, optimize_ops = self._split_optimize_ops_in_fetch_list(
fetch_list)
if optimize_ops:
use_prune = True
if use_prune:
cache_key = _get_strong_program_cache_key(program, feed,
_origin_fetch_list)
cached_pruned_program = self._get_pruned_program_cache(cache_key)
if cached_pruned_program is None:
if isinstance(program, compiler.CompiledProgram):
program_scope_cache = self._get_pruned_program_scope_cache(
str(id(_origin_program)))
# copy the original program, so it can be cached.
program = copy.copy(program)
# share the local scopes for same original CompiledProgram.
program._share_vars_from = program_scope_cache
if self._get_pruned_program_scope_cache(
str(id(_origin_program))) is None:
self._add_pruned_program_scope_cache(
str(id(_origin_program)), program)
pruned_program = self._prune_program(program, feed, fetch_list,
optimize_ops)
self._add_pruned_program_cache(cache_key, pruned_program)
else:
pruned_program = cached_pruned_program
feed = self._update_feed(pruned_program, feed)
program = pruned_program
def _can_use_interpreter_core(program, place):
if core.is_compiled_with_npu() or core.is_compiled_with_xpu(
) or core.is_compiled_with_mlu() or core.is_compiled_with_ipu(
) or isinstance(place, core.CustomPlace):
return False
compiled = isinstance(program, compiler.CompiledProgram)
# print("compiled is : {}".format(compiled))
# NOTE(zhiqiu): do not support compiled program now
if compiled:
return False
# if program._is_data_parallel and len(
# program._get_places(place, program._places)) == 1:
# return True
# else:
# return False
else:
if isinstance(program._graph, compiler.CompiledProgram):
return False
assert isinstance(program, Program)
return True
# NOTE: This is an experimental feature. If `export FLAGS_USE_STANDALONE_EXECUTOR=1 `,
# use StandaloneExecutor to run the program.
if self._enable_interpreter_core and _can_use_interpreter_core(
program, self.place):
inner_program = program._program if isinstance(
program, compiler.CompiledProgram) else program
if not inner_program._is_start_up_program_:
if feed is None:
feed = {}
elif isinstance(feed, (list, tuple)):
assert len(feed) == 1, "Not compiled with data parallel"
feed = feed[0]
if not isinstance(feed, dict):
raise TypeError(
"feed requires dict as its Parameter. But you passed in %s"
% (type(feed)))
feed = self._update_feed(program, feed)
key = _get_strong_program_cache_key(inner_program, feed,
fetch_list)
# a little bit tricy here, use inner_program before _add_feed_fetch_ops to get key
# while use program to geet _StandaloneExecutor
if key not in self._executor_cache._cached_executors:
program = self._add_feed_fetch_ops(
program=inner_program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
use_fetch_v2=True)
new_program = program.clone()
new_exe = _StandaloneExecutor(self.place, new_program,
scope)
self._executor_cache._cached_executors[key] = (new_program,
new_exe)
program, new_exe = self._executor_cache._cached_executors[key]
self._feed_data(program, feed, feed_var_name, scope)
if hasattr(program, 'lr_sheduler'):
from paddle.optimizer.lr import LRScheduler
assert isinstance(program.lr_sheduler,
LRScheduler), "must be LRScheduler"
lr_sheduler = program.lr_sheduler
lr_value = lr_sheduler()
lr_var = program.global_block().vars[lr_sheduler._var_name]
data = np.array([lr_value
]).astype(convert_dtype(lr_var.dtype))
tensor = core.get_variable_tensor(scope,
lr_sheduler._var_name)
# NOTE(dev): `set` always call TensorCopySync that is a
# blocking behavior. So we use `_copy_from` to replace it.
cpu_tensor = _as_lodtensor(data, core.CPUPlace())
tensor._copy_from(cpu_tensor, self.place)
return new_exe.run(list(feed.keys()), fetch_list, return_numpy)
compiled = isinstance(program, compiler.CompiledProgram)
# Check if fluid.data() variable no feed data
if use_prune:
if compiled:
global_block = program._program.global_block()
else:
global_block = program.global_block()
for varname in global_block.vars:
vardesc = global_block.desc.find_var(cpt.to_bytes(varname))
varobj = global_block.vars[varname]
# Can not check var build by fluid.layers.data(), bucause fluid.layers.data() had not set need_check_feed
if vardesc.persistable() == False and \
vardesc.type() == core.VarDesc.VarType.LOD_TENSOR and \
vardesc.need_check_feed() == True and \
varobj.stop_gradient == True and \
varobj.is_data == True and \
varobj.belong_to_optimizer == False and \
varname not in feed:
raise ValueError('Need feed data for variable %s' % varname)
acp._auto_checkpoint(self, program)
# For backward compatibility, run directly.
if not compiled:
# In distributed training, the compiled program is saved in Program._graph
has_compiled_graph = isinstance(program._graph,
compiler.CompiledProgram)
if has_compiled_graph:
program._graph._compile(scope, self.place)
# _graph in program does not support inference since the _graph is optimized
# through optimizer.minimize function and should not be used as inference graph
# assert not program._graph._is_inference
return self._run_parallel(program._graph,
scope=scope,
feed=feed,
fetch_list=fetch_list,
fetch_var_name=fetch_var_name,
return_numpy=return_numpy,
return_merged=return_merged)
return self._run_program(program,
feed=feed,
fetch_list=fetch_list,
feed_var_name=feed_var_name,
fetch_var_name=fetch_var_name,
scope=scope,
return_numpy=return_numpy,
use_program_cache=use_program_cache)
program._compile(scope, self.place)
if program._is_inference:
return self._run_inference(program._executor, feed)
else:
return self._run_parallel(program,
scope=scope,
feed=feed,
fetch_list=fetch_list,
fetch_var_name=fetch_var_name,
return_numpy=return_numpy,
return_merged=return_merged)
def _run_program(self, program, feed, fetch_list, feed_var_name,
                 fetch_var_name, scope, return_numpy, use_program_cache):
    """Run a plain (non-compiled) Program with the default C++ executor.

    Appends feed/fetch ops to the program, feeds `feed` into `scope`,
    executes, and returns the fetched tensors (converted to numpy when
    `return_numpy` is True). With `use_program_cache`, the prepared
    (program, context, sub-scope) triple is cached keyed on
    (program, feed, fetch_list) so repeated calls skip re-preparation.
    """
    from paddle.optimizer.lr import LRScheduler
    if feed is None:
        feed = {}
    elif isinstance(feed, (list, tuple)):
        # Data-parallel callers pass a list of per-device feed dicts.
        assert len(feed) == 1, "Not compiled with data parallel"
        feed = feed[0]
    if not isinstance(feed, dict):
        raise TypeError(
            "feed requires dict as its Parameter. But you passed in %s" %
            (type(feed)))
    assert program is not None, "The program should not be Empty"
    if not isinstance(program, Program):
        raise TypeError(
            "Executor requires Program as its Parameter. But you passed in %s"
            % (type(program)))
    if not isinstance(fetch_var_name, str):
        raise TypeError(
            "The name of fetch variable requires string as its Parameter. But you passed in %s"
            % (type(fetch_var_name)))
    if use_program_cache:
        cache_key = _get_strong_program_cache_key(program, feed, fetch_list)
        cached_program = self._get_program_cache(cache_key)
        cached_ctx = self._get_ctx_cache(cache_key)
        cached_scope = self._get_scope_cache(cache_key)
        if cached_program is None:
            # First time for this key: build the fed/fetched program,
            # prepare an execution context and a dedicated sub-scope.
            cached_program = self._add_feed_fetch_ops(
                program=program,
                feed=feed,
                fetch_list=fetch_list,
                feed_var_name=feed_var_name,
                fetch_var_name=fetch_var_name)
            self._add_program_cache(cache_key, cached_program)
            fetch_list_str = list(map(_to_name_str, fetch_list))
            cached_ctx = self._default_executor.prepare(
                cached_program.desc, 0, fetch_list_str, False)
            # currently, we cache program, vars, sub_scope here
            # we suppose that in a life cycle of training, a user
            # will not create many programs. So, here the basic
            # rule of caching is to cache all unseen (program, var, scope)
            # when a user use use_program_cache.
            cached_scope = scope.new_scope()
            self._default_executor.create_variables(cached_program.desc,
                                                    cached_scope, 0)
            self._add_ctx_cache(cache_key, cached_ctx)
            self._add_scope_cache(cache_key, cached_scope)
        program = cached_program
        ctx = cached_ctx
        scope = cached_scope
    else:
        program = self._add_feed_fetch_ops(program=program,
                                           feed=feed,
                                           fetch_list=fetch_list,
                                           feed_var_name=feed_var_name,
                                           fetch_var_name=fetch_var_name)
    self._feed_data(program, feed, feed_var_name, scope)
    # 'lr_sheduler' (sic) is the attribute name used across the codebase;
    # push the scheduler's current LR value into its scope variable.
    if hasattr(program, 'lr_sheduler'):
        assert isinstance(program.lr_sheduler,
                          LRScheduler), "must be LRScheduler"
        lr_sheduler = program.lr_sheduler
        lr_value = lr_sheduler()
        lr_var = program.global_block().vars[lr_sheduler._var_name]
        data = np.array([lr_value]).astype(convert_dtype(lr_var.dtype))
        tensor = core.get_variable_tensor(scope, lr_sheduler._var_name)
        tensor.set(data, self.place)
    if not use_program_cache:
        self._default_executor.run(program.desc, scope, 0, True, True,
                                   [fetch_var_name])
    else:
        self._default_executor.run_prepared_ctx(ctx, scope, False, False,
                                                False)
    # Collect the fetch results materialized by the fetch ops.
    arr = scope.find_var(fetch_var_name).get_fetch_list()
    tensors = arr._move_to_list()
    if return_numpy:
        return as_numpy(tensors)
    else:
        return tensors
def _run_inference(self, exe, feed):
    # Delegate to the compiled program's dedicated inference executor.
    return exe.run(feed)
def _check_fetch_list(self, fetch_list):
    """Normalize `fetch_list` into a flat list of fetchable items.

    A single Variable/str becomes a one-item list; None becomes [].
    A nested tuple/list (e.g. [x, 'mean_out']) is flattened only when
    every member is itself fetchable; otherwise it is kept whole
    (e.g. an (optimize_op, param) pair used for pruning).
    """
    def _is_fetch_var(item):
        return isinstance(item, (Variable, str, six.string_types))

    def _is_tuple_list(item):
        return isinstance(item, (tuple, list))

    if fetch_list is None:
        return []
    if _is_fetch_var(fetch_list):
        return [fetch_list]
    assert _is_tuple_list(fetch_list), \
        "Currently , The fetch_list type only should be list or tuple, \n"\
        "but the input type is {}. For more information please refer to \n"\
        "the executor.run(...).".format(type(fetch_list))
    result = []
    for idx, item in enumerate(fetch_list):
        if _is_fetch_var(item):
            result.append(item)
        elif _is_tuple_list(item):
            if all(_is_fetch_var(member) for member in item):
                result.extend(list(item))
            else:
                result.append(item)
        else:
            raise TypeError(
                "Require fetch_list[{}] 's type shall be one of (Variable, str), but received {}."
                .format(idx,
                        type(item).__name__))
    return result
def _dump_debug_info(self, program=None, trainer=None):
with open(str(id(program)) + "_train_desc.prototxt", "w") as fout:
fout.write(str(trainer))
if program._fleet_opt and "fleet_desc" in program._fleet_opt:
with open("fleet_desc.prototxt", "w") as fout:
fout.write(str(program._fleet_opt["fleet_desc"]))
def _adjust_pipeline_resource(self, pipeline_opt, dataset, pipeline_num):
filelist_length = len(dataset.dataset.get_filelist())
if filelist_length < pipeline_num:
pipeline_num = filelist_length
print(
"Pipeline training: setting the pipeline num to %d is enough because there are only %d files"
% (filelist_length, filelist_length))
if filelist_length < pipeline_num * pipeline_opt["concurrency_list"][0]:
print(
"Pipeline training: setting the 1st element in concurrency_list to %d is enough because there are only %d files"
% (filelist_length // pipeline_num, filelist_length))
pipeline_opt["concurrency_list"][
0] = filelist_length // pipeline_num
dataset.set_thread(pipeline_opt["concurrency_list"][0] * pipeline_num)
return pipeline_num
def _prepare_trainer(self,
program=None,
dataset=None,
scope=None,
thread=0,
debug=False,
fetch_list=None,
fetch_info=None,
print_period=100):
is_heter = 0
use_ps_gpu = 0
if not program._fleet_opt is None:
if program._fleet_opt.get("worker_class", "") == "HeterCpuWorker":
is_heter = 1
if program._fleet_opt.get("trainer", "") == "HeterXpuTrainer":
is_heter = 1
if program._fleet_opt.get("use_ps_gpu", False):
use_ps_gpu = True
if scope is None:
scope = global_scope()
if fetch_list is None:
fetch_list = []
if fetch_info is None:
fetch_info = []
assert len(fetch_list) == len(fetch_info)
compiled = isinstance(program, compiler.CompiledProgram)
if is_heter:
from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
from paddle.fluid.incubate.fleet.utils.fleet_util import FleetUtil
fu = FleetUtil()
ret = fu.split_program_by_device(program)
if not compiled:
# TODO: Need a better way to distinguish and specify different execution mode
if program._pipeline_opt:
trainer = TrainerFactory()._create_trainer(
program._pipeline_opt)
elif program._heter_pipeline_opt:
trainer = TrainerFactory()._create_trainer(
program._heter_pipeline_opt)
else:
trainer = TrainerFactory()._create_trainer(program._fleet_opt)
trainer._set_thread_barrier(program._is_distributed)
trainer._set_program(program)
if | |
'fields_difference': fields_difference,
'field_mask': field_mask,
'field_texture': field_texture,
'sigma_zh': sigma_zh,
'stat_test_lag1': stat_test_lag1,
'stat_test_lag2': stat_test_lag2,
'mean_phase': mean_phase,
'differential_phase': differential_phase,
'unfolded_differential_phase': unfolded_differential_phase,
'corrected_differential_phase': corrected_differential_phase,
'uncorrected_differential_phase': uncorrected_differential_phase,
'uncorrected_unfiltered_differential_phase': (
uncorrected_unfiltered_differential_phase),
'system_differential_phase': system_differential_phase,
'specific_differential_phase': specific_differential_phase,
'corrected_specific_differential_phase': (
corrected_specific_differential_phase),
'uncorrected_specific_differential_phase': (
uncorrected_specific_differential_phase),
'uncorrected_unfiltered_specific_differential_phase': (
uncorrected_unfiltered_specific_differential_phase),
'linear_depolarization_ratio': linear_depolarization_ratio,
'linear_depolarization_ratio_h': linear_depolarization_ratio_h,
'linear_depolarization_ratio_v': linear_depolarization_ratio_v,
'circular_depolarization_ratio': circular_depolarization_ratio,
'signal_quality_index': signal_quality_index,
'signal_quality_index_vv': signal_quality_index_vv,
'unfiltered_signal_quality_index': unfiltered_signal_quality_index,
'unfiltered_signal_quality_index_vv': unfiltered_signal_quality_index_vv,
'signal_to_noise_ratio': signal_to_noise_ratio,
'signal_to_noise_ratio_hh': signal_to_noise_ratio_hh,
'signal_to_noise_ratio_vv': signal_to_noise_ratio_vv,
'noisedBZ_hh': noisedBZ_hh,
'noisedBZ_vv': noisedBZ_vv,
'noisedBm_hh': noisedBm_hh,
'noisedBm_vv': noisedBm_vv,
'noisedBADU_hh': noisedBADU_hh,
'noisedBADU_vv': noisedBADU_vv,
'noiseADU_hh': noiseADU_hh,
'noiseADU_vv': noiseADU_vv,
'noise_pos_h': noise_pos_h,
'noise_pos_v': noise_pos_v,
'transmitted_signal_power_h': transmitted_signal_power_h,
'transmitted_signal_power_v': transmitted_signal_power_v,
'complex_spectra_hh_ADU': complex_spectra_hh_ADU,
'complex_spectra_vv_ADU': complex_spectra_vv_ADU,
'spectral_power_hh_ADU': spectral_power_hh_ADU,
'spectral_power_vv_ADU': spectral_power_vv_ADU,
'spectral_power_hh_dBADU': spectral_power_hh_dBADU,
'spectral_power_vv_dBADU': spectral_power_vv_dBADU,
'spectral_power_hh_dBm': spectral_power_hh_dBm,
'spectral_power_vv_dBm': spectral_power_vv_dBm,
'spectral_noise_power_hh_dBZ': spectral_noise_power_hh_dBZ,
'spectral_noise_power_vv_dBZ': spectral_noise_power_vv_dBZ,
'spectral_noise_power_hh_dBm': spectral_noise_power_hh_dBm,
'spectral_noise_power_vv_dBm': spectral_noise_power_vv_dBm,
'spectral_noise_power_hh_dBADU': spectral_noise_power_hh_dBADU,
'spectral_noise_power_vv_dBADU': spectral_noise_power_vv_dBADU,
'spectral_noise_power_hh_ADU': spectral_noise_power_hh_ADU,
'spectral_noise_power_vv_ADU': spectral_noise_power_vv_ADU,
'spectral_phase_hh': spectral_phase_hh,
'spectral_phase_vv': spectral_phase_vv,
'spectral_reflectivity_hh': spectral_reflectivity_hh,
'spectral_reflectivity_vv': spectral_reflectivity_vv,
'spectral_differential_reflectivity': spectral_differential_reflectivity,
'spectral_differential_phase': spectral_differential_phase,
'spectral_copolar_correlation_coefficient': (
spectral_copolar_correlation_coefficient),
'unfiltered_complex_spectra_hh_ADU': unfiltered_complex_spectra_hh_ADU,
'unfiltered_complex_spectra_vv_ADU': unfiltered_complex_spectra_vv_ADU,
'unfiltered_spectral_power_hh_ADU': unfiltered_spectral_power_hh_ADU,
'unfiltered_spectral_power_vv_ADU': unfiltered_spectral_power_vv_ADU,
'unfiltered_spectral_power_hh_dBADU': unfiltered_spectral_power_hh_dBADU,
'unfiltered_spectral_power_vv_dBADU': unfiltered_spectral_power_vv_dBADU,
'unfiltered_spectral_power_hh_dBm': unfiltered_spectral_power_hh_dBm,
'unfiltered_spectral_power_vv_dBm': unfiltered_spectral_power_vv_dBm,
'unfiltered_spectral_phase_hh': unfiltered_spectral_phase_hh,
'unfiltered_spectral_phase_vv': unfiltered_spectral_phase_vv,
'unfiltered_spectral_reflectivity_hh': unfiltered_spectral_reflectivity_hh,
'unfiltered_spectral_reflectivity_vv': unfiltered_spectral_reflectivity_vv,
'unfiltered_spectral_differential_reflectivity': (
unfiltered_spectral_differential_reflectivity),
'unfiltered_spectral_differential_phase': (
unfiltered_spectral_differential_phase),
'unfiltered_spectral_copolar_correlation_coefficient': (
unfiltered_spectral_copolar_correlation_coefficient),
'IQ_hh_ADU': IQ_hh_ADU,
'IQ_vv_ADU': IQ_vv_ADU,
'IQ_noise_power_hh_dBZ': IQ_noise_power_hh_dBZ,
'IQ_noise_power_vv_dBZ': IQ_noise_power_vv_dBZ,
'IQ_noise_power_hh_dBm': IQ_noise_power_hh_dBm,
'IQ_noise_power_vv_dBm': IQ_noise_power_vv_dBm,
'IQ_noise_power_hh_dBADU': IQ_noise_power_hh_dBADU,
'IQ_noise_power_vv_dBADU': IQ_noise_power_vv_dBADU,
'IQ_noise_power_hh_ADU': IQ_noise_power_hh_ADU,
'IQ_noise_power_vv_ADU': IQ_noise_power_vv_ADU,
'rain_rate': rain_rate,
'bird_density': bird_density,
'sun_hit_h': sun_hit_h,
'sun_hit_v': sun_hit_v,
'sun_hit_zdr': sun_hit_zdr,
'radar_estimated_rain_rate': radar_estimated_rain_rate,
'corrected_radar_estimated_rain_rate': corrected_radar_estimated_rain_rate,
'rainfall_accumulation': rainfall_accumulation,
'precipitation_type': precipitation_type,
'radar_echo_classification': radar_echo_classification,
'corrected_radar_echo_classification': corrected_radar_echo_classification,
'radar_echo_classification_MF': radar_echo_classification_MF,
'hydroclass_entropy': hydroclass_entropy,
'proportion_AG': proportion_AG,
'proportion_CR': proportion_CR,
'proportion_LR': proportion_LR,
'proportion_RP': proportion_RP,
'proportion_RN': proportion_RN,
'proportion_VI': proportion_VI,
'proportion_WS': proportion_WS,
'proportion_MH': proportion_MH,
'proportion_IH': proportion_IH,
'probability_AG': probability_AG,
'probability_CR': probability_CR,
'probability_LR': probability_LR,
'probability_RP': probability_RP,
'probability_RN': probability_RN,
'probability_VI': probability_VI,
'probability_WS': probability_WS,
'probability_MH': probability_MH,
'probability_IH': probability_IH,
'hydroclass_confidence': hydroclass_confidence,
'radar_echo_id': radar_echo_id,
'clutter_exit_code': clutter_exit_code,
'melting_layer': melting_layer,
'melting_layer_height': melting_layer_height,
'probability_of_hail': probability_of_hail,
'maximum_expected_severe_hail_size': maximum_expected_severe_hail_size,
'maximum_echo': maximum_echo,
'maximum_echo_height': maximum_echo_height,
'echo_top_15dBZ': echo_top_15dBZ,
'echo_top_20dBZ': echo_top_20dBZ,
'echo_top_45dBZ': echo_top_45dBZ,
'echo_top_50dBZ': echo_top_50dBZ,
'vertically_integrated_liquid': vertically_integrated_liquid,
'specific_attenuation': specific_attenuation,
'path_integrated_attenuation': path_integrated_attenuation,
'specific_differential_attenuation': specific_differential_attenuation,
'path_integrated_differential_attenuation': (
path_integrated_differential_attenuation),
'corrected_specific_attenuation': corrected_specific_attenuation,
'corrected_path_integrated_attenuation': (
corrected_path_integrated_attenuation),
'corrected_specific_differential_attenuation': (
corrected_specific_differential_attenuation),
'corrected_path_integrated_differential_attenuation': (
corrected_path_integrated_differential_attenuation),
'temperature': temperature,
'iso0': iso0,
'height_over_iso0': height_over_iso0,
'iso0_height': iso0_height,
'cosmo_index': cosmo_index,
'hzt_index': hzt_index,
'visibility': visibility,
'mininum_visible_elevation': mininum_visible_elevation,
'differential_phase_texture': differential_phase_texture,
'cross_correlation_ratio_texture': cross_correlation_ratio_texture,
'differential_reflectivity_texture': differential_reflectivity_texture,
'reflectivity_texture': reflectivity_texture,
'eastward_wind_component': eastward_wind_component,
'northward_wind_component': northward_wind_component,
'vertical_wind_component': vertical_wind_component,
'azimuthal_horizontal_wind_component':
azimuthal_horizontal_wind_component,
'vertical_wind_shear': vertical_wind_shear,
'wind_speed': wind_speed,
'wind_direction': wind_direction,
'height': height,
'number_of_samples': number_of_samples,
'standard_deviation': standard_deviation,
'sum': sum,
'sum_squared': sum_squared,
'colocated_gates': colocated_gates,
'time_avg_flag': time_avg_flag,
'occurrence': occurrence,
'frequency_of_occurrence': frequency_of_occurrence,
'interpolated_profile': interpolated_profile,
'radial_wind_speed': radial_wind_speed,
'radial_wind_speed_ci': radial_wind_speed_ci,
'radial_wind_speed_status': radial_wind_speed_status,
'doppler_spectrum_width': doppler_spectrum_width,
'doppler_spectrum_mean_error': doppler_spectrum_mean_error,
'atmospherical_structures_type': atmospherical_structures_type,
'relative_beta': relative_beta,
'absolute_beta': absolute_beta,
'cnr': cnr,
'avg_reflectivity': avg_reflectivity,
'npoints_reflectivity': npoints_reflectivity,
'quant05_reflectivity': quant05_reflectivity,
'quant10_reflectivity': quant10_reflectivity,
'quant20_reflectivity': quant20_reflectivity,
'quant50_reflectivity': quant50_reflectivity,
'quant80_reflectivity': quant80_reflectivity,
'quant90_reflectivity': quant90_reflectivity,
'quant95_reflectivity': quant95_reflectivity,
'avg_radar_estimated_rain_rate': avg_radar_estimated_rain_rate,
'npoints_radar_estimated_rain_rate': npoints_radar_estimated_rain_rate,
'quant05_radar_estimated_rain_rate': quant05_radar_estimated_rain_rate,
'quant10_radar_estimated_rain_rate': quant10_radar_estimated_rain_rate,
'quant20_radar_estimated_rain_rate': quant20_radar_estimated_rain_rate,
'quant50_radar_estimated_rain_rate': quant50_radar_estimated_rain_rate,
'quant80_radar_estimated_rain_rate': quant80_radar_estimated_rain_rate,
'quant90_radar_estimated_rain_rate': quant90_radar_estimated_rain_rate,
'quant95_radar_estimated_rain_rate': quant95_radar_estimated_rain_rate,
'avg_velocity': avg_velocity,
'npoints_velocity': npoints_velocity,
'quant05_velocity': quant05_velocity,
'quant10_velocity': quant10_velocity,
'quant20_velocity': quant20_velocity,
'quant50_velocity': quant50_velocity,
'quant80_velocity': quant80_velocity,
'quant90_velocity': quant90_velocity,
'quant95_velocity': quant95_velocity,
'avg_corrected_velocity': avg_corrected_velocity,
'npoints_corrected_velocity': npoints_corrected_velocity,
'quant05_corrected_velocity': quant05_corrected_velocity,
'quant10_corrected_velocity': quant10_corrected_velocity,
'quant20_corrected_velocity': quant20_corrected_velocity,
'quant50_corrected_velocity': quant50_corrected_velocity,
'quant80_corrected_velocity': quant80_corrected_velocity,
'quant90_corrected_velocity': quant90_corrected_velocity,
'quant95_corrected_velocity': quant95_corrected_velocity,
'CTH': CTH,
'HRV': HRV,
'VIS006': VIS006,
'VIS008': VIS008,
'IR_016': IR_016,
'IR_039': IR_039,
'WV_062': WV_062,
'WV_073': WV_073,
'IR_087': IR_087,
'IR_097': IR_097,
'IR_108': IR_108,
'IR_120': IR_120,
'IR_134': IR_134,
'HRV_norm': HRV_norm,
'VIS006_norm': VIS006_norm,
'VIS008_norm': VIS008_norm,
'IR_016_norm': IR_016_norm,
}
##############################################################################
# Default metadata
#
# The DEFAULT_METADATA dictionary contains dictionaries which provide the
# default radar attribute and field metadata. When reading in a file with
# Py-ART the FILE_SPECIFIC_METADATA variable is first queried for a metadata
# dictionary, if it is not found then the metadata in DEFAULT_METADATA is
# utilized.
##############################################################################
DEFAULT_METADATA = {
# Metadata for radar attributes. These closely follow the CF/Radial
# standard
'azimuth': {
'units': 'degrees',
'standard_name': 'beam_azimuth_angle',
'long_name': 'azimuth_angle_from_true_north',
'axis': 'radial_azimuth_coordinate',
'comment': 'Azimuth of antenna relative to true north'},
'elevation': {
'units': 'degrees',
'standard_name': 'beam_elevation_angle',
'long_name': 'elevation_angle_from_horizontal_plane',
'axis': 'radial_elevation_coordinate',
'comment': 'Elevation of antenna relative to the horizontal plane'},
'number_of_pulses': {
'units': '-',
'standard_name': 'number_of_pulses',
'long_name': 'number of pulses per ray',
'axis': 'radial_pulses_coordinate'},
'scan_rate': {
'units': 'degrees_per_second',
'long_name': 'Antenna angle scan rate'},
'range': {
'units': 'meters',
'standard_name': 'projection_range_coordinate',
'long_name': 'range_to_measurement_volume',
'axis': 'radial_range_coordinate',
'spacing_is_constant': 'true',
'comment': (
'Coordinate variable for range. Range to center of each bin.')},
'time': {
'units': 'seconds',
'standard_name': 'time',
'long_name': 'time_in_seconds_since_volume_start',
'calendar': 'gregorian',
'comment': ('Coordinate variable for time. '
'Time at the center of each ray, in fractional seconds '
'since the global variable time_coverage_start')},
'metadata': {
'Conventions': 'CF/Radial instrument_parameters',
'version': '1.3',
'title': '',
'institution': '',
'references': '',
'source': '',
'history': '',
'comment': '',
'instrument_name': ''},
# Metadata for radar sweep information dictionaries
'sweep_number': {
'units': 'count',
'standard_name': 'sweep_number',
'long_name': 'Sweep number'},
'sweep_mode': {
'units': 'unitless',
'standard_name': 'sweep_mode',
'long_name': 'Sweep mode',
'comment': ('Options are: "sector", "coplane", "rhi", '
'"vertical_pointing", "idle", "azimuth_surveillance", '
'"elevation_surveillance", "sunscan", "pointing", '
'"manual_ppi", "manual_rhi"')},
'fixed_angle': {
'long_name': 'Target angle for sweep',
'units': 'degrees',
'standard_name': 'target_fixed_angle'},
'sweep_start_ray_index': {
'long_name': 'Index of first ray in sweep, 0-based',
'units': 'count'},
'sweep_end_ray_index': {
'long_name': 'Index of last ray in sweep, 0-based',
'units': 'count'},
'rays_per_sweep': {
'long_name': 'Number of rays in each sweep',
'units': 'count'},
'target_scan_rate': {
'long_name': 'Target scan rate for sweep',
'units': 'degrees_per_second',
},
'rays_are_indexed': {
'long_name': 'Flag for indexed rays',
'units': 'unitless',
'options': ('true: rays are indexed to a regular grid, ' +
'false: rays are not indexed to a regular grid'),
},
'ray_angle_res': {
'long_name': 'Angular resolution between rays',
'units': 'degrees',
'comment': 'Only applicable when rays_are_indexed variable is true',
},
# Metadata for radar location attributes
'latitude': {
'long_name': 'Latitude',
'standard_name': 'Latitude',
'units': 'degrees_north'},
'longitude': {
'long_name': 'Longitude',
'standard_name': 'Longitude',
'units': 'degrees_east'},
'altitude': {
'long_name': 'Altitude',
'standard_name': 'Altitude',
'units': 'meters',
'positive': 'up'},
'gate_x': {
'long_name': 'Cartesian x location of gate with origin at the radar',
'units': 'meters'},
'gate_y': {
'long_name': 'Cartesian y location of gate with origin at the radar',
'units': 'meters'},
'gate_z': {
'long_name': 'Cartesian z location of gate with origin at the radar',
'units': 'meters'},
'gate_edge_x': {
'long_name': 'Cartesian x location of the edges of each gate',
'units': 'meters'},
'gate_edge_y': {
'long_name': 'Cartesian y location of the edges of each gate',
'units': 'meters'},
'gate_edge_z': {
'long_name': 'Cartesian z location of the edges of each gate',
'units': 'meters'},
'gate_longitude': {
'long_name': 'Longitude of radar gate.',
'units': 'degrees_north'},
'gate_latitude': {
'long_name': 'Latitude of radar gate',
'units': 'degrees_east'},
'gate_altitude': {
'long_name': 'Altitude of radar gate',
'units': 'meters'},
# Metadata for instrument_parameter dictionary
'prt_mode': {
'comments': ('Pulsing mode Options are: "fixed", "staggered", '
'"dual". Assumed "fixed" if missing.'),
'meta_group': 'instrument_parameters',
'long_name': 'Pulsing mode',
'units': 'unitless'},
'nyquist_velocity': {
'units': 'm/s',
'comments': "Unambiguous velocity",
'meta_group': 'instrument_parameters',
'long_name': 'Nyquist velocity'},
'prt': {
'units': 'seconds',
'comments': ("Pulse repetition time. For staggered prt, "
"also see prt_ratio."),
'meta_group': 'instrument_parameters',
'long_name': 'Pulse repetition time'},
'unambiguous_range': {
'units': 'meters',
'comments': 'Unambiguous range',
'meta_group': 'instrument_parameters',
'long_name': 'Unambiguous range'},
'pulse_width': {
'units': 'seconds',
'comments': 'Pulse width',
'meta_group': 'instrument_parameters',
'long_name': 'Pulse width'},
'prt_ratio': {
'units': 'unitless',
'meta_group': 'instrument_parameters',
'long_name': 'Pulse repetition frequency ratio'},
'frequency': {
'units': 's-1',
'meta_group': 'instrument_parameters',
'long_name': 'Radiation frequency'},
'n_samples': {
'units': 'unitless',
'meta_group': 'instrument_parameters',
'long_name': 'Number of samples used to compute moments'},
'radar_antenna_gain_h': {
'units': 'dB',
'meta_group': 'instrument_parameters',
'long_name': 'Antenna gain H polarization'},
'radar_antenna_gain_v': {
'units': 'dB',
'meta_group': 'instrument_parameters',
'long_name': 'Antenna gain V polarization'},
# metadata for radar calibration constant
'calibration_constant_hh': {
'units': 'dB',
'meta_group': 'radar_calibration',
'long_name': ' radar calibration constant H polarization',
},
'calibration_constant_vv': {
'units': 'dB',
'meta_group': 'radar_calibration',
'long_name': ' radar calibration constant V polarization',
},
'transmit_power_h': {
'units': 'dBm',
'meta_group': 'radar_calibration',
'long_name': ' transmit power H channel',
},
'transmit_power_v': {
'units': 'dBm',
'meta_group': 'radar_calibration',
'long_name': ' transmit power V channel',
},
'dBADU_to_dBm_hh': {
'units': 'dBm',
'meta_group': 'radar_calibration',
'long_name': 'dBADU to dBm H polarization',
},
'dBADU_to_dBm_vv': {
'units': 'dBm',
'meta_group': 'radar_calibration',
'long_name': 'dBADU to dBm V polarization',
},
'matched_filter_loss_h': {
'units': 'dB',
'meta_group': 'radar_calibration',
'long_name': 'matched filter loss H polarization',
},
'matched_filter_loss_v': {
'units': 'dB',
'meta_group': 'radar_calibration',
'long_name': 'matched filter loss V polarization',
},
'path_attenuation': {
'units': 'dB/km',
'meta_group': 'radar_calibration',
'long_name': 'matched filter loss',
},
# non-standard parameter for specifying the PRF high/low for each ray
'prf_flag': {
'units': 'unitless',
'comments': "PRF used to collect ray. 0 for high PRF, 1 for low PRF.",
'meta_group': 'instrument_parameters',
'long_name': 'PRF flag'},
# Metadata for radar_parameter sub-convention
'radar_beam_width_h': {
'units': 'degrees',
'meta_group': 'radar_parameters',
'long_name': 'Antenna beam width H polarization'},
'radar_beam_width_v': | |
import pickle
import time
import os
import shutil
from typing import Dict, List, Tuple, Union, Set
from qanta.guesser.abstract import AbstractGuesser
from qanta.datasets.quiz_bowl import QuizBowlDataset
from qanta.datasets.wikipedia import WikipediaDataset
from qanta.preprocess import preprocess_dataset, tokenize_question
from qanta.util.io import safe_open, safe_path, shell
from qanta import logging
import tensorflow as tf
import numpy as np
# Module-level logger (qanta's wrapper around stdlib logging).
log = logging.get(__name__)
# Scratch cache for the pickled (embedding matrix, word -> row index) pair.
TF_DAN_WE_TMP = '/tmp/qanta/deep/tf_dan_we.pickle'
# File name for the embeddings pickle; presumably the packaged (non-scratch)
# copy — not referenced in this chunk, confirm against the rest of the module.
TF_DAN_WE = 'tf_dan_we.pickle'
# Pretrained 300-d GloVe vectors used to initialize the embedding layer.
GLOVE_WE = 'data/external/deep/glove.6B.300d.txt'
# Checkpoint prefix/directory used by TFDanModel.save()/load().
DEEP_DAN_MODEL_TMP_PREFIX = '/tmp/qanta/deep/tfdan'
DEEP_DAN_MODEL_TMP_DIR = '/tmp/qanta/deep'
# Build-system target names; DANGuesser.targets() reports the params target.
# NOTE(review): DEEP_DAN_MODEL_TARGET is not referenced in this chunk.
DEEP_DAN_MODEL_TARGET = 'tfdan_dir'
DEEP_DAN_PARAMS_TARGET = 'dan_params.pickle'
def _make_layer(i: int, in_tensor, n_out, op,
                n_in=None, dropout_prob=None, batch_norm=False, batch_is_training=None):
    """Build one fully connected layer inside variable scope 'layer<i>'.

    Computes op(in_tensor @ w + b), with optional batch norm applied before
    the activation. Note that dropout (when requested) is applied to the
    weight matrix itself, not the activations.

    :param i: layer index, used only to name the variable scope
    :param in_tensor: input activations, shape (batch, n_in)
    :param n_out: number of output units
    :param op: activation function, or None for a linear layer
    :param n_in: input width; inferred from in_tensor when None
    :param dropout_prob: if set, drop weights with this probability
    :param batch_norm: apply tf.contrib.layers.batch_norm before activation
    :param batch_is_training: bool placeholder, required when batch_norm=True
    :return: (output tensor, weight variable)
    """
    with tf.variable_scope('layer' + str(i)):
        if batch_norm and batch_is_training is None:
            raise ValueError('if using batch norm then passing a training placeholder is required')
        w = tf.get_variable('w', (in_tensor.get_shape()[1] if n_in is None else n_in, n_out),
                            dtype=tf.float32)
        if dropout_prob is not None:
            # Dropout on the weights (DropConnect-style) rather than on `out`.
            w = tf.nn.dropout(w, keep_prob=1 - dropout_prob)
        b = tf.get_variable('b', n_out, dtype=tf.float32)
        out = tf.matmul(in_tensor, w) + b
        if batch_norm:
            out = tf.contrib.layers.batch_norm(
                out, center=True, scale=True, is_training=batch_is_training, scope='bn', fused=True)
        out = (out if op is None else op(out))
        # tf.summary.histogram('weights', w)
        # tf.summary.histogram('biases', b)
        # tf.summary.histogram('activations', out)
        return out, w
def parametric_relu(_x):
    """PReLU activation: identity on positive inputs, learned per-channel
    slope 'alpha' (initialized to 0) on negative inputs."""
    alphas = tf.get_variable(
        'alpha',
        _x.get_shape()[-1],
        initializer=tf.constant_initializer(0.0),
        dtype=tf.float32
    )
    positive_part = tf.nn.relu(_x)
    # (_x - |_x|) / 2 is _x for negatives and 0 for positives.
    negative_part = alphas * (_x - abs(_x)) * 0.5
    return positive_part + negative_part
def _create_embeddings(vocab: Set[str]):
    """Build a GloVe embedding matrix restricted to *vocab*.

    Scans the GloVe text file, keeping a row for each word that appears in
    *vocab*, then appends one extra row (the mean of the kept vectors) used
    for out-of-vocabulary words under the key 'UNK'.

    :param vocab: set of words to keep embeddings for
    :return: (embeddings, lookup) where embeddings is a (kept + 1, dim)
        float array and lookup maps word -> row index (including 'UNK')
    :raises ValueError: if no vocab word is found in the GloVe file (the
        original code would instead fail opaquely computing the mean of an
        empty array)
    """
    embeddings = []
    embedding_lookup = {}
    next_index = 0
    # GloVe releases are UTF-8; pin the encoding so the platform default
    # locale cannot break parsing.
    with open(GLOVE_WE, encoding='utf-8') as f:
        for line in f:
            splits = line.split()
            word = splits[0]
            if word in vocab:
                embeddings.append([float(n) for n in splits[1:]])
                embedding_lookup[word] = next_index
                next_index += 1
    if not embeddings:
        raise ValueError('No GloVe vectors found for any word in the vocab')
    embeddings = np.array(embeddings)
    # Mean of the in-vocab vectors serves as the unknown-word embedding.
    mean_embedding = embeddings.mean(axis=0)
    embed_with_unk = np.vstack([embeddings, mean_embedding])
    embedding_lookup['UNK'] = next_index
    return embed_with_unk, embedding_lookup
def _load_embeddings(vocab=None):
    """Return (embeddings, lookup), using the on-disk cache when available.

    On a cache hit the pickled pair is loaded from TF_DAN_WE_TMP. On a miss
    a *vocab* is required; fresh embeddings are built and cached for later
    runs.

    :param vocab: word set for building fresh embeddings; may be None when
        the cache already exists
    :raises ValueError: cache miss with vocab=None
    """
    if os.path.exists(TF_DAN_WE_TMP):
        log.info('Loading word embeddings from cache')
        with safe_open(TF_DAN_WE_TMP, 'rb') as f:
            return pickle.load(f)
    if vocab is None:
        raise ValueError('To create fresh embeddings a vocab is needed')
    log.info('Creating word embeddings and saving to cache')
    # Build the embeddings *before* opening the cache file. The original
    # code opened the file in 'wb' mode first, which truncates it; a failure
    # inside _create_embeddings then left an empty/corrupt pickle behind
    # that the next run would treat as a valid cache hit.
    embed_and_lookup = _create_embeddings(vocab)
    with safe_open(TF_DAN_WE_TMP, 'wb') as f:
        pickle.dump(embed_and_lookup, f)
    return embed_and_lookup
def _convert_text_to_embeddings_indices(words: List[str], embedding_lookup: Dict[str, int]):
w_indices = []
for w in words:
if w in embedding_lookup:
w_indices.append(embedding_lookup[w])
else:
w_indices.append(embedding_lookup['UNK'])
return w_indices
def _compute_n_classes(labels: List[str]):
return len(set(labels))
def _compute_max_len(x_data: List[List[int]]):
return max(len(x) for x in x_data)
def _tf_format(x_data: List[List[int]], max_len: int, zero_index: int):
"""
Pad with elements until it has max_len or shorten it until it has max_len. When padding insert
the zero index so it doesn't contribute anything
:param x_data:
:param max_len:
:return:
"""
for i in range(len(x_data)):
row = x_data[i]
while len(row) < max_len:
row.append(zero_index)
x_data[i] = x_data[i][:max_len]
return x_data
def _create_batches(batch_size,
x_data: np.ndarray, y_data: np.ndarray, x_lengths: np.ndarray,
pad=False, shuffle=True):
if type(x_data) != np.ndarray or type(y_data) != np.ndarray:
raise ValueError('x and y must be numpy arrays')
if len(x_data) != len(y_data):
raise ValueError('x and y must have the same dimension')
n = len(x_data)
order = list(range(n))
if shuffle:
np.random.shuffle(order)
for i in range(0, n, batch_size):
if len(order[i:i + batch_size]) == batch_size:
x_batch = x_data[order[i:i + batch_size]]
y_batch = y_data[order[i:i + batch_size]]
x_batch_lengths = x_lengths[order[i:i + batch_size]]
yield x_batch, y_batch, x_batch_lengths
elif pad:
size = len(order[i:i + batch_size])
x_batch = np.vstack((
x_data[order[i:i + batch_size]],
np.zeros((batch_size - size, x_data.shape[1])))
)
y_batch = np.hstack((
y_data[order[i:i + batch_size]],
np.zeros((batch_size - size,)))
)
x_batch_lengths = np.hstack((
x_lengths[order[i:i + batch_size]],
np.zeros((batch_size - size,)))
)
yield x_batch, y_batch, x_batch_lengths
else:
break
def _compute_lengths(x_data):
return np.array([max(1, len(x)) for x in x_data])
class TFDanModel:
def __init__(self, dan_params: Dict, max_len: int, n_classes: int):
self.dan_params = dan_params
self.max_len = max_len
self.n_classes = n_classes
self.n_hidden_units = dan_params['n_hidden_units']
self.n_hidden_layers = dan_params['n_hidden_layers']
self.word_dropout = dan_params['word_dropout']
self.nn_dropout = dan_params['nn_dropout']
self.batch_size = dan_params['batch_size']
self.learning_rate = dan_params['learning_rate']
self.max_epochs = dan_params['max_epochs']
self.max_patience = dan_params['max_patience']
# These are set by build_tf_model
self.input_placeholder = None
self.len_placeholder = None
self.label_placeholder = None
self.loss = None
self.batch_accuracy = None
self.train_op = None
self.softmax_output = None
self.saver = None
self.file_writer = None
self.sent_vecs = None
self.avg_embeddings = None
self.word_dropout_var = None
self.nn_dropout_var = None
self.initial_embed = None
self.mean_embeddings = None
self.embed_and_zero = None
self.accuracy = None
# Set at runtime
self.summary = None
self.session = None
self.summary_counter = 0
def build_tf_model(self):
with tf.variable_scope(
'dan',
reuse=None,
initializer=tf.contrib.layers.xavier_initializer()):
embedding, embedding_word_lookup = _load_embeddings()
self.initial_embed = tf.get_variable(
'embedding',
initializer=tf.constant(embedding, dtype=tf.float32)
)
self.embed_and_zero = tf.pad(self.initial_embed, [[0, 1], [0, 0]], mode='CONSTANT')
self.input_placeholder = tf.placeholder(
tf.int32, shape=(self.batch_size, self.max_len), name='input_placeholder')
self.len_placeholder = tf.placeholder(
tf.float32, shape=self.batch_size, name='len_placeholder')
self.label_placeholder = tf.placeholder(
tf.int32, shape=self.batch_size, name='label_placeholder')
# (batch_size, max_len, embedding_dim)
self.sent_vecs = tf.nn.embedding_lookup(self.embed_and_zero, self.input_placeholder)
# Apply word level dropout
self.word_dropout_var = tf.get_variable('word_dropout', (), dtype=tf.float32,
trainable=False)
self.nn_dropout_var = tf.get_variable('nn_dropout', (), dtype=tf.float32,
trainable=False)
drop_filter = tf.nn.dropout(
tf.ones((self.max_len, 1)), keep_prob=1 - self.word_dropout_var)
self.sent_vecs = self.sent_vecs * drop_filter
in_dim = self.embed_and_zero.get_shape()[1]
self.avg_embeddings = tf.reduce_sum(self.sent_vecs, 1) / tf.expand_dims(
self.len_placeholder, 1)
layer_out = self.avg_embeddings
self.training_phase = tf.placeholder(tf.bool, name='phase')
for i in range(self.n_hidden_layers):
layer_out, w = _make_layer(
i, layer_out,
n_in=in_dim, n_out=self.n_hidden_units,
op=tf.nn.elu, dropout_prob=self.nn_dropout_var,
batch_norm=True, batch_is_training=self.training_phase
)
in_dim = None
logits, w = _make_layer(self.n_hidden_layers, layer_out, n_out=self.n_classes, op=None)
representation_layer = layer_out
with tf.name_scope('cross_entropy'):
self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=tf.to_int64(self.label_placeholder))
self.loss = tf.reduce_mean(self.loss)
tf.summary.scalar('cross_entropy', self.loss)
self.softmax_output = tf.nn.softmax(logits)
preds = tf.to_int32(tf.argmax(logits, 1))
with tf.name_scope('accuracy'):
self.batch_accuracy = tf.contrib.metrics.accuracy(preds, self.label_placeholder)
tf.summary.scalar('accuracy', self.batch_accuracy)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.name_scope('train'):
with tf.control_dependencies(update_ops):
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.train_op = optimizer.minimize(self.loss)
self.summary = tf.summary.merge_all()
self.saver = tf.train.Saver()
def train(self, x_train, y_train, x_train_lengths, x_test, y_test, x_test_lengths, save=True):
with tf.Graph().as_default(), tf.Session() as session:
self.build_tf_model()
self.session = session
self.session.run(tf.global_variables_initializer())
params_suffix = ','.join('{}={}'.format(k, v) for k, v in self.dan_params.items())
self.file_writer = tf.summary.FileWriter(
os.path.join('output/tensorflow', params_suffix), session.graph)
train_losses, train_accuracies, holdout_losses, holdout_accuracies = self._train(
x_train, y_train, x_train_lengths,
x_test, y_test, x_test_lengths,
self.max_epochs, save=save
)
return train_losses, train_accuracies, holdout_losses, holdout_accuracies
def _train(self,
x_train, y_train, x_train_lengths,
x_test, y_test, x_test_lengths,
n_epochs: int, save=True):
max_accuracy = -1
patience = self.max_patience
train_accuracies = []
train_losses = []
holdout_accuracies = []
holdout_losses = []
for i in range(n_epochs):
# Training Epoch
accuracies, losses, duration = self.run_epoch(
x_train, y_train, x_train_lengths
)
log.info(
'Train Epoch: {} Avg loss: {:.4f} Accuracy: {:.4f}. Ran in {:.4f} seconds.'.format(
i, np.average(losses), np.average(accuracies), duration))
train_accuracies.append(accuracies)
train_losses.append(losses)
# Validation Epoch
val_accuracies, val_losses, val_duration = self.run_epoch(
x_test, y_test, x_test_lengths, train=False
)
val_accuracy = np.average(val_accuracies)
log.info(
'Val Epoch: {} Avg loss: {:.4f} Accuracy: {:.4f}. Ran in {:.4f} seconds.'.format(
i, np.average(val_losses), val_accuracy, val_duration))
holdout_accuracies.append(val_accuracies)
holdout_losses.append(val_losses)
# Save the model if its better
patience -= 1
if val_accuracy > max_accuracy:
max_accuracy = val_accuracy
patience = self.max_patience
if save:
log.info('New best accuracy, saving model')
self.save()
else:
log.info('New best accuracy, model saving turned off')
# Early stopping after some burn in
if patience == 0:
break
return train_losses, train_accuracies, holdout_losses, holdout_accuracies
def run_epoch(self, x_data, y_data, x_lengths, train=True):
start_time = time.time()
accuracies = []
losses = []
if train:
fetches = self.loss, self.batch_accuracy, self.train_op
else:
fetches = self.loss, self.batch_accuracy, self.summary
batch_i = 0
self.session.run(self.word_dropout_var.assign(self.word_dropout if train else 0))
self.session.run(self.nn_dropout_var.assign(self.nn_dropout if train else 0))
for x_batch, y_batch, x_len_batch in _create_batches(
self.batch_size, x_data, y_data, x_lengths):
feed_dict = {
self.input_placeholder: x_batch,
self.label_placeholder: y_batch,
self.len_placeholder: x_len_batch,
self.training_phase: int(train)
}
returned = self.session.run(fetches, feed_dict=feed_dict)
loss = returned[0]
accuracy = returned[1]
if not train:
summary = returned[2]
self.file_writer.add_summary(summary, self.summary_counter)
self.summary_counter += 1
accuracies.append(accuracy)
losses.append(loss)
batch_i += 1
duration = time.time() - start_time
return accuracies, losses, duration
    def guess(self, x_test, x_test_lengths):
        """Predict class probabilities for x_test using the saved checkpoint.

        Builds a fresh graph and session, restores variables via load(),
        and runs inference with both dropout mechanisms disabled.

        :param x_test: padded input matrix, one row per example
        :param x_test_lengths: true length of each example
        :return: (n_examples, n_classes) array of softmax probabilities
        """
        with tf.Graph().as_default(), tf.Session() as session:
            self.build_tf_model()
            self.session = session
            self.session.run(tf.global_variables_initializer())
            self.load()
            # Labels are unused at inference time, but the feed dict
            # requires them, so feed zeros of the right length.
            y_test = np.zeros((x_test.shape[0]))
            # Force deterministic inference: no word or NN dropout.
            self.session.run(self.word_dropout_var.assign(0))
            self.session.run(self.nn_dropout_var.assign(0))
            predictions = []
            for x_batch, y_batch, x_len_batch in _create_batches(
                    self.batch_size, x_test, y_test, x_test_lengths, pad=True, shuffle=False):
                feed_dict = {
                    self.input_placeholder: x_batch,
                    self.label_placeholder: y_batch,
                    self.len_placeholder: x_len_batch,
                    self.training_phase: 0
                }
                batch_predictions = self.session.run(self.softmax_output, feed_dict=feed_dict)
                predictions.append(batch_predictions)
            # The final batch is padded up to batch_size; trim the padding
            # rows so the output aligns 1:1 with x_test.
            return np.vstack(predictions)[:len(x_test)]
    def save(self):
        """Write the current session's variables to the temp checkpoint path."""
        self.saver.save(self.session, safe_path(DEEP_DAN_MODEL_TMP_PREFIX))
    def load(self):
        """Restore model variables from the temp checkpoint into the session."""
        self.saver.restore(self.session, DEEP_DAN_MODEL_TMP_PREFIX)
# Default hyper-parameters for the DAN model; individual runs can override
# any of these through the `dan_params` constructor argument of DANGuesser.
DEFAULT_DAN_PARAMS = dict(
    n_hidden_units=300, n_hidden_layers=1, word_dropout=.6, batch_size=256,
    learning_rate=.003, max_epochs=100, nn_dropout=0, max_patience=10
)
class DANGuesser(AbstractGuesser):
    def __init__(self, dan_params=DEFAULT_DAN_PARAMS, use_wiki=False, min_answers=2):
        """Create an untrained guesser.

        :param dan_params: hyper-parameter dict handed to the TF model
        :param use_wiki: when True, augment training data with Wikipedia text
        :param min_answers: minimum answer count for the QuizBowl dataset
        """
        super().__init__()
        self.dan_params = dan_params
        self.model = None  # type: Union[None, TFDanModel]
        self.embedding_lookup = None  # word -> embedding-row index map
        self.max_len = None  # type: Union[None, int]
        self.embeddings = None  # pretrained embedding matrix
        self.i_to_class = None  # class index -> answer label
        self.class_to_i = None  # answer label -> class index
        self.vocab = None
        self.n_classes = None
        self.use_wiki = use_wiki
        self.min_answers = min_answers
    @classmethod
    def targets(cls) -> List[str]:
        """Return the artifact paths this guesser produces (pipeline targets)."""
        return [DEEP_DAN_PARAMS_TARGET]
    def qb_dataset(self):
        """Return the QuizBowl dataset filtered by the min_answers threshold."""
        return QuizBowlDataset(self.min_answers)
def train(self,
training_data: Tuple[List[List[str]], List[str]]) -> None:
log.info('Preprocessing training data...')
x_train, y_train, x_test, y_test, vocab, class_to_i, i_to_class = preprocess_dataset(
training_data)
self.class_to_i = class_to_i
self.i_to_class = i_to_class
self.vocab = vocab
if self.use_wiki:
wiki_training_data = WikipediaDataset(self.min_answers).training_data()
x_train_wiki, y_train_wiki, _, _, _, _, _ = preprocess_dataset(
wiki_training_data, train_size=1, vocab=vocab, class_to_i=class_to_i,
i_to_class=i_to_class)
log.info('Creating embeddings...')
embeddings, embedding_lookup = _load_embeddings(vocab=vocab)
| |
# File: main.py
# File Created: Tuesday, 7th July 2020 9:43:48 am
# Author: <NAME> (<EMAIL>)
"""
Finite difference solver for wave equation
"""
import abc
import json
import os
from argparse import ArgumentParser
from collections import namedtuple
from time import time
from celluloid import Camera
import matplotlib.pyplot as plt
import numpy as np
import torch
from scipy.io import loadmat
from torch import nn
from tqdm import tqdm
from bhpm.util import plot_triple, timestamp
DIM = 2  # spatial dimension of the problem
# Fixed seed so parameter initialization is reproducible across runs.
torch.manual_seed(42)
# torch.set_default_dtype(torch.double)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Utilities ============================================================================
def parse_args():
    """Define and evaluate this script's command-line interface."""
    cli = ArgumentParser()
    cli.add_argument(
        "--outdir",
        type=str,
        default=None,
        help="If provided, load model from specified outdir instead of training a new one",
    )
    cli.add_argument(
        "--crack",
        action="store_true",
        help="Use the crack case (overridden if outdir is provided)",
    )
    return cli.parse_args()
def ensure_config(args):
    """Resolve (outdir, crack, new_model) for this run.

    A fresh run gets a timestamped output directory with the CLI args
    recorded in args.json; resuming an existing run re-reads its recorded
    crack flag and checks whether a solver checkpoint already exists.
    """
    if args.outdir is not None:
        # Resume path: the directory must already exist.
        outdir = args.outdir
        if not os.path.isdir(outdir):
            raise RuntimeError("Failed to find specified run at %s" % outdir)
        with open(os.path.join(outdir, "args.json"), "r") as f:
            crack = json.load(f)["crack"]
        new_model = not os.path.isfile(os.path.join(outdir, "solver.pt"))
        return outdir, crack, new_model
    # Fresh run: create the directory and persist the arguments.
    outdir = os.path.join(os.path.dirname(__file__), "output", timestamp())
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    with open(os.path.join(outdir, "args.json"), "w") as f:
        json.dump(args.__dict__, f, indent=4)
    return outdir, args.crack, True
def squared_distance(x1, x2):
    """Pairwise squared Euclidean distances between two point sets.

    Uses the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2.

    :param x1: (N, D) tensor
    :param x2: (M, D) tensor
    :return: (N, M) tensor of squared distances
    """
    # Bug fix: torch has no `torch.power`; the elementwise power op is
    # `torch.pow` (the original raised AttributeError when called).
    return (
        torch.pow(x1, 2).sum(dim=1, keepdim=True)
        - 2.0 * x1 @ x2.T
        + torch.pow(x2, 2).sum(dim=1, keepdim=True).T
    )
def _reverse(x):
"""
Reverse a torch array since [::-1] isn't allowed by the current API
"""
return x[torch.arange(len(x) - 1, -1, -1)]
class Elementwise(nn.Module):
    """Wrap an arbitrary elementwise callable (e.g. torch.sin) as a layer."""

    def __init__(self, f):
        super().__init__()
        self._f = f  # callable applied verbatim in forward()

    def forward(self, x):
        """Apply the wrapped callable to the input tensor."""
        return self._f(x)
# Data =================================================================================
# Root directory holding the raw ultrasound scans (two levels above this file).
_us_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "..", "ultrasound_data")
)
# Per-case .mat location plus the (T, Y, X) crop slices applied after loading.
_data_info = (
    # 0: Grains
    {
        "filename": os.path.abspath(
            os.path.join(_us_dir, "Hsample SAW 5MHz n2", "wvf.mat",)
        ),
        "crop": ((180, 230), (100, 150), (370, None)),  # T,Y,X
    },
    # 1: Crack, 0 degrees
    {
        "filename": os.path.abspath(
            os.path.join(
                _us_dir,
                "30Jan15 Nist crack1 240x240 12x12mm avg20 5MHz 0deg grips",
                "wvf.mat",
            )
        ),
        # All-None slices: no cropping for this case.
        "crop": ((None, None), (None, None), (None, None)),
    },
)
def _load_data(case, verbose=False):
    """
    Load data cube from a file
    :return: namedtuple "Data" with np.ndarrays w/ following shapes:
        * x (NX,)
        * y (NY,)
        * t (NT,)
        * wavefield (NT, NY, NX)
    """
    info = _data_info[case]
    filename = info["filename"]
    if not os.path.isfile(filename):
        raise FileNotFoundError(
            "Failed to find ultrasound data at %s.\nHave you downloaded it from Box?"
            % filename
        )
    # The .mat file stores (NY, NX, NT); keep at most the first 200 entries
    # of the leading axis, then reorder to (NT, NY, NX).
    raw = np.array(loadmat(filename)["wvf"])[:200]
    wvf = np.transpose(raw, (2, 0, 1))
    # Apply the per-case crop in (T, Y, X) order.
    (t0, t1), (y0, y1), (x0, x1) = info["crop"]
    wvf = wvf[t0:t1, y0:y1, x0:x1]
    nt, ny, nx = wvf.shape
    if verbose:
        print("Loaded %i measurements after cropping" % wvf.size)
    # Physical coordinates for each axis.
    xy_scale = 0.05  # Units: mm
    t_scale = 1.0 / 5.0  # Units: us (5 MHz sampling in time)
    return namedtuple("_Data", ("x", "y", "t", "wvf"))(
        xy_scale * np.arange(nx),
        xy_scale * np.arange(ny),
        t_scale * np.arange(nt),
        wvf,
    )
def get_data(crack=False):
    """
    :return: (NT,NY,NX)
    """
    if crack:
        raw = _load_data(1)
        # Fixed sub-volume around the crack: 60 x 60 x 90.
        wvf = raw.wvf[570:630, 90:150, 70:160]
        rot = False
    else:
        raw = _load_data(0)
        wvf = raw.wvf  # 50 x 50 x 30
        # Rotate so source comes from below, not the right
        wvf = np.transpose(wvf[:, ::-1], (0, 2, 1)).copy()  # 50 x 30 x 50
        rot = True
    cube = torch.Tensor(wvf).to(device)
    return namedtuple("Data", ("wvf", "rot"))(cube, rot)
# Solver ===============================================================================
class CField(nn.Module):
    """
    Parent class for speed of sound fields
    """

    def forward(self, x, y):
        """Evaluate the field on the grid spanned by x and y -> (NY, NX)."""
        grid = self._tile_cfield_inputs(x, y)
        return self._forward(grid).reshape((y.numel(), x.numel()))

    @staticmethod
    def _tile_cfield_inputs(x, y):
        """Tile 1-D coordinate vectors into (NY*NX, 2) rows of (x, y) pairs.

        y is traversed in reverse so the first rows correspond to the last
        y entries (image-row convention); x varies fastest.
        """
        # cartesian_prod yields (y, x) pairs with x varying fastest; swap
        # the two columns so each row reads (x, y), matching the original
        # stack-and-flatten construction.
        pairs = torch.cartesian_prod(y.flip(0), x)
        return pairs[:, [1, 0]]

    @abc.abstractmethod
    def _forward(self, xy):
        """
        :param xy: (NY*NX, 2)
        :return: (NY*NX, 1)
        """
        raise NotImplementedError()
class CFieldConstant(CField):
    """Spatially uniform wave speed with a single learnable parameter."""

    def __init__(self):
        super().__init__()
        # Unconstrained raw value; positivity is enforced by the exp
        # transform in the `c` property.
        self._raw_c = nn.Parameter(torch.tensor(0.0))
        self._c_transform = torch.distributions.transforms.ExpTransform()

    @property
    def c(self):
        """Positive wave speed obtained by exponentiating the raw parameter."""
        return self._c_transform(self._raw_c)

    def _forward(self, xy):
        # Broadcast the scalar speed over all grid points; the 0*column
        # term only shapes the output to (NY*NX, 1).
        return self.c + 0.0 * xy[:, [0]]
class CFieldNet(CField):
    """Wave-speed field parameterized by a sine-activated MLP.

    The final exp activation keeps the predicted speed strictly positive.
    """

    def __init__(self, units=64, layers=5):
        """
        :param units: hidden width of each linear layer
        :param layers: total number of linear layers before the output head
        """
        super().__init__()
        self._net = nn.Sequential(
            nn.Linear(2, units),
            Elementwise(torch.sin),
            *([nn.Linear(units, units), Elementwise(torch.sin)] * (layers - 1)),
            nn.Linear(units, 1),
            Elementwise(torch.exp),
        )
        # Init tweaks: scale up the first layer's weights (presumably to
        # raise the input frequency of the sine features — TODO confirm)
        # and zero every bias.
        self._net._modules["0"].weight.data = self._net._modules["0"].weight.data * 10.0
        for i in range(1, len(self._net._modules)):
            istr = str(i)
            # Elementwise activation modules have no bias, hence the check.
            if hasattr(self._net._modules[istr], "bias"):
                self._net._modules[istr].bias.data = (
                    self._net._modules[istr].bias.data * 0.0
                )

    def _forward(self, xy):
        # (NY*NX, 2) coordinates -> (NY*NX, 1) positive speeds.
        return self._net(xy)
def get_solver_params(data, t_edge, x_edge, y_edge, t_oversample, s_oversample):
    """Build Solver constructor kwargs relative to the data being analyzed.

    :param data: object with a .wvf cube of shape (NT, NY, NX)
    :param t_edge: temporal padding (in data steps) folded into the window corner
    :param x_edge: horizontal padding (in data pixels), applied on both sides
    :param y_edge: vertical padding (in data pixels)
    :param t_oversample: simulation time steps per data time step
    :param s_oversample: simulation grid cells per data pixel
    :return: dict of keyword arguments for Solver(...)
    """
    nt_data, ny_data, nx_data = data.wvf.shape
    # Where the data window opens inside the simulation cube (T, Y, X).
    corner = (
        t_edge * t_oversample,
        y_edge * s_oversample,
        x_edge * s_oversample,
    )
    return {
        "nx": s_oversample * (nx_data + 2 * x_edge),
        "ny": s_oversample * (ny_data + y_edge + 2),
        "nt": nt_data * t_oversample + corner[0] + 2,
        "dt": 0.02 / t_oversample,
        "h": 0.05 / s_oversample,
        "window_corner": corner,
        "window_stride": (t_oversample, s_oversample, s_oversample),
        "data": data,
    }
class Solver(nn.Module):
    """
    Differentiable explicit finite-difference solver for the 2-D wave
    equation, with a learnable wave-speed field and a learnable source
    applied along the bottom boundary.

    Spatial units: mm
    temporal units: usec
    """

    def __init__(
        self,
        nx=240,
        ny=140,
        nt=5000,
        dt=0.005,  # Data is 0.2 per
        h=0.05,  # Data is 0.05 per
        window_corner=None,
        window_stride=None,
        data=None,
    ):
        """
        :param nx, ny: spatial grid size of the simulation
        :param nt: number of simulated time steps (excluding IC slices)
        :param dt: simulation time step
        :param h: simulation grid spacing
        :param window_corner: (T, Y, X) offset where the data window opens
        :param window_stride: (T, Y, X) subsampling down to the data grid
        :param data: measurement data (kept for reference; not used here)
        """
        super().__init__()
        self._h = h
        self._dt = dt
        # Coordinate grids stored as frozen parameters so they follow the
        # module across .to(device) calls.
        self._x = nn.Parameter(h * torch.arange(nx), requires_grad=False)
        self._y = nn.Parameter(h * torch.arange(ny), requires_grad=False)
        # Two extra leading time points (-2, -1) carry the IC slices.
        self._t = nn.Parameter(dt * torch.arange(-2, nt), requires_grad=False)
        # T,Y,X
        self._window_corner = (30, 20, 30) if window_corner is None else window_corner
        self._window_stride = (40, 2, 2) if window_stride is None else window_stride
        # self.c_field = CFieldConstant()
        self.c_field = CFieldNet()
        # Source f(x,t) across the whole bottom of the simulation domain
        units, layers = 32, 5
        self.source = nn.Sequential(
            nn.Linear(2, units),
            Elementwise(torch.sin),
            *([nn.Linear(units, units), Elementwise(torch.sin)] * (layers - 1)),
            nn.Linear(units, 1),
        )
        # Apply physics via convolution
        self.step_kernel = nn.Conv2d(
            1, 1, 3, bias=False, padding=1, padding_mode="replicate"
        )
        # The kernel is a fixed stencil, not a trainable filter.
        self.step_kernel.requires_grad_(False)
        # Laplacian kernel:
        # 0 1 0
        # 1 -4 1
        # 0 1 0
        self.step_kernel.weight.data = torch.Tensor(
            [[0.0, 1.0, 0.0], [1.0, -4.0, 1.0], [0.0, 1.0, 0.0]]
        )[None][None]

    @property
    def x(self):
        # 1-D grid of x coordinates (mm).
        return self._x

    @property
    def y(self):
        # 1-D grid of y coordinates (mm).
        return self._y

    @property
    def t(self):
        # 1-D grid of time points (usec), including the two IC points.
        return self._t

    @property
    def nx(self):
        return len(self.x)

    @property
    def ny(self):
        return len(self.y)

    @property
    def nt(self):
        # Account for the two "dummy" input for the two IC slices.
        return len(self.t) - 2

    @property
    def window_corner(self):
        return self._window_corner

    @property
    def window_stride(self):
        return self._window_stride

    def forward(self):
        # An nn.Module forward pass is a full simulation run.
        return self.simulate()

    def simulate(self, verbose=False, saveas=None):
        """
        Run the explicit time-stepping scheme.

        :param verbose: print progress and wrap the time loop in tqdm
        :param saveas: if given, dump the wavefield to <saveas>.npy and
            render an animation to <saveas>.gif
        :return: (NT, NY, NX)
        """
        # Per-cell update coefficient (dt * c / h)^2 from the speed field.
        c = self.c_field(self.x, self.y)
        coef = (self._dt * c / self._h) ** 2
        # Evaluate the learned boundary source over the full (t, x) grid.
        source_f = self.source(self._source_points()).reshape((self.nt + 2, self.nx))

        def step(u, u_prev, f):
            """
            perform a time step
            """
            # Discrete Laplacian via the fixed 3x3 stencil convolution.
            u_step = coef * self.step_kernel(u[None][None])[0, 0]
            # Second-order update, then re-impose the source boundary rows.
            u_next = self._apply_source(-u_prev + 2.0 * u + u_step, f)
            return u_next

        u_list = self._initial_condition(source_f[:2])
        if verbose:
            print("Solve...")
            f_list = tqdm(source_f[2:])
        else:
            f_list = source_f[2:]
        for f in f_list:
            u_list.append(step(u_list[-1], u_list[-2], f))
        # Drop the two IC slices before stacking.
        u = torch.stack(u_list[2:])  # NT, NY, NX
        # Visualize
        if saveas is not None:
            print("Save .npy...")
            np.save(saveas + ".npy", u.detach().cpu().numpy())
            print("Animate...")
            fig = plt.figure()
            ax = fig.gca()
            camera = Camera(fig)
            # Every 5th frame keeps the animation small.
            for ui in tqdm(u.detach().cpu().numpy()[::5]):
                ax.imshow(ui, vmin=-0.3, vmax=0.3)  # , cmap="bone")
                camera.snap()
            animation = camera.animate(interval=1)
            animation.save(saveas + ".gif")
            plt.close()
            print("Done!")
        return u

    def apply_window(self, u):
        """Subsample the simulation cube down to the data grid (corner + stride)."""
        c, s = self._window_corner, self._window_stride
        return u[c[0] :: s[0], c[1] :: s[1], c[2] :: s[2]]

    def to_data(self, data):
        """Simulate, window, and crop so the result matches data.wvf's shape."""
        nt, ny, nx = data.wvf.shape
        return self.apply_window(self.simulate())[:nt, :ny, :nx]

    def loss(self, data):
        """
        Assume dense measurement data for the moment
        """
        u = self.to_data(data)
        # A shape mismatch means the simulation window cannot cover the
        # data; fail loudly with both shapes in the message.
        if not all([s_sim == s_data for s_sim, s_data in zip(u.shape, data.wvf.shape)]):
            msg = (
                "Simulation window can't match data (probably too small).\n"
                + "Simulation shape : "
                + str(u.shape)
                + "\n"
                + "Data shape : "
                + str(data.wvf.shape)
            )
            raise ValueError(msg)
        return nn.MSELoss()(u, data.wvf)

    def _source_points(self):
        """
        :return: (x, t), shape ((NT+2)*NX, 2)
        """
        # Row-major (t outer, x inner) tiling of the (t, x) plane.
        x_tiled = torch.stack([self.x for _ in range(self.nt + 2)])
        t_tiled = torch.stack([self.t for _ in range(self.nx)]).T
        return torch.stack((x_tiled.flatten(), t_tiled.flatten())).T

    def _initial_condition(self, source_f):
        """
        TODO complete IC field u0(x,y) instead of initializing at zero?
        :param source_f: (2, NX)
        """
        # Start from a zero field with only the source rows imposed.
        return [
            self._apply_source(torch.zeros(self.ny, self.nx).to(fi.device), fi)
            for fi in source_f
        ]

    def _apply_source(self, u, f):
        """
        Overwrite the last two rows of u with the source profile f.

        :param u: (NY,NX)
        :param f: (NX,)
        """
        u[-1] = f
        u[-2] = f
        return u
# Inference ============================================================================
def train_from_scratch(data, outdir):
iters = 10000
lr_start = 3.0e-3
lr_end = 3.0e-4
solver | |
Safari/537.36",
}
params = {
"cb": "datatable1891672",
"type": "GJZB",
"sty": "ZGZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "12",
"pageNo": "1",
"pageNum": "1",
"_": "1603023435552",
}
r = requests.get(url, params=params, headers=headers)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"月份",
"当月",
"同比增长",
"环比增长",
"自年初累计",
]
return temp_df
def macro_china_hgjck():
    """Eastmoney: monthly customs import/export amounts and growth rates.

    Source: http://data.eastmoney.com/cjsj/hgjck.html

    :return: one row per month with export/import amounts, YoY/MoM growth
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    query = {
        "type": "GJZB",
        "sty": "ZGZB",
        "js": "({data:[(x)],pages:(pc)})",
        "p": "1",
        "ps": "2000",
        "mkt": "1",
        "pageNo": "1",
        "pageNum": "1",
        "_": "1603023435552",
    }
    resp = requests.get(url, params=query)
    text = resp.text
    # Strip the JSONP-style wrapper down to the object literal.
    payload = demjson.decode(text[text.find("{") : -1])
    df = pd.DataFrame([row.split(",") for row in payload["data"]])
    df.columns = [
        "月份",
        "当月出口额-金额",
        "当月出口额-同比增长",
        "当月出口额-环比增长",
        "当月进口额-金额",
        "当月进口额-同比增长",
        "当月进口额-环比增长",
        "累计出口额-金额",
        "累计出口额-同比增长",
        "累计进口额-金额",
        "累计进口额-同比增长",
    ]
    # Every column except the month label is numeric but arrives as text.
    for col in df.columns[1:]:
        df[col] = pd.to_numeric(df[col])
    return df
def macro_china_czsr():
    """Eastmoney: monthly national fiscal revenue.

    Source: http://data.eastmoney.com/cjsj/czsr.html

    :return: monthly revenue with YoY/MoM growth and cumulative figures
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
    }
    query = {
        "cb": "datatable5011006",
        "type": "GJZB",
        "sty": "ZGZB",
        "js": "({data:[(x)],pages:(pc)})",
        "p": "1",
        "ps": "2000",
        "mkt": "14",
        "pageNo": "1",
        "pageNum": "1",
        "_": "1603023435552",
    }
    resp = requests.get(url, params=query, headers=headers)
    text = resp.text
    # Strip the JSONP callback wrapper down to the object literal.
    payload = demjson.decode(text[text.find("{") : -1])
    df = pd.DataFrame([row.split(",") for row in payload["data"]])
    df.columns = [
        "月份",
        "当月",
        "当月-同比增长",
        "当月-环比增长",
        "累计",
        "累计-同比增长",
    ]
    return df
def macro_china_whxd():
    """Eastmoney: monthly foreign-currency loan data.

    Source: http://data.eastmoney.com/cjsj/whxd.html

    :return: monthly loan amount with YoY/MoM growth and cumulative figure
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
    }
    query = {
        "cb": "datatable8618737",
        "type": "GJZB",
        "sty": "ZGZB",
        "js": "({data:[(x)],pages:(pc)})",
        "p": "1",
        "ps": "2000",
        "mkt": "17",
        "pageNo": "1",
        "pageNum": "1",
        "_": "1603023435552",
    }
    resp = requests.get(url, params=query, headers=headers)
    text = resp.text
    # Strip the JSONP callback wrapper down to the object literal.
    payload = demjson.decode(text[text.find("{") : -1])
    df = pd.DataFrame([row.split(",") for row in payload["data"]])
    df.columns = [
        "月份",
        "当月",
        "同比增长",
        "环比增长",
        "累计",
    ]
    return df
def macro_china_wbck():
    """Eastmoney: monthly local- and foreign-currency deposits.

    Source: http://data.eastmoney.com/cjsj/wbck.html

    :return: monthly deposits with YoY/MoM growth and cumulative figure
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
    }
    query = {
        "cb": "datatable3653904",
        "type": "GJZB",
        "sty": "ZGZB",
        "js": "({data:[(x)],pages:(pc)})",
        "p": "1",
        "ps": "2000",
        "mkt": "18",
        "pageNo": "1",
        "pageNum": "1",
        "_": "1603023435552",
    }
    resp = requests.get(url, params=query, headers=headers)
    text = resp.text
    # Strip the JSONP callback wrapper down to the object literal.
    payload = demjson.decode(text[text.find("{") : -1])
    df = pd.DataFrame([row.split(",") for row in payload["data"]])
    df.columns = [
        "月份",
        "当月",
        "同比增长",
        "环比增长",
        "累计",
    ]
    return df
def macro_china_hb():
    """Chinamoney: weekly net money injection and withdrawal since 1997.

    Source: http://www.chinamoney.com.cn/chinese/hb/

    Fixes: the original params dict declared the "t" key twice (a dict
    literal keeps only the last), and used DataFrame.append, which was
    removed in pandas 2.0 — pd.concat is the supported replacement.

    :return: weekly records sorted by start date
    :rtype: pandas.DataFrame
    """
    current_year = datetime.today().year
    url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bond-publish/TicketPutAndBackStatByWeek"
    params = {
        "t": "1597986289666",
    }
    frames = []
    for year in tqdm(range(1997, current_year + 1)):
        payload = {
            "startWeek": f"{year}-01",
            "endWeek": f"{year}-52",
            "pageSize": "5000",
            "pageNo": "1",
        }
        # First request discovers how many pages this year has.
        r = requests.post(url, params=params, data=payload)
        page_num = r.json()["data"]["pageTotal"]
        for page in range(1, page_num + 1):
            payload["pageNo"] = str(page)
            r = requests.post(url, params=params, data=payload)
            frames.append(pd.DataFrame(r.json()["data"]["resultList"]))
    big_df = pd.concat(frames, ignore_index=True)
    big_df = big_df.sort_values(by=["startDate"])
    big_df.reset_index(inplace=True, drop=True)
    big_df.columns = ["start_date", "net_put_in", "back", "end_date", "put_in", "date"]
    return big_df
def macro_china_gksccz():
    """Chinamoney: PBOC open-market operation records.

    Source: http://www.chinamoney.com.cn/chinese/yhgkscczh/

    Fixes: the original params dict declared the "t" key twice (a dict
    literal keeps only the last), and used DataFrame.append, which was
    removed in pandas 2.0 — pd.concat is the supported replacement.

    :return: operation records sorted by operation date
    :rtype: pandas.DataFrame
    """
    url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bond-publish/TicketHandle"
    params = {
        "t": "1597986289666",
    }
    payload = {
        "pageSize": "1000",
        "pageNo": "1",
    }
    # First request discovers the total page count.
    r = requests.post(url, params=params, data=payload)
    page_num = r.json()["data"]["pageTotal"]
    frames = []
    for page in tqdm(range(1, page_num + 1)):
        payload["pageNo"] = str(page)
        r = requests.post(url, params=params, data=payload)
        frames.append(pd.DataFrame(r.json()["data"]["resultList"]))
    big_df = pd.concat(frames, ignore_index=True)
    big_df = big_df.sort_values(by=["operationFromDate"])
    big_df.reset_index(inplace=True, drop=True)
    big_df.columns = [
        "rate",
        "trading_method",
        "deal_amount",
        "period",
        "operation_from_date",
    ]
    return big_df
def macro_china_bond_public():
    """Chinamoney: bond issuance disclosures (new issues).

    Source: http://www.chinamoney.com.cn/chinese/xzjfx/

    :return: one row per announced bond issue
    :rtype: pandas.DataFrame
    """
    url = "http://www.chinamoney.com.cn/ags/ms/cm-u-bond-an/bnBondEmit"
    form = {
        "enty": "",
        "bondType": "",
        "bondNameCode": "",
        "leadUnderwriter": "",
        "pageNo": "1",
        "pageSize": "1000",
        "limit": "1",
    }
    resp = requests.post(url, data=form)
    df = pd.DataFrame(resp.json()["records"])
    df.columns = [
        "issue_price",
        "emit_enty",
        "coupon_type",
        "plnd_issue_vlmn_str",
        "issue_price_str",
        "issue_date",
        "bond_type",
        "plnd_issue_vlmn",
        "bond_name",
        "bond_code",
        "rtng_shrt",
        "bond_period",
        "defined_code",
    ]
    return df
def macro_china_xfzxx():
    """Eastmoney: consumer confidence / satisfaction / expectation indices.

    Source: https://data.eastmoney.com/cjsj/xfzxx.html

    Bug fix: the original filled '消费者预期指数-指数值' from the
    '消费者满意指数-指数值' column (copy-paste error), silently corrupting
    the expectation-index values.

    :return: monthly index values with YoY and MoM growth
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    params = {
        'type': 'GJZB',
        'sty': 'ZGZB',
        'js': '({data:[(x)],pages:(pc)})',
        'p': '1',
        'ps': '2000',
        'mkt': '4',
        'pageNo': '1',
        'pageNum': '1',
        '_': '1625824314514',
    }
    r = requests.get(url, params=params)
    data_text = r.text
    # Strip the surrounding parentheses of the JSONP-style payload.
    data_json = demjson.decode(data_text[1:-1])
    temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
    temp_df.columns = [
        '月份',
        '消费者信心指数-指数值',
        '消费者信心指数-同比增长',
        '消费者信心指数-环比增长',
        '消费者满意指数-指数值',
        '消费者满意指数-同比增长',
        '消费者满意指数-环比增长',
        '消费者预期指数-指数值',
        '消费者预期指数-同比增长',
        '消费者预期指数-环比增长',
    ]
    # Convert every column except the month label; each column is converted
    # from its OWN values, which also fixes the old copy-paste bug above.
    for col in temp_df.columns[1:]:
        temp_df[col] = pd.to_numeric(temp_df[col])
    return temp_df
def macro_china_gyzjz():
    """Eastmoney: monthly industrial value-added growth.

    Source: https://data.eastmoney.com/cjsj/gyzjz.html

    :return: monthly YoY growth and cumulative growth
    :rtype: pandas.DataFrame
    """
    url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    query = {
        'type': 'GJZB',
        'sty': 'ZGZB',
        'js': '({data:[(x)],pages:(pc)})',
        'p': '1',
        'ps': '2000',
        'mkt': '0',
        'pageNo': '1',
        'pageNum': '1',
        '_': '1625824314514',
    }
    resp = requests.get(url, params=query)
    text = resp.text
    # Strip the surrounding parentheses of the JSONP-style payload.
    payload = demjson.decode(text[1:-1])
    df = pd.DataFrame([row.split(',') for row in payload['data']])
    df.columns = [
        '月份',
        '同比增长',
        '累计增长',
    ]
    # Both value columns arrive as strings; convert to numbers.
    for col in df.columns[1:]:
        df[col] = pd.to_numeric(df[col])
    return df
def macro_china_reserve_requirement_ratio():
    """Eastmoney: required reserve ratio history.

    Source: https://data.eastmoney.com/cjsj/ckzbj.html

    :return: adjusted ratios for large vs. small/medium institutions
    :rtype: pandas.DataFrame
    """
    url = "https://data.eastmoney.com/DataCenter_V3/Chart/cjsj/reserverequirementratio.ashx"
    query = {
        "r": "0.12301106148653584",
        "isxml": "false",
    }
    resp = requests.get(url, params=query)
    payload = resp.json()
    # X holds two-digit-year month labels; the two Y series are the ratios.
    months = ["20" + item for item in payload["X"].split(",")]
    large = payload["Y"][0].split(",")
    small = payload["Y"][1].split(",")
    df = pd.DataFrame(
        {
            "月份": months,
            "大型金融机构-调整后": large,
            "中小金融机构-调整后": small,
        }
    )
    df = df.astype(
        {
            "大型金融机构-调整后": float,
            "中小金融机构-调整后": float,
        }
    )
    return df
def macro_china_consumer_goods_retail():
    """Eastmoney: monthly total retail sales of consumer goods.

    Source: http://data.eastmoney.com/cjsj/xfp.html

    :return: monthly totals with YoY/MoM growth and cumulative figures
    :rtype: pandas.DataFrame
    """
    url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    query = {
        'type': 'GJZB',
        'sty': 'ZGZB',
        'js': '({data:[(x)],pages:(pc)})',
        'p': '1',
        'ps': '2000',
        'mkt': '5',
        'pageNo': '1',
        'pageNum': '1',
        '_': '1625822628225',
    }
    resp = requests.get(url, params=query)
    text = resp.text
    # Strip the surrounding parentheses of the JSONP-style payload.
    payload = demjson.decode(text[1:-1])
    df = pd.DataFrame([row.split(',') for row in payload['data']])
    df.columns = [
        '月份',
        '当月',
        '同比增长',
        '环比增长',
        '累计',
        '累计-同比增长',
    ]
    # Everything except the month label is numeric but arrives as text.
    for col in df.columns[1:]:
        df[col] = pd.to_numeric(df[col])
    return df
def macro_china_society_electricity():
    """Sina finance: nationwide electricity consumption by sector.

    Source: http://finance.sina.com.cn/mac/#industry-6-0-31-1

    Fix: the original accumulated pages with DataFrame.append, which was
    removed in pandas 2.0 — pd.concat over a list is the supported way.

    :return: one row per reporting period, 17 consumption columns
    :rtype: pandas.DataFrame
    """
    url = "https://quotes.sina.cn/mac/api/jsonp_v3.php/SINAREMOTECALLCALLBACK1601557771972/MacPage_Service.get_pagedata"
    params = {
        "cate": "industry",
        "event": "6",
        "from": "0",
        "num": "31",
        "condition": "",
        "_": "1601557771972",
    }
    r = requests.get(url, params=params)
    data_text = r.text
    # Strip the JSONP callback wrapper (and its trailing ");") around the payload.
    data_json = demjson.decode(data_text[data_text.find("{") : -3])
    # Pages are 31 records each; derive the page count from the total.
    page_num = math.ceil(int(data_json["count"]) / 31)
    frames = [pd.DataFrame(data_json["data"])]
    for i in range(1, page_num):
        params.update({"from": i * 31})
        r = requests.get(url, params=params)
        data_text = r.text
        data_json = demjson.decode(data_text[data_text.find("{") : -3])
        frames.append(pd.DataFrame(data_json["data"]))
    big_df = pd.concat(frames, ignore_index=True)
    big_df.columns = [
        "统计时间",
        "全社会用电量",
        "全社会用电量同比",
        "各行业用电量合计",
        "各行业用电量合计同比",
        "第一产业用电量",
        "第一产业用电量同比",
        "第二产业用电量",
        "第二产业用电量同比",
        "第三产业用电量",
        "第三产业用电量同比",
        "城乡居民生活用电量合计",
        "城乡居民生活用电量合计同比",
        "城镇居民用电量",
        "城镇居民用电量同比",
        "乡村居民用电量",
        "乡村居民用电量同比",
    ]
    return big_df
def macro_china_society_traffic_volume():
    """Sina finance: nationwide passenger and freight traffic volumes.

    Source: http://finance.sina.com.cn/mac/#industry-10-0-31-1

    Fix: the original accumulated pages with DataFrame.append, which was
    removed in pandas 2.0 — pd.concat over a list is the supported way.

    :return: non-cumulative monthly traffic volumes
    :rtype: pandas.DataFrame
    """
    url = "https://quotes.sina.cn/mac/api/jsonp_v3.php/SINAREMOTECALLCALLBACK1601559094538/MacPage_Service.get_pagedata"
    params = {
        "cate": "industry",
        "event": "10",
        "from": "0",
        "num": "31",
        "condition": "",
        "_": "1601557771972",
    }
    r = requests.get(url, params=params)
    data_text = r.text
    # Strip the JSONP callback wrapper (and its trailing ");") around the payload.
    data_json = demjson.decode(data_text[data_text.find("{") : -3])
    page_num = math.ceil(int(data_json["count"]) / 31)
    frames = [pd.DataFrame(data_json["data"]["非累计"])]
    for i in tqdm(range(1, page_num)):
        params.update({"from": i * 31})
        r = requests.get(url, params=params)
        data_text = r.text
        data_json = demjson.decode(data_text[data_text.find("{") : -3])
        frames.append(pd.DataFrame(data_json["data"]["非累计"]))
    big_df = pd.concat(frames, ignore_index=True)
    # Human-readable column names come from the response's config block.
    big_df.columns = [item[1] for item in data_json["config"]["all"]]
    return big_df
def macro_china_postal_telecommunicational():
    """Sina finance: basic postal and telecom business statistics.

    Source: http://finance.sina.com.cn/mac/#industry-11-0-31-1

    Fix: the original accumulated pages with DataFrame.append, which was
    removed in pandas 2.0 — pd.concat over a list is the supported way.

    :return: non-cumulative monthly postal/telecom figures
    :rtype: pandas.DataFrame
    """
    url = "https://quotes.sina.cn/mac/api/jsonp_v3.php/SINAREMOTECALLCALLBACK1601624495046/MacPage_Service.get_pagedata"
    params = {
        "cate": "industry",
        "event": "11",
        "from": "0",
        "num": "31",
        "condition": "",
        "_": "1601624495046",
    }
    r = requests.get(url, params=params)
    data_text = r.text
    # Strip the JSONP callback wrapper (and its trailing ");") around the payload.
    data_json = demjson.decode(data_text[data_text.find("{") : -3])
    page_num = math.ceil(int(data_json["count"]) / 31)
    frames = [pd.DataFrame(data_json["data"]["非累计"])]
    for i in tqdm(range(1, page_num)):
        params.update({"from": i * 31})
        r = requests.get(url, params=params)
        data_text = r.text
        data_json = demjson.decode(data_text[data_text.find("{") : -3])
        frames.append(pd.DataFrame(data_json["data"]["非累计"]))
    big_df = pd.concat(frames, ignore_index=True)
    # Human-readable column names come from the response's config block.
    big_df.columns = [item[1] for item in data_json["config"]["all"]]
    return big_df
def macro_china_international_tourism_fx():
"""
国际旅游外汇收入构成
http://finance.sina.com.cn/mac/#industry-15-0-31-3
:return: 国际旅游外汇收入构成
:rtype: pandas.DataFrame
"""
url = "https://quotes.sina.cn/mac/api/jsonp_v3.php/SINAREMOTECALLCALLBACK1601651495761/MacPage_Service.get_pagedata"
params = {
"cate": "industry",
"event": "15",
"from": "0",
"num": "31",
"condition": "",
"_": "1601624495046",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[data_text.find("{") : -3])
page_num = math.ceil(int(data_json["count"]) / 31)
big_df = pd.DataFrame(data_json["data"])
for i in tqdm(range(1, page_num)):
params.update({"from": i * 31})
| |
import argparse
import random
import os
import time
import datetime
import numpy as np
import torch
from torch import nn, autograd, optim
from torch.nn import functional as F
from torch.utils import data
import torch.distributed as dist
from torchvision import transforms, utils
from model import Generator, Discriminator
from dataset import FFHQ_Dataset
from Miscellaneous.distributed import (
reduce_loss_dict,
reduce_sum,
get_world_size,
)
from Util.network_util import Build_Generator_From_Dict
from Util.content_aware_pruning import Get_Parsing_Net, Batch_Img_Parsing, Get_Masked_Tensor
from Evaluation.fid import Get_Model_FID_Score
import lpips
# Hyper-parameters for training!
import train_hyperparams
# Command-line interface: every default comes from train_hyperparams, so
# running the script with no flags reproduces the configured experiment.
parser = argparse.ArgumentParser()
# Model and data
parser.add_argument('--path', type=str, default=train_hyperparams.data_folder)
parser.add_argument('--size', type=int, default=train_hyperparams.generated_img_size)
parser.add_argument('--ckpt', type=str, default=train_hyperparams.ckpt)
parser.add_argument('--channel_multiplier', type=int, default=train_hyperparams.channel_multiplier)
parser.add_argument('--latent', type=int, default=train_hyperparams.latent)
parser.add_argument('--n_mlp', type=int, default=train_hyperparams.n_mlp)
# NOTE(review): argparse `type=bool` is a known pitfall — bool() of any
# non-empty string (including "False") is True, so this flag cannot be
# turned off from the command line; only the default behaves as intended.
parser.add_argument('--load_train_state', type=bool, default=train_hyperparams.load_train_state)
# Optimization schedule
parser.add_argument('--iter', type=int, default=train_hyperparams.training_iters)
parser.add_argument('--batch_size', type=int, default=train_hyperparams.batch_size)
parser.add_argument('--lr', type=float, default=train_hyperparams.init_lr)
# Regularization (R1, path-length) settings
parser.add_argument('--r1', type=float, default=train_hyperparams.discriminator_r1)
parser.add_argument('--path_regularize', type=float, default=train_hyperparams.generator_path_reg_weight)
parser.add_argument('--path_batch_shrink', type=int, default=train_hyperparams.path_reg_batch_shrink)
parser.add_argument('--d_reg_every', type=int, default=train_hyperparams.d_reg_freq)
parser.add_argument('--g_reg_every', type=int, default=train_hyperparams.g_reg_freq)
parser.add_argument('--mixing', type=float, default=train_hyperparams.noise_mixing)
# Validation and checkpointing cadence
parser.add_argument('--n_sample', type=int, default=train_hyperparams.val_sample_num)
parser.add_argument('--val_sample_freq', type=int, default=train_hyperparams.val_sample_freq)
parser.add_argument('--model_save_freq', type=int, default=train_hyperparams.model_save_freq)
parser.add_argument('--fid_n_sample', type=int, default=train_hyperparams.fid_n_sample)
parser.add_argument('--fid_batch', type=int, default=train_hyperparams.fid_batch)
# Knowledge-distillation settings
parser.add_argument('--teacher_ckpt', type=str, default=train_hyperparams.teacher)
parser.add_argument('--kd_l1_lambda', type=float, default=train_hyperparams.kd_l1_lambda)
parser.add_argument('--kd_lpips_lambda', type=float, default=train_hyperparams.kd_lpips_lambda)
parser.add_argument('--kd_mode', type=str, default=train_hyperparams.kd_mode)
# NOTE(review): same `type=bool` pitfall as --load_train_state above.
parser.add_argument('--content_aware_KD', type=bool, default=train_hyperparams.content_aware_KD)
args = parser.parse_args()
# GPU topology; distributed training kicks in when more than one GPU is listed.
n_gpu = len(train_hyperparams.gpu_device_ids)
device = train_hyperparams.primary_device
args.distributed = n_gpu > 1
def Print_Experiment_Status(exp_log_file):
    '''
    Usage:
        Print all relevant experiment settings to stdout and append them
        to the experiment log file.

    :param exp_log_file: open, writable file object for the experiment log
    '''
    experiment_status_str = '\n' + '--------------- Training Start ---------------' + '\n\n'
    # Bug fix: `args` has no `n_latent` attribute (it is not an argparse
    # flag), so the original raised AttributeError here; report a fallback
    # instead of crashing.
    experiment_status_str += 'Params: ' + '\n\n' + \
        ' Model and Data: ' + '\n' + \
        ' Data Folder: ' + str(args.path) + '\n' + \
        ' Multi-Layer Perceptron Num Layers: ' + str(args.n_mlp) + '\n' + \
        ' Generator Num Layers: ' + str(getattr(args, 'n_latent', 'N/A')) + '\n' + \
        ' Latent Variable Dimension: ' + str(args.latent) + '\n' + \
        ' Generated Image Size: ' + str(args.size) + '\n' + \
        ' Channel Multiplier: ' + str(args.channel_multiplier) + '\n' + \
        ' Initial Checkpoint: ' + str(args.ckpt) + '\n' + \
        ' Load Training State: ' + str(args.load_train_state) + '\n\n' + \
        ' GPU Setup: ' + '\n' + \
        ' Distributed Training: ' + str(args.distributed) + '\n' + \
        ' Primiary GPU Device: ' + device + '\n' + \
        ' GPU Device IDs: ' + str(train_hyperparams.gpu_device_ids) + '\n' + \
        ' Number of GPUs: ' + str(n_gpu) + '\n\n' + \
        ' Training Params: ' + '\n' + \
        ' Training Iterations: ' + str(args.iter) + '\n' + \
        ' Batch Size: ' + str(args.batch_size) + '\n' + \
        ' Learning Rate: ' + str(args.lr) + '\n' + \
        ' Generator Path Regularization Frequency: ' + str(args.g_reg_every) + '\n' + \
        ' Path Regularization Weight: ' + str(args.path_regularize) + '\n' + \
        ' Path Batch Shrink Ratio: ' + str(args.path_batch_shrink) + '\n' + \
        ' Discriminator Regularization Frequency: ' + str(args.d_reg_every) + '\n' + \
        ' Discriminator Regularization Weight: ' + str(args.r1) + '\n' + \
        ' Noise Mixing: ' + str(args.mixing) + '\n\n' + \
        ' Validation Params: ' + '\n' + \
        ' Number of Validated Samples: ' + str(args.n_sample) + '\n' + \
        ' Generate Sample Frequency: ' + str(args.val_sample_freq) + '\n' + \
        ' Model Saving Frequency: ' + str(args.model_save_freq) + '\n' + \
        ' FID Sample Num: ' + str(args.fid_n_sample) + '\n' + \
        ' FID Sample Batch Size: ' + str(args.fid_batch) + '\n\n'
    # Distillation settings are only reported when a teacher is configured.
    if args.teacher_ckpt is not None:
        experiment_status_str += ' Knowledge Distillation Params: ' + '\n' + \
            ' Teacher Checkpoint: ' + str(args.teacher_ckpt) + '\n' + \
            ' L1 Knowledge Distillation Weight: ' + str(args.kd_l1_lambda) + '\n' + \
            ' L1 Knowledge Distillation Mode: ' + str(args.kd_mode) + '\n' + \
            ' LPIPS Knowledge Distillation Weight: ' + str(args.kd_lpips_lambda) + '\n' + \
            ' Content Aware: ' + str(args.content_aware_KD) + '\n\n'
    else:
        experiment_status_str += ' No Knowledge Distillation' + '\n\n'
    print(experiment_status_str)
    exp_log_file.write(experiment_status_str)
def requires_grad(model, flag=True):
    """Enable or disable gradient tracking on every parameter of *model*."""
    for _, param in model.named_parameters():
        param.requires_grad = flag
def accumulate(model1, model2, decay=0.999):
    """Exponential-moving-average update: model1 <- decay*model1 + (1-decay)*model2.

    Used to maintain the EMA generator (g_ema) from the live generator.
    Mutates model1's parameters in place via .data, so no autograd history
    is recorded.
    """
    par1 = dict(model1.named_parameters())
    par2 = dict(model2.named_parameters())

    for k in par1.keys():
        # `add_(Number, Tensor)` is a deprecated overload (removed in recent
        # PyTorch); the supported spelling is `add_(Tensor, alpha=Number)`.
        par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)
def Get_Readable_Cur_Time():
    """Return the current local time as a 'YYYY-MM-DD_HH:MM:SS' string."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d_%H:%M:%S")
def sample_data(loader):
    """Yield batches from *loader* forever, restarting it once exhausted."""
    while True:
        yield from loader
def Downsample_Image_256(im_tensor):
    """Bilinearly resize an NCHW image batch to 256x256 (no corner alignment)."""
    return F.interpolate(
        im_tensor, size=(256, 256), mode='bilinear', align_corners=False
    )
def KD_loss(args, teacher_g, noise, inject_index, fake_img, fake_img_list, percept_loss, parsing_net):
    '''
    Usage:
        Compute the knowledge-distillation losses between the student output(s)
        and a frozen teacher generator run on the same noise / mixing index:
        - an L1 loss, on the final image ('Output_Only') or summed over all
          intermediate RGB outputs ('Intermediate');
        - an optional LPIPS perceptual loss (skipped when percept_loss is None).
    Returns:
        (kd_l1_loss, kd_lpips_loss) as scalar tensors.
    Raises:
        ValueError: if args.kd_mode is not a recognized mode (previously this
        fell through and crashed later with a NameError on kd_l1_loss).
    '''
    # Teacher forward pass mirroring the student's noise and inject index.
    fake_img_teacher_list = teacher_g(noise, return_rgb_list=True, inject_index=inject_index)
    fake_img_teacher = fake_img_teacher_list[-1]

    # Content-Aware Adjustment: mask student and teacher images with the
    # teacher's parsing map. NOTE(review): `device`, Batch_Img_Parsing and
    # Get_Masked_Tensor come from module scope.
    if parsing_net is not None:
        teacher_img_parsing = Batch_Img_Parsing(fake_img_teacher, parsing_net, device)
        fake_img_teacher = Get_Masked_Tensor(fake_img_teacher, teacher_img_parsing, device, mask_grad=False)
        fake_img = Get_Masked_Tensor(fake_img, teacher_img_parsing, device, mask_grad=True)

    fake_img_teacher.requires_grad = True

    # kd_l1_loss
    if args.kd_mode == 'Output_Only':
        kd_l1_loss = args.kd_l1_lambda * torch.mean(torch.abs(fake_img_teacher - fake_img))
    elif args.kd_mode == 'Intermediate':
        for fake_img_teacher in fake_img_teacher_list:
            fake_img_teacher.requires_grad = True
        # NOTE(review): the loop above leaves fake_img_teacher bound to the
        # *unmasked* last list element, which is what the LPIPS term below then
        # uses even in the content-aware path — preserved as-is; confirm intent.
        loss_list = [torch.mean(torch.abs(t - s)) for (t, s) in zip(fake_img_teacher_list, fake_img_list)]
        kd_l1_loss = args.kd_l1_lambda * sum(loss_list)
    else:
        raise ValueError("Unknown kd_mode: {!r}".format(args.kd_mode))

    # kd_lpips_loss
    if percept_loss is None:
        kd_lpips_loss = torch.tensor(0.0, device=device)
    else:
        if args.size > train_hyperparams.LPIPS_IMAGE_SIZE:  # pool the images for LPIPS to save memory
            pooled_fake_img = Downsample_Image_256(fake_img)
            pooled_fake_img_teacher = Downsample_Image_256(fake_img_teacher)
            kd_lpips_loss = args.kd_lpips_lambda * torch.mean(percept_loss(pooled_fake_img, pooled_fake_img_teacher))
        else:
            kd_lpips_loss = args.kd_lpips_lambda * torch.mean(percept_loss(fake_img, fake_img_teacher))

    return kd_l1_loss, kd_lpips_loss
def d_logistic_loss(real_pred, fake_pred):
    """Logistic (non-saturating) discriminator loss: push real scores up, fake down."""
    return F.softplus(-real_pred).mean() + F.softplus(fake_pred).mean()
def d_r1_loss(real_pred, real_img):
    """R1 penalty: mean (over the batch) squared gradient norm of the
    discriminator output w.r.t. the real images."""
    (grad_real,) = autograd.grad(
        outputs=real_pred.sum(), inputs=real_img, create_graph=True
    )
    batch = grad_real.shape[0]
    return grad_real.pow(2).reshape(batch, -1).sum(1).mean()
def g_nonsaturating_loss(fake_pred):
    """Non-saturating generator GAN loss: softplus of the negated fake scores."""
    return F.softplus(-fake_pred).mean()
def make_noise(batch, latent_dim, n_noise, device):
    """Sample latent noise: a single (batch, latent_dim) tensor when n_noise == 1,
    otherwise a tuple of n_noise such tensors."""
    if n_noise == 1:
        return torch.randn(batch, latent_dim, device=device)
    stacked = torch.randn(n_noise, batch, latent_dim, device=device)
    return stacked.unbind(0)
def mixing_noise(batch, latent_dim, prob, device):
    """With probability *prob*, return two latents for style mixing;
    otherwise a single latent wrapped in a list."""
    use_mixing = prob > 0 and random.random() < prob
    if use_mixing:
        return make_noise(batch, latent_dim, 2, device)
    return [make_noise(batch, latent_dim, 1, device)]
def index_aware_mixing_noise(batch, latent_dim, prob, n_latent, device):
    '''
    Usage:
        Like mixing_noise, but also return the style-injection index used for
        mixing (None when no mixing happens).
    '''
    if prob <= 0 or random.random() >= prob:
        return [make_noise(batch, latent_dim, 1, device)], None
    mixed_noises = make_noise(batch, latent_dim, 2, device)
    inject_index = random.randint(1, n_latent - 1)
    return mixed_noises, inject_index
def D_Loss_BackProp(generator, discriminator, real_img, args, device, loss_dict, d_optim):
    '''
    Usage:
        Run one discriminator optimization step on the logistic GAN loss,
        with the generator frozen.
    '''
    requires_grad(generator, False)
    requires_grad(discriminator, True)

    latents = mixing_noise(args.batch_size, args.latent, args.mixing, device)
    fake_img = generator(latents)

    fake_pred = discriminator(fake_img)
    real_pred = discriminator(real_img)
    d_loss = d_logistic_loss(real_pred, fake_pred)

    # Bookkeeping for logging.
    loss_dict['d'] = d_loss
    loss_dict['real_score'] = real_pred.mean()
    loss_dict['fake_score'] = fake_pred.mean()

    discriminator.zero_grad()
    d_loss.backward()
    d_optim.step()
def D_Reg_BackProp(real_img, discriminator, args, d_optim):
    '''
    Usage:
        Run one discriminator optimization step on the R1 gradient penalty
        (lazy regularization); returns the raw r1 loss for logging.
    '''
    real_img.requires_grad = True
    real_pred = discriminator(real_img)
    r1_loss = d_r1_loss(real_pred, real_img)

    discriminator.zero_grad()
    # Scaled by d_reg_every because this only runs every d_reg_every iters.
    # The `0 * real_pred[0]` term presumably ties the discriminator output into
    # the backward graph (StyleGAN2 reference-code idiom) — confirm if removing.
    reg_loss = args.r1 / 2 * r1_loss * args.d_reg_every + 0 * real_pred[0]
    reg_loss.backward()
    d_optim.step()
    return r1_loss
def G_Loss_BackProp(generator, discriminator, args, device, loss_dict, g_optim, teacher_g, percept_loss, parsing_net):
    '''
    Usage:
        Run one generator optimization step on the adversarial loss, plus the
        knowledge-distillation losses when a teacher generator is provided;
        the discriminator is frozen.
    '''
    requires_grad(generator, True)
    requires_grad(discriminator, False)

    # Adversarial loss (keeps the mixing index so the teacher can replay it).
    noise, inject_index = index_aware_mixing_noise(args.batch_size, args.latent, args.mixing, args.n_latent, device)
    fake_img_list = generator(noise, return_rgb_list=True, inject_index=inject_index)
    fake_img = fake_img_list[-1]
    g_loss = g_nonsaturating_loss(discriminator(fake_img))
    loss_dict['g'] = g_loss
    total_loss = g_loss

    # Knowledge-distillation losses against the frozen teacher.
    if teacher_g is not None:
        kd_l1_loss, kd_lpips_loss = KD_loss(args, teacher_g, noise, inject_index, fake_img, fake_img_list, percept_loss, parsing_net)
        loss_dict['kd_l1_loss'] = kd_l1_loss
        loss_dict['kd_lpips_loss'] = kd_lpips_loss
        total_loss = g_loss + kd_l1_loss + kd_lpips_loss

    generator.zero_grad()
    total_loss.backward()
    g_optim.step()
def G_Reg_BackProp(generator, args, mean_path_length, g_optim):
    '''
    Usage:
        Run one generator optimization step on the path-length (PPL)
        regularizer; returns the loss terms and the updated running mean.
    '''
    # Shrink the batch during the PPL pass to save memory.
    path_batch_size = max(1, args.batch_size // args.path_batch_shrink)
    # NOTE(review): `device` is read from module scope here, unlike the other
    # *_BackProp helpers which take it as a parameter.
    noise = mixing_noise(path_batch_size, args.latent, args.mixing, device)
    fake_img, path_lengths = generator(noise, PPL_regularize=True)

    # Exponential moving average of the path length.
    decay = 0.01
    path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)
    path_loss = (path_lengths - path_mean).pow(2).mean()
    mean_path_length = path_mean.detach()

    generator.zero_grad()
    weighted_path_loss = args.path_regularize * args.g_reg_every * path_loss
    if args.path_batch_shrink:
        # Zero-valued term tying a generator output pixel into the graph.
        weighted_path_loss = weighted_path_loss + 0 * fake_img[0, 0, 0, 0]
    weighted_path_loss.backward()
    g_optim.step()

    mean_path_length_avg = reduce_sum(mean_path_length).item() / get_world_size()
    return path_loss, path_lengths, mean_path_length, mean_path_length_avg
def train(args, loader, generator, discriminator, g_optim, d_optim, g_ema, device, teacher_g, percept_loss, parsing_net, exp_dir, exp_log_file):
sample_dir = exp_dir + '/sample/'
ckpt_dir = exp_dir + '/ckpt/'
os.mkdir(sample_dir)
os.mkdir(ckpt_dir)
g_ema_parallel = nn.DataParallel(g_ema, device_ids=train_hyperparams.gpu_device_ids)
# Experiment Statistics Setup
loader = sample_data(loader)
r1_loss = torch.tensor(0.0, device=device)
path_loss = torch.tensor(0.0, device=device)
path_lengths = torch.tensor(0.0, device=device)
mean_path_length = 0
mean_path_length_avg = 0
loss_dict = {}
if args.distributed:
g_module = generator.module
d_module = discriminator.module
else:
g_module = generator
d_module = discriminator
accum = 0.5 ** (32 / (10 * 1000))
sample_z = torch.randn(args.n_sample, args.latent, device=device)
for iter_idx in range(args.start_iter, args.iter):
time1 = time.time()
real_img = next(loader)
real_img = real_img.to(device)
time2 = time.time()
# Use GAN loss to train the discriminator
D_Loss_BackProp(generator, discriminator, real_img, args, device, loss_dict, d_optim)
# Discriminator regularization
if iter_idx % args.d_reg_every == 0:
r1_loss = D_Reg_BackProp(real_img, discriminator, | |
import unittest
from datetime import timedelta
import rx
from rx import operators as ops
from rx.testing import TestScheduler, ReactiveTest
# Shorthand aliases for ReactiveTest's record/event factory helpers, used to
# build and compare the marble-style event sequences in the tests below.
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TimeSpan(object):
    """Minimal stand-in for Rx.NET's TimeSpan: ticks map 1:1 to scheduler units."""

    @classmethod
    def from_ticks(cls, value):
        """Return *value* unchanged (ticks are already in scheduler units)."""
        return value
class TimeInterval(object):
    """A test payload pairing a value with a duration (in scheduler ticks)."""

    def __init__(self, value, interval):
        # timedelta durations are converted to whole milliseconds.
        if isinstance(interval, timedelta):
            interval = int(interval.microseconds / 1000)
        self.value = value
        self.interval = interval

    def __str__(self):
        return "%s@%s" % (self.value, self.interval)

    def equals(self, other):
        """Structural comparison on both value and interval."""
        return self.value == other.value and self.interval == other.interval

    def get_hash_code(self):
        return self.value.get_hash_code() ^ self.interval.get_hash_code()
def new_timer(l, t, scheduler):
    """Build a cold one-shot observable firing at tick *t*, record it in *l*,
    and return it (used as a group_join window-duration selector)."""
    obs = scheduler.create_cold_observable(on_next(t, 0), on_completed(t))
    l.append(obs)
    return obs
class TestGroup_join(unittest.TestCase):
def test_group_join_op_normal_i(self):
    """Both sides complete normally; windows come from recorded cold one-shot
    timers. Checks pairings, result completion at 990, and that xs/ys are
    subscribed for (200, 900) and (200, 800) respectively."""
    scheduler = TestScheduler()
    # Left source: integer values, each with its own window duration.
    xs = scheduler.create_hot_observable(
        on_next(210, TimeInterval(0, 10)),
        on_next(219, TimeInterval(1, 5)),
        on_next(240, TimeInterval(2, 10)),
        on_next(300, TimeInterval(3, 100)),
        on_next(310, TimeInterval(4, 80)),
        on_next(500, TimeInterval(5, 90)),
        on_next(700, TimeInterval(6, 25)),
        on_next(710, TimeInterval(7, 280)),
        on_next(720, TimeInterval(8, 100)),
        on_next(830, TimeInterval(9, 10)),
        on_completed(900)
    )
    # Right source: named values with their own window durations.
    ys = scheduler.create_hot_observable(
        on_next(215, TimeInterval("hat", 20)),
        on_next(217, TimeInterval("bat", 1)),
        on_next(290, TimeInterval("wag", 200)),
        on_next(300, TimeInterval("pig", 10)),
        on_next(305, TimeInterval("cup", 50)),
        on_next(600, TimeInterval("yak", 90)),
        on_next(702, TimeInterval("tin", 20)),
        on_next(712, TimeInterval("man", 10)),
        on_next(722, TimeInterval("rat", 200)),
        on_next(732, TimeInterval("wig", 5)),
        on_completed(800)
    )
    # Collect the duration timers created for each side.
    xsd = []
    ysd = []

    def create():
        def mapper(x_yy):
            x, yy = x_yy
            return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
        return xs.pipe(
            ops.group_join(
                ys,
                lambda x: new_timer(xsd, x.interval, scheduler),
                lambda y: new_timer(ysd, y.interval, scheduler),
            ),
            ops.flat_map(mapper),
        )

    res = scheduler.start(create=create)

    # Each "<x><y>" string is emitted at the instant both windows overlap.
    assert res.messages == [
        on_next(215, "0hat"),
        on_next(217, "0bat"),
        on_next(219, "1hat"),
        on_next(300, "3wag"),
        on_next(300, "3pig"),
        on_next(305, "3cup"),
        on_next(310, "4wag"),
        on_next(310, "4pig"),
        on_next(310, "4cup"),
        on_next(702, "6tin"),
        on_next(710, "7tin"),
        on_next(712, "6man"),
        on_next(712, "7man"),
        on_next(720, "8tin"),
        on_next(720, "8man"),
        on_next(722, "6rat"),
        on_next(722, "7rat"),
        on_next(722, "8rat"),
        on_next(732, "7wig"),
        on_next(732, "8wig"),
        on_next(830, "9rat"),
        on_completed(990)]
    assert xs.subscriptions == [
        subscribe(200, 900)]
    assert ys.subscriptions == [
        subscribe(200, 800)]
def test_group_join_op_normal_ii(self):
    """Left side completes early (721) while the right side runs on; the
    result completes at 910 and ys stays subscribed until then."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(210, TimeInterval(0, 10)),
        on_next(219, TimeInterval(1, 5)),
        on_next(240, TimeInterval(2, 10)),
        on_next(300, TimeInterval(3, 100)),
        on_next(310, TimeInterval(4, 80)),
        on_next(500, TimeInterval(5, 90)),
        on_next(700, TimeInterval(6, 25)),
        on_next(710, TimeInterval(7, 200)),
        on_next(720, TimeInterval(8, 100)),
        on_completed(721)
    )
    ys = scheduler.create_hot_observable(
        on_next(215, TimeInterval("hat", (20))),
        on_next(217, TimeInterval("bat", (1))),
        on_next(290, TimeInterval("wag", (200))),
        on_next(300, TimeInterval("pig", (10))),
        on_next(305, TimeInterval("cup", (50))),
        on_next(600, TimeInterval("yak", (90))),
        on_next(702, TimeInterval("tin", (20))),
        on_next(712, TimeInterval("man", (10))),
        on_next(722, TimeInterval("rat", (200))),
        on_next(732, TimeInterval("wig", (5))),
        on_completed(990)
    )
    # Collect the duration timers created for each side.
    xsd = []
    ysd = []

    def create():
        def mapper(x_yy):
            x, yy = x_yy
            return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
        return xs.pipe(
            ops.group_join(
                ys,
                lambda x: new_timer(xsd, x.interval, scheduler),
                lambda y: new_timer(ysd, y.interval, scheduler),
            ),
            ops.flat_map(mapper),
        )

    res = scheduler.start(create=create)

    assert res.messages == [
        on_next(215, "0hat"),
        on_next(217, "0bat"),
        on_next(219, "1hat"),
        on_next(300, "3wag"),
        on_next(300, "3pig"),
        on_next(305, "3cup"),
        on_next(310, "4wag"),
        on_next(310, "4pig"),
        on_next(310, "4cup"),
        on_next(702, "6tin"),
        on_next(710, "7tin"),
        on_next(712, "6man"),
        on_next(712, "7man"),
        on_next(720, "8tin"),
        on_next(720, "8man"),
        on_next(722, "6rat"),
        on_next(722, "7rat"),
        on_next(722, "8rat"),
        on_next(732, "7wig"),
        on_next(732, "8wig"),
        on_completed(910)]
    assert xs.subscriptions == [
        subscribe(200, 721)]
    assert ys.subscriptions == [
        subscribe(200, 910)]
def test_group_join_op_normal_iii(self):
    """Same data as normal_i, but window durations are rx.timer observables
    whose emitted values are filtered out; expected pairings are unchanged."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(210, TimeInterval(0, 10)),
        on_next(219, TimeInterval(1, 5)),
        on_next(240, TimeInterval(2, 10)),
        on_next(300, TimeInterval(3, 100)),
        on_next(310, TimeInterval(4, 80)),
        on_next(500, TimeInterval(5, 90)),
        on_next(700, TimeInterval(6, 25)),
        on_next(710, TimeInterval(7, 280)),
        on_next(720, TimeInterval(8, 100)),
        on_next(830, TimeInterval(9, 10)),
        on_completed(900))
    ys = scheduler.create_hot_observable(
        on_next(215, TimeInterval("hat", 20)),
        on_next(217, TimeInterval("bat", 1)),
        on_next(290, TimeInterval("wag", 200)),
        on_next(300, TimeInterval("pig", 10)),
        on_next(305, TimeInterval("cup", 50)),
        on_next(600, TimeInterval("yak", 90)),
        on_next(702, TimeInterval("tin", 20)),
        on_next(712, TimeInterval("man", 10)),
        on_next(722, TimeInterval("rat", 200)),
        on_next(732, TimeInterval("wig", 5)),
        on_completed(800))

    def create():
        def mapper(x_yy):
            x, yy = x_yy
            return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
        return xs.pipe(
            ops.group_join(
                ys,
                lambda x: rx.timer(x.interval).pipe(ops.filter(lambda _: False)),
                lambda y: rx.timer(y.interval).pipe(ops.filter(lambda _: False)),
            ),
            ops.flat_map(mapper),
        )

    results = scheduler.start(create=create)

    assert results.messages == [
        on_next(215, "0hat"),
        on_next(217, "0bat"),
        on_next(219, "1hat"),
        on_next(300, "3wag"),
        on_next(300, "3pig"),
        on_next(305, "3cup"),
        on_next(310, "4wag"),
        on_next(310, "4pig"),
        on_next(310, "4cup"),
        on_next(702, "6tin"),
        on_next(710, "7tin"),
        on_next(712, "6man"),
        on_next(712, "7man"),
        on_next(720, "8tin"),
        on_next(720, "8man"),
        on_next(722, "6rat"),
        on_next(722, "7rat"),
        on_next(722, "8rat"),
        on_next(732, "7wig"),
        on_next(732, "8wig"),
        on_next(830, "9rat"),
        on_completed(990)]
def test_group_join_op_normal_iv(self):
    """Durations expressed via TimeSpan.from_ticks and rx.timer; xs completes
    at 990 and ys at 980, result completes at 990."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(210, TimeInterval(0, TimeSpan.from_ticks(10))),
        on_next(219, TimeInterval(1, TimeSpan.from_ticks(5))),
        on_next(240, TimeInterval(2, TimeSpan.from_ticks(10))),
        on_next(300, TimeInterval(3, TimeSpan.from_ticks(100))),
        on_next(310, TimeInterval(4, TimeSpan.from_ticks(80))),
        on_next(500, TimeInterval(5, TimeSpan.from_ticks(90))),
        on_next(700, TimeInterval(6, TimeSpan.from_ticks(25))),
        on_next(710, TimeInterval(7, TimeSpan.from_ticks(200))),
        on_next(720, TimeInterval(8, TimeSpan.from_ticks(100))),
        on_completed(990))
    ys = scheduler.create_hot_observable(
        on_next(215, TimeInterval("hat", TimeSpan.from_ticks(20))),
        on_next(217, TimeInterval("bat", TimeSpan.from_ticks(1))),
        on_next(290, TimeInterval("wag", TimeSpan.from_ticks(200))),
        on_next(300, TimeInterval("pig", TimeSpan.from_ticks(10))),
        on_next(305, TimeInterval("cup", TimeSpan.from_ticks(50))),
        on_next(600, TimeInterval("yak", TimeSpan.from_ticks(90))),
        on_next(702, TimeInterval("tin", TimeSpan.from_ticks(20))),
        on_next(712, TimeInterval("man", TimeSpan.from_ticks(10))),
        on_next(722, TimeInterval("rat", TimeSpan.from_ticks(200))),
        on_next(732, TimeInterval("wig", TimeSpan.from_ticks(5))),
        on_completed(980))

    def create():
        def mapper(x_yy):
            x, yy = x_yy
            return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
        return xs.pipe(
            ops.group_join(
                ys,
                lambda x: rx.timer(x.interval),
                lambda y: rx.timer(y.interval),
            ),
            ops.flat_map(mapper),
        )

    results = scheduler.start(create=create)

    assert results.messages == [
        on_next(215, "0hat"),
        on_next(217, "0bat"),
        on_next(219, "1hat"),
        on_next(300, "3wag"),
        on_next(300, "3pig"),
        on_next(305, "3cup"),
        on_next(310, "4wag"),
        on_next(310, "4pig"),
        on_next(310, "4cup"),
        on_next(702, "6tin"),
        on_next(710, "7tin"),
        on_next(712, "6man"),
        on_next(712, "7man"),
        on_next(720, "8tin"),
        on_next(720, "8man"),
        on_next(722, "6rat"),
        on_next(722, "7rat"),
        on_next(722, "8rat"),
        on_next(732, "7wig"),
        on_next(732, "8wig"),
        on_completed(990)]
def test_group_join_op_normal_v(self):
    """Same as normal_iv except ys completes earlier (900); the expected
    pairings and result completion (990) are identical."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(210, TimeInterval(0, TimeSpan.from_ticks(10))),
        on_next(219, TimeInterval(1, TimeSpan.from_ticks(5))),
        on_next(240, TimeInterval(2, TimeSpan.from_ticks(10))),
        on_next(300, TimeInterval(3, TimeSpan.from_ticks(100))),
        on_next(310, TimeInterval(4, TimeSpan.from_ticks(80))),
        on_next(500, TimeInterval(5, TimeSpan.from_ticks(90))),
        on_next(700, TimeInterval(6, TimeSpan.from_ticks(25))),
        on_next(710, TimeInterval(7, TimeSpan.from_ticks(200))),
        on_next(720, TimeInterval(8, TimeSpan.from_ticks(100))),
        on_completed(990))
    ys = scheduler.create_hot_observable(
        on_next(215, TimeInterval("hat", TimeSpan.from_ticks(20))),
        on_next(217, TimeInterval("bat", TimeSpan.from_ticks(1))),
        on_next(290, TimeInterval("wag", TimeSpan.from_ticks(200))),
        on_next(300, TimeInterval("pig", TimeSpan.from_ticks(10))),
        on_next(305, TimeInterval("cup", TimeSpan.from_ticks(50))),
        on_next(600, TimeInterval("yak", TimeSpan.from_ticks(90))),
        on_next(702, TimeInterval("tin", TimeSpan.from_ticks(20))),
        on_next(712, TimeInterval("man", TimeSpan.from_ticks(10))),
        on_next(722, TimeInterval("rat", TimeSpan.from_ticks(200))),
        on_next(732, TimeInterval("wig", TimeSpan.from_ticks(5))),
        on_completed(900))

    def create():
        def mapper(x_yy):
            x, yy = x_yy
            return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
        return xs.pipe(
            ops.group_join(
                ys,
                lambda x: rx.timer(x.interval),
                lambda y: rx.timer(y.interval),
            ),
            ops.flat_map(mapper),
        )

    results = scheduler.start(create=create)

    assert results.messages == [
        on_next(215, "0hat"),
        on_next(217, "0bat"),
        on_next(219, "1hat"),
        on_next(300, "3wag"),
        on_next(300, "3pig"),
        on_next(305, "3cup"),
        on_next(310, "4wag"),
        on_next(310, "4pig"),
        on_next(310, "4cup"),
        on_next(702, "6tin"),
        on_next(710, "7tin"),
        on_next(712, "6man"),
        on_next(712, "7man"),
        on_next(720, "8tin"),
        on_next(720, "8man"),
        on_next(722, "6rat"),
        on_next(722, "7rat"),
        on_next(722, "8rat"),
        on_next(732, "7wig"),
        on_next(732, "8wig"),
        on_completed(990)]
def test_group_join_op_normal_vi(self):
    """Shorter window for 7 and longer for 8, with xs completing at 850;
    "9rat" is no longer produced and the result completes at 920."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(
        on_next(210, TimeInterval(0, TimeSpan.from_ticks(10))),
        on_next(219, TimeInterval(1, TimeSpan.from_ticks(5))),
        on_next(240, TimeInterval(2, TimeSpan.from_ticks(10))),
        on_next(300, TimeInterval(3, TimeSpan.from_ticks(100))),
        on_next(310, TimeInterval(4, TimeSpan.from_ticks(80))),
        on_next(500, TimeInterval(5, TimeSpan.from_ticks(90))),
        on_next(700, TimeInterval(6, TimeSpan.from_ticks(25))),
        on_next(710, TimeInterval(7, TimeSpan.from_ticks(30))),
        on_next(720, TimeInterval(8, TimeSpan.from_ticks(200))),
        on_next(830, TimeInterval(9, TimeSpan.from_ticks(10))),
        on_completed(850))
    ys = scheduler.create_hot_observable(
        on_next(215, TimeInterval("hat", TimeSpan.from_ticks(20))),
        on_next(217, TimeInterval("bat", TimeSpan.from_ticks(1))),
        on_next(290, TimeInterval("wag", TimeSpan.from_ticks(200))),
        on_next(300, TimeInterval("pig", TimeSpan.from_ticks(10))),
        on_next(305, TimeInterval("cup", TimeSpan.from_ticks(50))),
        on_next(600, TimeInterval("yak", TimeSpan.from_ticks(90))),
        on_next(702, TimeInterval("tin", TimeSpan.from_ticks(20))),
        on_next(712, TimeInterval("man", TimeSpan.from_ticks(10))),
        on_next(722, TimeInterval("rat", TimeSpan.from_ticks(20))),
        on_next(732, TimeInterval("wig", TimeSpan.from_ticks(5))),
        on_completed(900))

    def create():
        def mapper(x_yy):
            x, yy = x_yy
            return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
        return xs.pipe(
            ops.group_join(
                ys,
                lambda x: rx.timer(x.interval),
                lambda y: rx.timer(y.interval),
            ),
            ops.flat_map(mapper),
        )

    results = scheduler.start(create=create)

    assert results.messages == [
        on_next(215, "0hat"),
        on_next(217, "0bat"),
        on_next(219, "1hat"),
        on_next(300, "3wag"),
        on_next(300, "3pig"),
        on_next(305, "3cup"),
        on_next(310, "4wag"),
        on_next(310, "4pig"),
        on_next(310, "4cup"),
        on_next(702, "6tin"),
        on_next(710, "7tin"),
        on_next(712, "6man"),
        on_next(712, "7man"),
        on_next(720, "8tin"),
        on_next(720, "8man"),
        on_next(722, "6rat"),
        on_next(722, "7rat"),
        on_next(722, "8rat"),
        on_next(732, "7wig"),
        on_next(732, "8wig"),
        on_completed(920)]
def test_group_join_op_normal_vii(self):
    """Empty left source (completes immediately at 210): no pairings are
    produced and the result just completes at 210."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_completed(210))
    ys = scheduler.create_hot_observable(
        on_next(215, TimeInterval("hat", TimeSpan.from_ticks(20))),
        on_next(217, TimeInterval("bat", TimeSpan.from_ticks(1))),
        on_next(290, TimeInterval("wag", TimeSpan.from_ticks(200))),
        on_next(300, TimeInterval("pig", TimeSpan.from_ticks(10))),
        on_next(305, TimeInterval("cup", TimeSpan.from_ticks(50))),
        on_next(600, TimeInterval("yak", TimeSpan.from_ticks(90))),
        on_next(702, TimeInterval("tin", TimeSpan.from_ticks(20))),
        on_next(712, TimeInterval("man", TimeSpan.from_ticks(10))),
        on_next(722, TimeInterval("rat", TimeSpan.from_ticks(20))),
        on_next(732, TimeInterval("wig", TimeSpan.from_ticks(5))),
        on_completed(900))

    def create():
        def mapper(x_yy):
            x, yy = x_yy
            return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
        return xs.pipe(
            ops.group_join(
                ys,
                lambda x: rx.timer(x.interval).pipe(ops.filter(lambda _: False)),
                lambda y: rx.timer(y.interval).pipe(ops.filter(lambda _: False)),
            ),
            ops.flat_map(mapper),
        )

    results = scheduler.start(create=create)
    assert results.messages == [on_completed(210)]
def test_group_join_op_normal_viii(self):
    """A single never-completing left value joined with a completing right
    source: exactly one pairing and no completion message in the result."""
    scheduler = TestScheduler()
    xs = scheduler.create_hot_observable(on_next(210, TimeInterval(0, TimeSpan.from_ticks(200))))
    ys = scheduler.create_hot_observable(
        on_next(220, TimeInterval("hat", TimeSpan.from_ticks(100))),
        on_completed(230))

    def create():
        def mapper(x_yy):
            x, yy = x_yy
            return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
        return xs.pipe(
            ops.group_join(
                ys,
                lambda x: rx.timer(x.interval),
                lambda y: rx.timer(y.interval),
            ),
            ops.flat_map(mapper),
        )

    results = scheduler.start(create=create)
    assert results.messages == [on_next(220, "0hat")]
def test_group_join_op_normal_ix(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(210, TimeInterval(0, TimeSpan.from_ticks(10))),
on_next(219, TimeInterval(1, TimeSpan.from_ticks(5))),
on_next(240, TimeInterval(2, TimeSpan.from_ticks(10))),
on_next(300, TimeInterval(3, TimeSpan.from_ticks(100))),
on_next(310, TimeInterval(4, TimeSpan.from_ticks(80))),
on_next(500, TimeInterval(5, TimeSpan.from_ticks(90))),
on_next(700, TimeInterval(6, TimeSpan.from_ticks(25))),
on_next(710, TimeInterval(7, TimeSpan.from_ticks(300))),
on_next(720, TimeInterval(8, TimeSpan.from_ticks(100))),
on_next(830, TimeInterval(9, TimeSpan.from_ticks(10))),
on_completed(900))
ys = scheduler.create_hot_observable(
on_next(215, TimeInterval("hat", TimeSpan.from_ticks(20))),
on_next(217, TimeInterval("bat", TimeSpan.from_ticks(1))),
on_next(290, TimeInterval("wag", TimeSpan.from_ticks(200))),
on_next(300, TimeInterval("pig", TimeSpan.from_ticks(10))),
on_next(305, TimeInterval("cup", TimeSpan.from_ticks(50))),
on_next(600, TimeInterval("yak", TimeSpan.from_ticks(90))),
on_next(702, TimeInterval("tin", TimeSpan.from_ticks(20))),
on_next(712, TimeInterval("man", TimeSpan.from_ticks(10))),
on_next(722, TimeInterval("rat", TimeSpan.from_ticks(200))),
on_next(732, TimeInterval("wig", TimeSpan.from_ticks(5))),
on_completed(800))
def create():
def mapper(x_yy):
x, yy = x_yy
return yy.pipe(ops.map(lambda y: '{}{}'.format(x.value, y.value)))
return xs.pipe(
ops.group_join(
ys,
lambda x: rx.timer(x.interval),
lambda y: rx.timer(y.interval),
),
ops.flat_map(mapper),
)
results = scheduler.start(create=create, disposed=713)
assert results.messages == [
on_next(215, "0hat"),
on_next(217, "0bat"),
on_next(219, "1hat"),
on_next(300, "3wag"),
on_next(300, "3pig"),
on_next(305, "3cup"),
on_next(310, "4wag"),
on_next(310, "4pig"),
on_next(310, "4cup"),
on_next(702, "6tin"),
on_next(710, "7tin"),
on_next(712, "6man"),
on_next(712, | |
scorer=None, ckpt_every=0, printing=False, min_length=0, **kwargs):
assert top_p == 1.0, "For now, the top_p implementation does not work, as the sampling on GPU will crash randomly"
timings = False
T = time.time()
if timings:
print("------------")
force_start_ids = []
if force_start is not None:
force_start_ids = self.tokenizer.encode(force_start, add_special_tokens=False)
if self.model_card == "facebook/bart-large-cnn":
force_start_ids = [0]
batch_size = len(bodies)
N = batch_size * beam_size
expanded_inputs = [enc_inp for enc_inp in bodies for _ in range(beam_size)]
inputs = self.preprocess_input(bodies)
if timings:
print("tokenization", time.time()-T)
T = time.time()
build_up = torch.LongTensor([self.start_id]).repeat(N, 1).to(self.device)
seq_logprobs = torch.zeros((N)).to(self.device)
scores = torch.zeros((N)).to(self.device)
one_every_k = torch.FloatTensor([1] + [0] * (beam_size-1)).repeat(batch_size*beam_size).to(self.device)
# Sometimes, we process the same input, as we run it once as a sampled, and once as an argmax, in which case we should reuse the computation
past = self.encode(inputs)
# print("OVER HERE:", len(past), len(past[0]), past[0][0].shape)
past = self.past_repeat_interleave(past, beam_size)
inputs_repeated = torch.repeat_interleave(inputs[0], repeats=beam_size, dim=0)
end_id = self.tokenizer.eos_token_id
finished_func = lambda build_up: all([end_id in build for build in build_up[:, 1:]])
next_force_split = False
while build_up.shape[1] < max_output_length and not finished_func(build_up):
is_force_start = len(force_start_ids) > 0 and build_up.shape[1] <= len(force_start_ids)
logits, past = self.decode_fast(build_up, past)
logits = logits.view(N, -1)
logits = utils_sampling.ngram_copy_filtering(build_up, inputs_repeated, logits, n_gram=no_copy_ngram)
logits = utils_sampling.ngram_copy_filtering(build_up, build_up, logits, n_gram=no_repeat_ngram)
if sample:
logits = utils_sampling.top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
if min_length > 0 and build_up.shape[1] <= min_length and not is_force_start:
logits[:, end_id] -= float("Inf")
logprobs = torch.nn.functional.log_softmax(logits, dim=-1)
if is_force_start:
force_idx = build_up.shape[1]-1
all_selects = torch.LongTensor([force_start_ids[force_idx]]).repeat(N, beam_size).to(self.device)
elif sample:
probs = torch.nn.functional.softmax(logits/temperature, dim=-1)
distrib = torch.distributions.categorical.Categorical(probs)
all_selects = torch.cat([distrib.sample().unsqueeze(-1) for _ in range(beam_size)], dim=1)
# torch.multinomial basically doesn't work in torch 1.6+ and in 1.5.1- it throws a bug when coupled with
# all_selects = torch.multinomial(probs, beam_size)
else:
_, all_selects = torch.topk(logprobs, k=beam_size, dim=-1)
not_finished = (1-torch.any(build_up[:, 1:]==end_id, dim=1).float()).to(self.device)
expanded_not_finished = torch.repeat_interleave(not_finished, repeats=beam_size)
expanded_logprobs = torch.repeat_interleave(seq_logprobs, repeats=beam_size) # This should be batch_size * beam_size²
expanded_logprobs += expanded_not_finished * logprobs[torch.repeat_interleave(torch.arange(N), repeats=beam_size), all_selects.view(-1)]
# expanded_logprobs += logprobs[torch.repeat_interleave(torch.arange(N), repeats=beam_size), all_selects.view(-1)]
# We don't want you to select from finished beams
expanded_logprobs -= (1-expanded_not_finished)*(1-one_every_k)*1000.0
expanded_score = expanded_logprobs # This is if we don't have a scorer
batched_logprobs = expanded_logprobs.view(batch_size, -1)
batched_scores = expanded_score.view(batch_size, -1)
if build_up.shape[1] == 1 or (len(force_start_ids) == build_up.shape[1]-1) or next_force_split:
# print("Force splitting is going to happen")
# Force the model to differ in path: on (1) the first token generated, or (2) the first token generated after the force_start track
choices = torch.arange(beam_size, device=self.device).repeat(batch_size)
batched_choices = choices.view(batch_size, beam_size)
next_force_split = False
else:
_, batched_choices = torch.topk(batched_scores, k=beam_size, dim=1) # Going from k² choices per element to k choices.
batched_tracks = batched_choices // beam_size
tracks = beam_size*torch.repeat_interleave(torch.arange(batch_size), repeats=beam_size).to(self.device) + batched_tracks.view(-1)
selected_scores = batched_scores[torch.repeat_interleave(torch.arange(batch_size), repeats=beam_size), batched_choices.view(-1)]
selected_logprobs = batched_logprobs[torch.repeat_interleave(torch.arange(batch_size), repeats=beam_size), batched_choices.view(-1)]
# Figure out the kept words to be added to the build-up
per_batch_selects = all_selects.view(batch_size, -1)
next_words = per_batch_selects[torch.repeat_interleave(torch.arange(batch_size), repeats=beam_size), batched_choices.view(-1)]
next_words = next_words.unsqueeze(1)
# print("ABC", next_words)
not_finished = not_finished[tracks] # Rewire the not_finished
next_words = (next_words * not_finished.view(-1, 1).long()) # + (1-not_finished.view(-1, 1).long()) * end_id # This is so that nothing gets written past end_token but other end_tokens
# if build_up.shape[1] >= 82:
# print(build_up.shape[1], not_finished.long(), next_words)
# print("AB",next_words.shape)
# print(">>>>", next_words)
# [BOOKKEEPING] Going from k² to k options at each time means we have to swap all the caches around: past, build-up
# print("!!!", not_finished)
# not_finished = not_finished[tracks] # Need to recompute of not_finished after reshuffle
# print("<<<", not_finished)
build_up = build_up[tracks, :]
past = self.past_beam_bookkeeping(past, tracks)
# Update the latest scores, and the current_build
build_up = torch.cat((build_up, next_words), dim=1)
# print("[NL]", build_up)
# print("%d" % (build_up.shape[1]), self.tokenizer.batch_decode(build_up.tolist()))
scores = selected_scores.view(-1)
seq_logprobs = selected_logprobs.view(-1)
if ckpt_every > 0 and (build_up.shape[1]-1) % ckpt_every == 0:
# NEED TO CHECKPOINT
generated_so_far = self.toks2text_batch(build_up)
if printing:
print("============== CKPT %d =================" % (build_up.shape[1]-1))
print("Options:")
for option in generated_so_far:
print(option)
print("-----------")
so_far_scores = scorer(expanded_inputs, generated_so_far, partial=True, printing=printing)
so_far_scores = torch.FloatTensor(so_far_scores["total_scores"]).cuda()
folded_scores = so_far_scores.view(-1, beam_size)
best_idx_in_each = torch.argmax(folded_scores, dim=-1)
chosen_tracks = torch.arange(0, N, step=beam_size).cuda() + best_idx_in_each
chosen_tracks = torch.repeat_interleave(chosen_tracks, repeats=beam_size, dim=0)
# Pump it back up to k candidates; not seq2seq compatible for now
build_up = build_up[chosen_tracks]
scores = scores[chosen_tracks]
past = self.past_beam_bookkeeping(past, chosen_tracks)
next_force_split = True
if timings:
print("loop", time.time()-T)
T = time.time()
# print("[NL]", build_up.shape, build_up)
batched_build_up = build_up.view(batch_size, beam_size, -1)
batched_logprobs = seq_logprobs.view(batch_size, -1)
batched_scores = scores.view(batch_size, -1)
outputs = []
for orig_beams, beam_logprobs, beam_scores in zip(batched_build_up, batched_logprobs, batched_scores):
output_txts, beams = self.toks2text_batch(orig_beams, return_tokens=True)
outputs.append([{"output_text": out_txt, "output_tokens": beam, "orig_output_tokens": orig_beam.tolist(), "logprob": lp.item(), "score": score.item()}
for out_txt, beam, orig_beam, lp, score in zip(output_txts, beams, orig_beams, beam_logprobs, beam_scores)])
if timings:
print("outputs", time.time()-T)
T = time.time()
return outputs
def generate(self, bodies, max_batch_size=8, beam_size=1, ckpt_runs=1, num_runs=1, progress=False, sort_score=False, keep_unique=False, **kwargs):
# This function batches the generation and adds functionality for `num_runs` (running k independent runs of the same input), and dispatches it to the correct generation method:
# `generate_beam_batch` if beam_size>1 (requires beam_size)
# `generate_ckpt_batch` if ckpt_runs>1 (requires scorer, ckpt_runs, ckpt_every)
# `generate_batch` otherwise
assert not (beam_size > 1 and ckpt_runs > 1), "Cannot ask for beam search and ckpt generation at the same time"
if ckpt_runs > 1:
assert "ckpt_every" in kwargs and "scorer" in kwargs, "Required parameters were not fed to the generation function."
N_start = len(bodies)
if num_runs > 1:
bodies = [body for body in bodies for i in range(num_runs)]
N = len(bodies)
outputs = []
iterator = range(0, N, max_batch_size)
if progress:
iterator = tqdm.tqdm(iterator)
for i in iterator:
batch_bodies = bodies[i:min(N, i+max_batch_size)]
with torch.no_grad():
if beam_size > 1:
# print("Will run generate beam batch")
batch_outputs = self.generate_beam_batch(batch_bodies, beam_size=beam_size, **kwargs)
elif ckpt_runs > 1:
# print("Will run generate ckpt batch")
batch_outputs = self.generate_ckpt_batch(batch_bodies, ckpt_runs=ckpt_runs, **kwargs)
else:
# print("Will run generate batch")
batch_outputs = self.generate_batch(batch_bodies, **kwargs)
outputs += batch_outputs
if num_runs > 1:
# Refold the number of runs into N outputs
final_outputs = []
for i in range(N_start):
all_runs = outputs[num_runs*i:(num_runs*(i+1))]
if beam_size > 1:
all_runs = [beam for beams in all_runs for beam in beams] # Unfold
if sort_score:
sort_key = "score" if "score" in all_runs[0] else "logprob"
all_runs = sorted(all_runs, key=lambda o: -o[sort_key])
if keep_unique:
already_outputs = set([])
unique_runs = []
for run in all_runs:
if run["output_text"] not in already_outputs:
unique_runs.append(run)
already_outputs.add(run["output_text"])
all_runs = unique_runs
final_outputs.append(all_runs)
outputs = final_outputs
return outputs
if __name__ == "__main__":
# import difflib, os
import utils_misc
MODELS_FOLDER = os.environ["MODELS_FOLDER"]
utils_misc.select_freer_gpu()
################### TESTING GPT2 SIMPLIFIER #####################
# model = Generator("gpt2-medium", max_output_length=90, device='cuda')
# model.reload(os.path.join(MODELS_FOLDER, "simplifier/gen_mediumc_lamb_1.bin"))
# text = "The revamped MoMA will not be a single narrative of one history. Rather, it will be a collection of perspectives, according to <NAME>. She is the museum's chief curator of painting and sculpture."
# print("Original")
# print(text)
# print("========================")
# generated_texts = model.generate([text], beam_size=1, num_runs=10)[0]
# for generated_beam in generated_texts:
# print("==================")
# print("[%.3f]\n%s" % (generated_beam['score'], generated_beam['output_text']))
################### TESTING BART QGEN #####################
# model = Generator("facebook/bart-large", max_output_length=90, device="cuda", seq2seq=True)
# model.reload("/mnt/results/gen_qgen_bart_logprob_1.993.bin")
# outputs = model.generate([text], beam_size=3, num_runs=3, sample=False, sort_score=True) # , force_start="How is"
# for beams in outputs:
# print( "====================")
# for beam in beams:
# print("[%.3f]\n%s" % (beam['score'], beam['output_text']))
################### TESTING COPIER #####################
# model = Generator("facebook/bart-base", max_output_length=90, device="cuda", seq2seq=True)
# model = Generator("gpt2-medium", max_output_length=90, device="cuda")
# model.reload("/home/phillab/models/gpt2_med_lede2.bin")
# # model.reload("/home/phillab/models/gen_gpt2_med_cp90_logprob_0.001.bin")
# # model.reload("/home/phillab/models/gpt2_med_cp90.bin")
# model.model.half().eval()
# text = "Romanian villagers have re-elected their mayor by a landslide even though he died two weeks ago from Covid-19 complications. They said he had done a good job and deserved his posthumous victory.
# And the also added something else which I forgot for now."
# with torch.no_grad():
# outputs = model.generate([text], num_runs=32, sample=True, no_copy_ngram=7, sort_score=True) # , force_start="How is"
# for beams in outputs:
# print( "====================")
# for beam in beams:
# print("[%.3f]\n%s" % (beam['logprob'], beam['output_text']))
################### TESTING QGEN #####################
# text = "With most concerts, events, and international travel still off limits for Americans, national and state parks have seen a dramatic uptick in | |
"""Tests the automated search for characteristic over cryptographic primitives."""
import warnings
from datetime import datetime
import collections
import functools
import os
import unittest
from cascada.differential.difference import XorDiff, RXDiff
from cascada.linear.mask import LinearMask
from cascada.algebraic.value import BitValue, WordValue
from cascada.smt.chsearch import (
ChModelAssertType, PrintingMode, INCREMENT_NUM_ROUNDS, MissingVarWarning,
round_based_ch_search, round_based_cipher_ch_search,
_get_smart_print
)
from cascada.smt.invalidpropsearch import (
round_based_invalidprop_search, round_based_invalidcipherprop_search
)
from cascada.primitives import blockcipher
from cascada.primitives import aes
from cascada.primitives import cham
from cascada.primitives import chaskey
from cascada.primitives import feal
from cascada.primitives import hight
from cascada.primitives import lea
from cascada.primitives import multi2
from cascada.primitives import picipher
from cascada.primitives import shacal1
from cascada.primitives import shacal2
from cascada.primitives import speck
from cascada.primitives import simeck
from cascada.primitives import simon
from cascada.primitives import skinny64
from cascada.primitives import skinny128
from cascada.primitives import tea
from cascada.primitives import xtea
# Test-suite switches: the long-running searches are skipped by default,
# no output file is written, and printing stays silent.
SKIP_LONG_TESTS = True
SKIP_INVALID_LONG_TESTS = True
OUTPUT_FILE = False
PRINTING_MODE = PrintingMode.Silent
# Whether to also compute empirical weights during the weight-based searches.
COMPUTE_EW = True
# Concrete cipher instances referenced by the parameter list below.
CHAM64 = cham.get_CHAM_instance(cham.CHAMInstance.CHAM_64_128)
Speck32 = speck.get_Speck_instance(speck.SpeckInstance.speck_32_64)
Simeck32 = simeck.get_Simeck_instance(simeck.SimeckInstance.simeck_32_64)
Simon32 = simon.get_Simon_instance(simon.SimonInstance.simon_32_64)
# function can be BvFunction or Cipher
# xor_nr is the maximum number of rounds with probability-one XOR characteristics
# cipher_xor_nr is the maximum number of rounds with probability-one XOR cipher characteristics
# (nr=True means probability-one characteristics for many number of rounds)
# (nr=False means no probability-one characteristics even for the minimum number of rounds)
# (nr=None means no search is done)
SearchParameters = collections.namedtuple(
    'SearchParameters', ['function', 'xor_nr', 'rk_nr', 'linear_nr',
                         'cipher_xor_nr', 'cipher_rx_nr',
                         'ignore_cipher_xor_invalid', 'ignore_cipher_rx_invalid',
                         'ignore_bitvalue'],
    defaults=[None, None, None, None, None, None, None, None]  # all except function
)
# One entry per primitive (cipher and/or key schedule); the trailing comments
# record why a given search mode is disabled or what its known behavior is.
ListSearchParameters = [
    SearchParameters(aes.AESCipher, xor_nr=False, linear_nr=False, cipher_xor_nr=1, ignore_bitvalue=True, ignore_cipher_rx_invalid=True),
    SearchParameters(aes.AESCipher.key_schedule, xor_nr=3, linear_nr=True, ignore_bitvalue=True),
    SearchParameters(CHAM64, xor_nr=4, rk_nr=None, linear_nr=7, cipher_xor_nr=10, cipher_rx_nr=False),
    SearchParameters(CHAM64.key_schedule, xor_nr=True, rk_nr=True),  # linear_nr=True but unused input vars
    SearchParameters(chaskey.ChaskeyPi, xor_nr=1, rk_nr=False, linear_nr=1),
    SearchParameters(feal.FEALCipher, xor_nr=3, rk_nr=None, linear_nr=None, cipher_xor_nr=3, cipher_rx_nr=None),  # BvAddCt in Enc
    SearchParameters(feal.FEALCipher.key_schedule, xor_nr=1, rk_nr=None, linear_nr=None),  # BvAddCt in KS
    SearchParameters(hight.HightCipher, xor_nr=3, rk_nr=None, linear_nr=4, cipher_xor_nr=7, cipher_rx_nr=False),  # nr+1 due to key-whitening
    SearchParameters(hight.HightCipher.key_schedule, xor_nr=True, rk_nr=1, linear_nr=None),  # BvAddCt
    SearchParameters(lea.LEACipher, xor_nr=2, rk_nr=None, linear_nr=2),  # cipher_xor_nr=2 but 10+min, cipher_rk_nr discards many ch due to EW
    SearchParameters(lea.LEACipher.key_schedule, xor_nr=2, rk_nr=None, linear_nr=None),  # BvAddCt, xor_nr slow, rk discards many ch due to EW
    SearchParameters(multi2.MULTI2Cipher, xor_nr=2, rk_nr=None, linear_nr=3, cipher_xor_nr=2, cipher_rx_nr=False),
    SearchParameters(multi2.MULTI2Cipher.key_schedule, xor_nr=True, rk_nr=False, linear_nr=None),  # BvAddCt
    SearchParameters(picipher.PiPermutation, xor_nr=False, rk_nr=None, linear_nr=None),  # weight rk > 200, BvAddCt
    SearchParameters(shacal1.SHACAL1Cipher, xor_nr=False, cipher_xor_nr=True),  # xor 1r but mnr=4, BvIf, cipher_xor 16r but slow, rk discards many ch due to EW
    SearchParameters(shacal1.SHACAL1Cipher.key_schedule, xor_nr=True, rk_nr=False, linear_nr=None),  # BvAddCt, xor 19r but slow
    SearchParameters(shacal2.SHACAL2Cipher, xor_nr=False, cipher_xor_nr=True),  # xor 1r but mnr=4, BvIf, cipher_xor 18r but slow, rk discards many ch due to EW
    SearchParameters(shacal2.SHACAL2Cipher.key_schedule, xor_nr=True, rk_nr=False, linear_nr=None),  # BvAddCt, xor 18r but slow
    SearchParameters(Speck32, xor_nr=1, rk_nr=None, linear_nr=2, cipher_xor_nr=2, cipher_rx_nr=False),
    SearchParameters(Speck32.key_schedule, xor_nr=4, rk_nr=False, linear_nr=True),
    SearchParameters(Simeck32, xor_nr=1, rk_nr=None, linear_nr=None, cipher_xor_nr=5, cipher_rx_nr=6, ignore_bitvalue=True),
    SearchParameters(Simeck32.key_schedule, xor_nr=7, rk_nr=8, linear_nr=None, ignore_bitvalue=True),
    SearchParameters(Simon32, xor_nr=1, rk_nr=None, linear_nr=None, cipher_xor_nr=5, cipher_rx_nr=6, ignore_bitvalue=True),
    SearchParameters(Simon32.key_schedule, xor_nr=True, rk_nr=True, linear_nr=None, ignore_bitvalue=True),  # linear ks
    SearchParameters(skinny64.SKINNYCipher, xor_nr=False, linear_nr=False, ignore_bitvalue=True, ignore_cipher_xor_invalid=True, ignore_cipher_rx_invalid=True),  # cipher_xor_nr requires ignore_first_sub_cells
    SearchParameters(skinny64.SKINNYCipher.key_schedule, xor_nr=True, linear_nr=True, ignore_bitvalue=True),
    SearchParameters(skinny128.SKINNYCipher, xor_nr=False, linear_nr=False, ignore_bitvalue=True, ignore_cipher_xor_invalid=True, ignore_cipher_rx_invalid=True),  # cipher_xor_nr requires ignore_first_sub_cells
    SearchParameters(skinny128.SKINNYCipher.key_schedule, xor_nr=True, linear_nr=True, ignore_bitvalue=True),
    SearchParameters(tea.TEACipher, xor_nr=False, rk_nr=None, linear_nr=None, cipher_xor_nr=True, cipher_rx_nr=None, ignore_cipher_xor_invalid=True),  # BvAddCt, rk huge error
    SearchParameters(xtea.XTEACipher, xor_nr=1, rk_nr=None, linear_nr=3, cipher_xor_nr=8, cipher_rx_nr=False, ignore_cipher_xor_invalid=True),
    SearchParameters(xtea.XTEACipher.key_schedule, xor_nr=True, rk_nr=1, linear_nr=None),  # BvAddCt
]
# Make zip() raise on length mismatch (Python 3.10+) so mispaired lists in the
# test loops below fail loudly; deliberately shadows the builtin in this module.
zip = functools.partial(zip, strict=True)
class TestPrimitivesSearch(unittest.TestCase):
"""Test automated characteristic search of the primitives implemented."""
@classmethod
def setUpClass(cls):
if OUTPUT_FILE:
date_string = datetime.strftime(datetime.now(), '%d-%H-%M')
filename = "output_test_primitives_smt" + date_string + ".txt"
assert not os.path.isfile(filename)
cls.filename = filename
else:
cls.filename = None
warnings.filterwarnings("ignore", category=ImportWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=MissingVarWarning)
    @unittest.skipIf(SKIP_LONG_TESTS, "test_ch_search")
    def test_ch_search(self):
        """Search characteristics of each primitive for every property type.

        For every entry of ListSearchParameters and every property type
        (XorDiff, RXDiff, LinearMask, BitValue, WordValue), checks that a
        probability-one characteristic is found for exactly the expected
        number of rounds, and that beyond that either none exists
        (ProbabilityOne mode) or a positive-weight one is found
        (ValidityAndWeight mode).
        """
        extra_ch_finder_args = {
            "raise_exception_missing_var": False,
            "exclude_zero_input_prop": True,
            "printing_mode": PRINTING_MODE,
            "filename": self.__class__.filename,
        }
        extra_findnextchweight_args = None
        if COMPUTE_EW:
            # Also compute empirical weights (C-code backend) when searching by weight.
            extra_findnextchweight_args = {
                "initial_weight": 0,
                "empirical_weight_options": {"C_code": True, "split_by_max_weight": 10}
            }
        if PRINTING_MODE != PrintingMode.Silent:
            smart_print = _get_smart_print(self.__class__.filename)
            smart_print(f"# {self.test_ch_search.__name__}")
        for sp in ListSearchParameters:
            f = sp.function
            # nr encodes the expectation per property type (see the comments
            # above SearchParameters): int = max rounds with prob-one
            # characteristics, True = prob-one for many rounds, False = none
            # even at the minimum number of rounds, None = skip this search.
            for nr, prop_type in zip(
                    [sp.xor_nr, sp.rk_nr, sp.linear_nr, True, True],
                    [XorDiff, RXDiff, LinearMask, BitValue, WordValue]
            ):
                if nr is None:
                    continue
                for at in [ChModelAssertType.ProbabilityOne, ChModelAssertType.ValidityAndWeight]:
                    # Algebraic properties only make sense in ProbabilityOne mode.
                    if prop_type in [BitValue, WordValue] and at == ChModelAssertType.ValidityAndWeight:
                        continue
                    if prop_type == BitValue and sp.ignore_bitvalue is True:
                        continue
                    msg = f"\n## {f.__name__}, nr: {nr}, prop_type: {prop_type.__name__}, at: {at}"
                    if PRINTING_MODE != PrintingMode.Silent:
                        smart_print(msg)
                    # Search from nr (or the primitive's minimum number of
                    # rounds) up to one extra round.
                    round_ch_iterator = round_based_ch_search(
                        f, nr if (nr is not True and nr is not False) else getattr(f, "_min_num_rounds", 1),
                        nr + 1 if (nr is not True and nr is not False) else getattr(f, "_min_num_rounds", 1) + 1,
                        prop_type, at, "btor",
                        extra_chfinder_args=extra_ch_finder_args,
                        extra_findnextchweight_args=extra_findnextchweight_args
                    )
                    if nr is not False:
                        # A characteristic must exist for the first number of rounds.
                        try:
                            num_rounds_found, ch_found = next(round_ch_iterator)
                        except StopIteration:
                            raise StopIteration(msg)
                        target_nr = nr if nr is not True else getattr(f, "_min_num_rounds", 1)
                        self.assertEqual(num_rounds_found, target_nr, msg=msg)
                        if PRINTING_MODE != PrintingMode.Silent:
                            if PRINTING_MODE == PrintingMode.WeightsAndSrepr:
                                smart_print(ch_found.srepr())
                            else:
                                smart_print(ch_found)
                            smart_print("\n".join(ch_found.get_formatted_logged_msgs()))
                        # Advance the generator to the next number of rounds.
                        round_ch_iterator.send(INCREMENT_NUM_ROUNDS)
                        msg += "\n||| probability-one characteristic found"
                    if nr is not True:
                        try:
                            num_rounds_found, ch_found = next(round_ch_iterator)
                        except StopIteration:
                            num_rounds_found, ch_found = None, None
                        if at == ChModelAssertType.ProbabilityOne:
                            # Beyond nr rounds no probability-one characteristic may exist.
                            self.assertIsNone(num_rounds_found, msg=msg)
                            self.assertIsNone(ch_found, msg=msg)
                        else:
                            if nr is False:
                                # INCREMENT_NUM_ROUNDS not sent
                                target_nr = getattr(f, "_min_num_rounds", 1)
                            else:
                                target_nr = nr + 1
                            self.assertEqual(num_rounds_found, target_nr, msg=msg)
                            # The characteristic found must not be probability-one.
                            self.assertGreater(ch_found.ch_weight, 0, msg=msg)
                            if PRINTING_MODE != PrintingMode.Silent:
                                if PRINTING_MODE == PrintingMode.WeightsAndSrepr:
                                    smart_print(ch_found.srepr())
                                else:
                                    smart_print(ch_found)
                                smart_print("\n".join(ch_found.get_formatted_logged_msgs()))
    @unittest.skipIf(SKIP_LONG_TESTS, "test_cipher_ch_search")
    def test_cipher_ch_search(self):
        """Search cipher characteristics (key schedule + encryption) of each cipher.

        Mirrors test_ch_search, but uses round_based_cipher_ch_search with the
        cipher-level expectations (cipher_xor_nr / cipher_rx_nr) and checks the
        combined key-schedule + encryption characteristic weight.
        """
        extra_cipherchfinder_args = {
            "raise_exception_missing_var": False,
            "ks_exclude_zero_input_prop": False,
            "enc_exclude_zero_input_prop": True,
            "printing_mode": PRINTING_MODE,
            "filename": self.__class__.filename,
        }
        extra_findnextchweight_args = None
        if COMPUTE_EW:
            # Empirical weights computed separately for key schedule and encryption.
            extra_findnextchweight_args = {
                "initial_weight": 0,
                "ks_empirical_weight_options": {"C_code": True, "split_by_max_weight": 10},
                "enc_empirical_weight_options": {"C_code": True, "split_by_max_weight": 10},
            }
        if PRINTING_MODE != PrintingMode.Silent:
            smart_print = _get_smart_print(self.__class__.filename)
            smart_print(f"# {self.test_cipher_ch_search.__name__}")
        for sp in ListSearchParameters:
            f = sp.function
            # nr semantics as in test_ch_search (int/True/False/None).
            for nr, prop_type in zip(
                    [sp.cipher_xor_nr, sp.cipher_rx_nr, True, True],
                    [XorDiff, RXDiff, BitValue, WordValue]
            ):
                # Only full ciphers are searched here (key schedules are
                # covered by test_ch_search).
                if nr is None or not issubclass(f, blockcipher.Cipher):
                    continue
                for at in [ChModelAssertType.ProbabilityOne, ChModelAssertType.ValidityAndWeight]:
                    if prop_type in [BitValue, WordValue] and at == ChModelAssertType.ValidityAndWeight:
                        continue
                    if prop_type == BitValue and sp.ignore_bitvalue is True:
                        continue
                    msg = f"\n## {f.__name__}, nr: {nr}, prop_type: {prop_type.__name__}, at: {at}"
                    if PRINTING_MODE != PrintingMode.Silent:
                        smart_print(msg)
                    # Search from nr (or the cipher's minimum) up to one extra
                    # round; `at` is used for both the KS and the Enc parts.
                    round_ch_iterator = round_based_cipher_ch_search(
                        f, nr if (nr is not True and nr is not False) else getattr(f, "_min_num_rounds", 1),
                        nr + 1 if (nr is not True and nr is not False) else getattr(f, "_min_num_rounds", 1) + 1,
                        prop_type, at, at, "btor",
                        extra_cipherchfinder_args=extra_cipherchfinder_args,
                        extra_findnextchweight_args=extra_findnextchweight_args
                    )
                    if nr is not False:
                        # A cipher characteristic must exist for the first number of rounds.
                        try:
                            num_rounds_found, ch_found = next(round_ch_iterator)
                        except StopIteration:
                            raise StopIteration(msg)
                        target_nr = nr if nr is not True else getattr(f, "_min_num_rounds", 1)
                        self.assertEqual(num_rounds_found, target_nr, msg=msg)
                        if PRINTING_MODE != PrintingMode.Silent:
                            if PRINTING_MODE == PrintingMode.WeightsAndSrepr:
                                smart_print(ch_found.srepr())
                            else:
                                smart_print(ch_found)
                            smart_print("\n".join(ch_found.get_formatted_logged_msgs()))
                        # Advance the generator to the next number of rounds.
                        round_ch_iterator.send(INCREMENT_NUM_ROUNDS)
                        msg += "\n||| probability-one characteristic found"
                    if nr is not True:
                        try:
                            num_rounds_found, ch_found = next(round_ch_iterator)
                        except StopIteration:
                            num_rounds_found, ch_found = None, None
                        if at == ChModelAssertType.ProbabilityOne:
                            # Beyond nr rounds no probability-one cipher characteristic may exist.
                            self.assertIsNone(num_rounds_found, msg=msg)
                            self.assertIsNone(ch_found, msg=msg)
                        else:
                            if nr is False:
                                # INCREMENT_NUM_ROUNDS not sent
                                target_nr = getattr(f, "_min_num_rounds", 1)
                            else:
                                target_nr = nr + 1
                            self.assertEqual(num_rounds_found, target_nr, msg=msg)
                            # Combined KS + Enc weight must be positive (not prob-one).
                            ch_weight = ch_found.ks_characteristic.ch_weight
                            ch_weight += ch_found.enc_characteristic.ch_weight
                            self.assertGreater(ch_weight, 0, msg=msg)
                            if PRINTING_MODE != PrintingMode.Silent:
                                if PRINTING_MODE == PrintingMode.WeightsAndSrepr:
                                    smart_print(ch_found.srepr())
                                else:
                                    smart_print(ch_found)
                                smart_print("\n".join(ch_found.get_formatted_logged_msgs()))
    @unittest.skipIf(SKIP_INVALID_LONG_TESTS, "test_invalid_prop_search")
    def test_invalid_prop_search(self):
        """Run the invalid-property (miss-in-the-middle) search over each cipher.

        For each cipher, searches 2*min_nr+1 total rounds with at least min_nr
        rounds on each side (E0/E2). Known-unsupported configurations raise
        specific ValueErrors and are skipped rather than failed; when a result
        is found it is only printed (no assertions on its contents).
        """
        extra_invalidpropfinder_args = {
            "printing_mode": PRINTING_MODE,
            "filename": self.__class__.filename,
        }
        if PRINTING_MODE != PrintingMode.Silent:
            smart_print = _get_smart_print(self.__class__.filename)
            smart_print(f"# {self.test_invalid_prop_search.__name__}")
        for sp in ListSearchParameters:
            f = sp.function
            min_nr = getattr(f, "_min_num_rounds", 1)
            if not issubclass(f, blockcipher.Cipher):
                continue  # KS are mostly non-permutations
            for prop_type in [XorDiff, RXDiff, LinearMask]:
                if issubclass(f, blockcipher.Cipher) and prop_type == RXDiff:
                    continue  # RXDiff not supported by EncryptionChModel
                if prop_type == LinearMask and sp.linear_nr is None:
                    continue  # most probably due to BvAddCt
                msg = f"\n## {f.__name__}, min_nr: {min_nr}, prop_type: {prop_type.__name__}"
                if PRINTING_MODE != PrintingMode.Silent:
                    smart_print(msg)
                iterator = round_based_invalidprop_search(
                    f, 2*min_nr+1, 2*min_nr+1, prop_type, "btor",
                    max_num_skipped_rounds=0,
                    min_num_E0_rounds=min_nr, min_num_E2_rounds=min_nr,
                    exclude_zero_input_prop_E0=True,
                    exclude_zero_input_prop_E2=True,
                    extra_invalidpropfinder_args=extra_invalidpropfinder_args
                )
                try:
                    tuple_rounds, tuple_chs = next(iterator)
                except StopIteration:
                    # No invalid property found for this configuration; not a failure.
                    continue
                except ValueError as e:
                    # Skip configurations the search itself declares unsupported.
                    if "pr_one_assertions() == False" in str(e):  # most probably due to RX
                        continue
                    if str(e) == "linear characteristic models of functions with unused input vars is not supported":
                        continue
                    raise e
                if PRINTING_MODE != PrintingMode.Silent:
                    if PRINTING_MODE == PrintingMode.WeightsAndSrepr:
                        smart_print(tuple_rounds, ', '.join([ch.srepr() for ch in tuple_chs]))
                    else:
                        smart_print(tuple_rounds, tuple_chs)
@unittest.skipIf(SKIP_INVALID_LONG_TESTS, "test_invalid_cipher_ch_search")
def test_invalid_cipher_ch_search(self):
extra_invalidcipherchfinder_args = {
"printing_mode": PRINTING_MODE,
"filename": self.__class__.filename,
}
if PRINTING_MODE != PrintingMode.Silent:
smart_print = _get_smart_print(self.__class__.filename)
smart_print(f"# {self.test_invalid_prop_search.__name__}")
for sp in ListSearchParameters:
f = sp.function
min_nr = getattr(f, "_min_num_rounds", 1)
if not issubclass(f, blockcipher.Cipher):
continue
for prop_type in [XorDiff, RXDiff]:
if prop_type == XorDiff and sp.ignore_cipher_xor_invalid is True:
continue
if prop_type == RXDiff and sp.ignore_cipher_rx_invalid is True:
continue
msg = | |
<gh_stars>0
import numpy as np

# Create numpy arrays, inspect their dimensions/shape, and change elements.
a = np.array([1,2,3]) # create a 1D array
print('Dimensions of the array = ',a.ndim) # print the dimensions of the array
print('Shape of the array = ', a.shape) # print the shape of the array

a = np.array([ [1,2,3] ])  # extra brackets: same data but rank 2, shape (1, 3)
print('Dimensions of the array = ', a.ndim) # print the dimensions of the array
print('Shape of the array = ', a.shape) # print the shape of the array

b = np.array([[1,2,3],[4,5,6]]) # Create a rank 2 array
print('Shape of the array = ',b.shape) # Prints "(2, 3)"
print('Print select elements of b array')
print(b[0, 0], b[0, 1], b[1, 0]) # Prints "1 2 4"
# BUG FIX: the third index here was b[0][1] (which prints "1 2 2"); use
# b[1][0] so the chained-index style matches the line above and prints "1 2 4".
print(b[0][0], b[0][1], b[1][0]) # Prints "1 2 4"
print('Modify array elements in location 0,2')
b[0,2]=999
print('New value in b[0,2] = ', b[0,2]) # b is now [[ 1 2 3]
                                        #          [ 4 5 999]]
print('New b array = ')
print(b)
print()

print('Create an array of zeros')
a = np.zeros((2,2)) # Create an array of all zeros
print(a) # Prints "[[ 0. 0.]
         #          [ 0. 0.]]"
print()

print('Create an array of ones')
b = np.ones((1,2)) # Create an array of all ones
print(b) # Prints "[[ 1. 1.]]"
print()

print('Create an identity matrix')
d = np.eye(2) # Create a 2x2 identity matrix
print(d) # Prints "[[ 1. 0.]
         #          [ 0. 1.]]"
print()

print('Create a random 2x2 array')
e = np.random.random((2,2)) # Create an array filled with random values in [0, 1)
print(e) # Might print "[[ 0.91940167 0.08143941]
         #               [ 0.68744134 0.87236687]]"
print()

print('Create a 1D random integer array between 10-100')
# FIX: np.random.random_integers was deprecated and has been removed from
# NumPy; np.random.randint is the replacement. Its upper bound is exclusive,
# so use 101 to keep 100 as a possible value.
e = np.random.randint(10, 101, 10) # 10 random integers between 10 and 100 (inclusive)
print(e)
print()

print('Create a 10x10 random integer array between 10-100')
e = np.random.randint(10, 101, (10,10)) # 100 random integers placed in a 10x10 array
print(e)
print()
wait = input("PRESS ENTER TO CONTINUE.")
#Slicing: Numpy arrays can be sliced. Since arrays may be multidimensional, you must specify a slice for each dimension of the array:
print('Create a 3x4 array')
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(a)
# [[ 1  2  3  4]
#  [ 5  6  7  8]
#  [ 9 10 11 12]]
print()

print('Pull out subarray first 2 rows and columns 1-2')
# Use slicing to pull out the subarray consisting of the first 2 rows
# and columns 1 and 2; b is the following array of shape (2, 2):
b = a[:2, 1:3] # [[2 3]
               #  [6 7]]
print(b)
print()

# Modifying a slice of an array modifies the original array:
# a slice is a *view* into the same underlying data, not a copy.
print('Modify a slice and compare to the original array')
print('Value of element 0,1 of a is',a[0, 1]) # Prints "2"
print('Modify 0,0 of array b to value 77')
b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1]
print('Value of element 0,1 of a is now ',a[0, 1]) # Prints "77"
print()

# Re-create the rank 2 array with shape (3, 4) (undoes the modification above);
# it is reused by the row/column extraction examples that follow.
# [[ 1  2  3  4]
#  [ 5  6  7  8]
#  [ 9 10 11 12]]
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print()
wait = input("PRESS ENTER TO CONTINUE.")
# Two ways of accessing the data in the middle row of the array.
# Mixing integer indexing with slices yields an array of lower rank,
# while using only slices yields an array of the same rank as the
# original array:
row_r1 = a[1, :]   # Rank 1 view of the second row of a
row_r2 = a[1:2, :] # Rank 2 view of the second row of a
print('Print extracted row, shape and dims = ',row_r1, row_r1.shape,row_r1.ndim) # Prints "[5 6 7 8] (4,) 1"
print('Print extracted row, shape and dims = ',row_r2, row_r2.shape,row_r2.ndim) # Prints "[[5 6 7 8]] (1, 4) 2"

# We can make the same distinction when accessing columns of an array:
col_r1 = a[:, 1]   # Rank 1 view of the second column
col_r2 = a[:, 1:2] # Rank 2 view of the second column
print('Print extracted column = ') # Prints "[ 2 6 10]"
print(col_r1)
print('Print shape and dims of extracted column =', col_r1.shape,col_r1.ndim) # Prints "(3,) 1"
print('Print extracted column = ') # Prints "[[ 2]
                                   #          [ 6]
                                   #          [10]]"
print(col_r2)
# BUG FIX: this line printed col_r1.ndim (1) instead of col_r2.ndim; the
# intended output "(3, 1) 2" shows the rank-2 view's dimensions were meant.
print('Print shape and dims of extracted column = ', col_r2.shape,col_r2.ndim) # Print "(3, 1) 2"
wait = input("PRESS ENTER TO CONTINUE.")
#Array indexing: integer-array ("fancy") indexing with a[rows, cols] picks the
# element (rows[i], cols[i]) for every i, i.e. one element per row here.
print('Create an 4x3 integer array')
a = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
print('Shape of the array is =', a.shape)
print()

# Create an array of indices (one column index per row of a)
print('Create 1D index array')
b = np.array([0, 2, 0, 1])
print(b)
print()

# Select one element from each row of a using the indices in b
print('Create a range of indices from 0-3 using arange = ', np.arange(4))
print('Use this range to reference indices of the a array')
print(a[np.arange(4), b]) # Prints "[ 1 6 7 11]"

# Mutate one element from each row of a using the indices in b
# (fancy indexing on the left-hand side writes back into a)
print('Modify the indexed elements by adding 10')
a[np.arange(4), b] += 10
print('Modified array then becomes = ',a) # prints "array([[11,  2,  3],
                                          #                [ 4,  5, 16],
                                          #                [17,  8,  9],
                                          #                [10, 21, 12]])
wait = input("PRESS ENTER TO CONTINUE.")
#Reference elements by boolean masks
print('Create an 3x2 numpy array')
a = np.array([[1,2], [3, 4], [5, 6]])
print('Apply a logical mask for all elements that larger than 2')
bool_idx = (a > 2) # Find the elements of a that are bigger than 2;
                   # this returns a numpy array of Booleans of the same
                   # shape as a, where each slot of bool_idx tells
                   # whether that element of a is > 2.
print(bool_idx) # Prints "[[False False]
                #          [ True  True]
                #          [ True  True]]"

# We use boolean array indexing to construct a rank 1 array
# consisting of the elements of a corresponding to the True values
# of bool_idx
print('Extracted array elements where boolean condition is true = ',a[bool_idx]) # Prints "[3 4 5 6]"

# We can do all of the above in a single concise statement:
# the comparison (a > 2) can be written directly inside the brackets.
print('The boolean condition can be an input to the array')
print(a[a > 2]) # Prints "[3 4 5 6]"
wait = input("PRESS ENTER TO CONTINUE.")
#Some arithmetic: elementwise operations first, then vector/matrix products.
print('Create two 2x2 arrays')
x = np.array([[1,2],[3,4]], dtype=np.float64)
y = np.array([[5,6],[7,8]], dtype=np.float64)
print()

# Elementwise sum; both `x + y` and np.add produce the array
# [[ 6.0  8.0]
#  [10.0 12.0]]
#print x + y
print('Add them')
print(np.add(x, y))
print()

# Elementwise difference; both produce the array
# [[-4.0 -4.0]
#  [-4.0 -4.0]]
#print x - y
print('Subtracted them')
print(np.subtract(x, y))
print()

# Elementwise product (NOT matrix multiplication); both produce the array
# [[ 5.0 12.0]
#  [21.0 32.0]]
#print x * y
print('Multiply by elements')
print(np.multiply(x, y))
print()

# Elementwise division; both produce the array
# [[ 0.2        0.33333333]
#  [ 0.42857143 0.5       ]]
#print x / y
print('Divide by elements')
print(np.divide(x, y))

# Elementwise square root; produces the array
# [[ 1.          1.41421356]
#  [ 1.73205081  2.        ]]
print('Square root')
print(np.sqrt(x))

# Re-create x and y as integer arrays for the dot-product examples below.
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
print()

print('Create two 1D arrays and treat them as vectors')
v = np.array([9,10])
print(v)
w = np.array([11, 12])
print(w)
print()

# Inner product of vectors; both v.dot(w) and np.dot produce 219
#print v.dot(w)
print('Inner product = ',np.dot(v, w))

# Matrix / vector product; both produce the rank 1 array [29 67]
#print x.dot(v)
print('Matrix * vector product =',np.dot(x, v))

# Matrix / matrix product; both produce the rank 2 array
# [[19 22]
#  [43 50]]
#print(x.dot(y))
print('Matrix * Matrix product')
print(np.dot(x, y))
wait = input("PRESS ENTER TO CONTINUE.")
print('Create a 2x2 array')
x = np.array([[1,2],[3,4]])
print(x)
print()

# np.sum reduces over the whole array by default; axis=0 sums down the
# columns, axis=1 sums across the rows.
print('Sum of all elements = ',np.sum(x)) # Compute sum of all elements; prints "10"
print('Sum of all columns =',np.sum(x, axis=0)) # Compute sum of each column; prints "[4 6]"
print('Sum of all rows = ',np.sum(x, axis=1)) # Compute sum of each row; prints "[3 7]"

x = np.array([[1,2], [3,4]])
#print x # Prints "[[1 2]
#                   [3 4]]"
# Two equivalent ways to transpose: the .T attribute and np.transpose().
print('Transpose Matrix = ',x.T) # Prints "[[1 3]
                                 #          [2 4]]"
print('Transpose Matrix (alt)= ',np.transpose(x)) # transpose matrix x using transpose
wait = input("PRESS ENTER TO CONTINUE.")
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y.
# This is the explicit-loop version; later sections show how NumPy
# can do the same without a Python loop.
print('Create a 4x3 array')
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
print(x)
print()

print('Create a vector with the same number of row elements')
v = np.array([1, 0, 1])
print(v)
print()

print('Create an empty matrix with the same shape as x')
y = np.empty_like(x) # Create an empty matrix with the same shape as x
print(y)             # (contents are uninitialized until the loop below fills it)
print()

# Add the vector v to each row of the matrix x with an explicit loop
print('Add the vector to each extracted row i x[i,:]')
for i in range(4):
    y[i, :] = x[i, :] + v

# Now y is the following
# [[ 2  2  4]
#  [ 5  5  7]
#  [ 8  8 10]
#  [11 11 13]]
print('Modified array')
print(y)
print()
wait = input("PRESS ENTER TO CONTINUE.")
print('Add vector to all matrix rows using tile')
# We will add the vector v to each row | |
1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0],
[0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1],
[0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0],
[1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
[0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, | |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.sources.finder Contains the SourceFinder class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
# Import the relevant PTS classes and modules
from .galaxyfinder import GalaxyFinder
from .starfinder import StarFinder
from .trainedfinder import TrainedFinder
from ..basics.mask import Mask
from ..catalog.builder import CatalogBuilder
from ..catalog.synchronizer import CatalogSynchronizer
from ..tools import wavelengths
from ...core.tools import tables
from ...core.basics.configurable import OldConfigurable
from ...core.tools.logging import log
# -----------------------------------------------------------------
class SourceFinder(OldConfigurable):
"""
This class ...
"""
def __init__(self, config=None):
    """
    Construct the SourceFinder.
    :param config: optional configuration object passed on to the base class
    :return:
    """
    # Initialize the base class under the 'magic' subproject
    super(SourceFinder, self).__init__(config, "magic")
    # -- Attributes --
    # Frame, original WCS (kept when downsampling), the galactic and stellar
    # catalogs, the special/ignore/bad masks, the animation and the name of
    # the principal galaxy all start out unset
    for attribute in ("frame", "original_wcs", "galactic_catalog", "stellar_catalog",
                      "special_mask", "ignore_mask", "bad_mask", "animation", "galaxy_name"):
        setattr(self, attribute, None)
    # Number of pixels padded along each axis before downsampling
    self.pad_x = 0
    self.pad_y = 0
# -----------------------------------------------------------------
@classmethod
def from_arguments(cls, arguments):
    """
    Create a SourceFinder from a parsed command-line arguments object.
    :param arguments: the arguments object
    """
    # A full configuration takes precedence over loose settings; otherwise defaults
    configuration = arguments.config if arguments.config is not None else arguments.settings
    finder = cls(configuration) if configuration is not None else cls()
    # Downsampling factor
    if arguments.downsample is not None:
        finder.config.downsample_factor = arguments.downsample
    # Optionally disable the search for saturated stars
    if arguments.no_saturation:
        finder.config.stars.find_saturation = False
    # Optionally disable the search for other sources
    if arguments.no_other:
        finder.config.find_other_sources = False
    # Region describing the principal galaxy
    if arguments.principal_region is not None:
        finder.config.galaxies.principal_region = arguments.principal_region
    # Dilation of the saturation segments
    if arguments.saturation_dilation_factor is not None:
        finder.config.stars.saturation.dilate = True
        finder.config.stars.saturation.dilation_factor = arguments.saturation_dilation_factor
    # Return the new instance
    return finder
# -----------------------------------------------------------------
@property
def galaxy_region(self):
    """
    The galaxy region in pixel coordinates of the (original) frame.
    :return:
    """
    if not self.downsampled:
        return self.galaxy_finder.region
    # Downsampled: project the sky region back onto the original WCS
    region = self.galaxy_sky_region
    if region is None:
        return None
    return region.to_pixel(self.original_wcs)
# -----------------------------------------------------------------
@property
def galaxy_sky_region(self):
    """
    The galaxy region converted to sky coordinates (None when no region exists).
    :return:
    """
    region = self.galaxy_finder.region
    if region is None:
        return None
    return region.to_sky(self.frame.wcs)
# -----------------------------------------------------------------
@property
def star_region(self):
    """
    The star region in pixel coordinates of the (original) frame.
    :return:
    """
    if not self.downsampled:
        return self.star_finder.star_region
    # Downsampled: project the sky region back onto the original WCS
    region = self.star_sky_region
    if region is None:
        return None
    return region.to_pixel(self.original_wcs)
# -----------------------------------------------------------------
@property
def star_sky_region(self):
    """
    The star region converted to sky coordinates (None when no region exists).
    :return:
    """
    region = self.star_finder.star_region
    if region is None:
        return None
    return region.to_sky(self.frame.wcs)
# -----------------------------------------------------------------
@property
def saturation_region(self):
    """
    The saturation region in pixel coordinates of the (original) frame.
    :return:
    """
    if not self.downsampled:
        return self.star_finder.saturation_region
    # Downsampled: project the sky region back onto the original WCS
    region = self.saturation_sky_region
    if region is None:
        return None
    return region.to_pixel(self.original_wcs)
# -----------------------------------------------------------------
@property
def saturation_sky_region(self):
    """
    The saturation region converted to sky coordinates (None when no region exists).
    :return:
    """
    region = self.star_finder.saturation_region
    if region is None:
        return None
    return region.to_sky(self.frame.wcs)
# -----------------------------------------------------------------
@property
def other_region(self):
    """
    The region of other sources in pixel coordinates of the (original) frame.
    :return:
    """
    if not self.downsampled:
        return self.trained_finder.region
    # Downsampled: project the sky region back onto the original WCS
    region = self.other_sky_region
    if region is None:
        return None
    return region.to_pixel(self.original_wcs)
# -----------------------------------------------------------------
@property
def other_sky_region(self):
    """
    The region of other sources converted to sky coordinates (None when no region exists).
    :return:
    """
    region = self.trained_finder.region
    if region is None:
        return None
    return region.to_sky(self.frame.wcs)
# -----------------------------------------------------------------
@property
def galaxy_segments(self):
    """
    The galaxy segmentation map, at the original resolution when downsampled.
    :return:
    """
    segments = self.galaxy_finder.segments
    # Nothing found, or working at the original resolution: return as-is
    if segments is None or not self.downsampled:
        return segments
    # Upsample back to the original resolution and strip the padding
    result = segments.upsampled(self.config.downsample_factor, integers=True)
    result.unpad(self.pad_x, self.pad_y)
    return result
# -----------------------------------------------------------------
@property
def star_segments(self):
    """
    The star segmentation map, at the original resolution when downsampled.
    :return:
    """
    segments = self.star_finder.segments
    # Nothing found, or working at the original resolution: return as-is
    if segments is None or not self.downsampled:
        return segments
    # Upsample back to the original resolution and strip the padding
    result = segments.upsampled(self.config.downsample_factor, integers=True)
    result.unpad(self.pad_x, self.pad_y)
    return result
# -----------------------------------------------------------------
@property
def other_segments(self):
    """
    The segmentation map of other sources, at the original resolution when downsampled.
    :return:
    """
    segments = self.trained_finder.segments
    # Nothing found, or working at the original resolution: return as-is
    if segments is None or not self.downsampled:
        return segments
    # Upsample back to the original resolution and strip the padding
    result = segments.upsampled(self.config.downsample_factor, integers=True)
    result.unpad(self.pad_x, self.pad_y)
    return result
# -----------------------------------------------------------------
@property
def fwhm(self):
    """
    The FWHM as determined by the star finder.
    :return:
    """
    return self.star_finder.fwhm
# -----------------------------------------------------------------
def run(self, frame, galactic_catalog, stellar_catalog, special_region=None, ignore_region=None, bad_mask=None, animation=None):
    """
    Run the complete source finding procedure on the given frame.
    :param frame: the image frame to search
    :param galactic_catalog: catalog of galaxies
    :param stellar_catalog: catalog of stars
    :param special_region: region converted into the 'special' mask (may be None)
    :param ignore_region: region converted into the 'ignore' mask (may be None)
    :param bad_mask: mask of bad pixels (may be None)
    :param animation: optional animation object
    :return:
    """
    # 1. Prepare the children, the frame and the masks
    self.setup(frame, galactic_catalog, stellar_catalog, special_region, ignore_region, bad_mask, animation)
    # 2. Locate the galaxies
    self.find_galaxies()
    # 3. Locate the stars, when enabled
    if self.config.find_stars:
        self.find_stars()
    # 4. Locate any remaining sources, when enabled
    if self.config.find_other_sources:
        self.find_other_sources()
    # 5. Construct the catalog and push updates
    self.build_and_synchronize_catalog()
# -----------------------------------------------------------------
def clear(self):
    """
    Reset the finder to its freshly-constructed state.
    :return:
    """
    # Base class implementation removes the children
    super(SourceFinder, self).clear()
    # Set default values for all attributes
    self.frame = None
    self.original_wcs = None
    self.galactic_catalog = None
    self.stellar_catalog = None
    self.special_mask = None
    self.ignore_mask = None
    self.bad_mask = None
    self.animation = None
    self.galaxy_name = None
    # BUGFIX: also reset the padding counters; __init__ initializes them to 0
    # and the *_segments properties use them for unpad(), so stale values from
    # a previous (downsampled) run would corrupt results when the finder is reused
    self.pad_x = 0
    self.pad_y = 0
# -----------------------------------------------------------------
def setup(self, frame, galactic_catalog, stellar_catalog, special_region, ignore_region, bad_mask=None, animation=None):
    """
    Set up the source finder: create the child components, call the base class
    setup, optionally pad + downsample the frame, and store the catalogs and masks.
    :param frame: the image frame to search
    :param galactic_catalog: catalog of galaxies
    :param stellar_catalog: catalog of stars
    :param special_region: sky region converted into the 'special' mask (may be None)
    :param ignore_region: sky region converted into the 'ignore' mask (may be None)
    :param bad_mask: mask of bad pixels (may be None)
    :param animation: optional animation object
    :return:
    """
    # -- Create children --
    self.add_child("galaxy_finder", GalaxyFinder, self.config.galaxies)
    self.add_child("star_finder", StarFinder, self.config.stars)
    self.add_child("trained_finder", TrainedFinder, self.config.other_sources)
    self.add_child("catalog_builder", CatalogBuilder, self.config.building)
    self.add_child("catalog_synchronizer", CatalogSynchronizer, self.config.synchronization)
    # -- Setup of the base class --
    # Call the setup function of the base class
    super(SourceFinder, self).setup()
    # Inform the user
    log.info("Setting up the source finder ...")
    # Make sure the downsample factor is a float (I don't know if this is necessary)
    #self.config.downsample_factor = float(self.config.downsample_factor) if self.downsampled else None
    # CHECK WHETHER THE DOWNSAMPLE FACTOR IS AN INTEGER
    # Downsample or just make a local reference to the image frame
    if self.downsampled:
        # The factor must represent a whole number; store it as int afterwards
        int_factor = int(self.config.downsample_factor)
        if not int_factor == self.config.downsample_factor: raise ValueError("The downsample factor must be an integer")
        self.config.downsample_factor = int_factor
        # Debugging
        log.debug("Downsampling the original image with a factor of " + str(self.config.downsample_factor) + " ...")
        # Padding: the frame dimensions must become a multiple of the factor
        div_x = frame.xsize / self.config.downsample_factor
        div_y = frame.ysize / self.config.downsample_factor
        # new xsize and ysize (rounded up to the next multiple of the factor)
        new_xsize = int(math.ceil(div_x)) * self.config.downsample_factor
        new_ysize = int(math.ceil(div_y)) * self.config.downsample_factor
        # Number of pixels to be padded (used later to unpad upsampled segment maps)
        self.pad_x = new_xsize - frame.xsize
        self.pad_y = new_ysize - frame.ysize
        # Debugging
        log.debug("Number of pixels padded before downsampling: (" + str(self.pad_x) + ", " + str(self.pad_y) + ")")
        # Pad pixels to make it a multiple of the downsampling factor
        self.frame = frame.padded(nx=self.pad_x, ny=self.pad_y)
        #self.frame = frame.downsampled(self.config.downsample_factor)
        # Downsample in place; keep the WCS of the original frame for back-projection
        self.frame.downsample(self.config.downsample_factor)
        self.original_wcs = frame.wcs
        # Debugging
        log.debug("Shape of the downsampled image: " + str(self.frame.shape) + " (original shape: " + str(frame.shape) + ")")
        # Rescale pixel-based settings of the child finders to the new resolution
        self.adjust_configs_for_downsampling()
    else: self.frame = frame
    # Set the galactic and stellar catalog
    self.galactic_catalog = galactic_catalog
    self.stellar_catalog = stellar_catalog
    # Set the special and ignore mask (converted to the working frame's pixel grid)
    if special_region is not None:
        special_region_pix = special_region.to_pixel(self.frame.wcs)
        self.special_mask = Mask.from_region(special_region_pix, self.frame.xsize, self.frame.ysize)
    if ignore_region is not None:
        ignore_region_pix = ignore_region.to_pixel(self.frame.wcs)
        self.ignore_mask = Mask.from_region(ignore_region_pix, self.frame.xsize, self.frame.ysize)
    # Set a reference to the mask of bad pixels
    self.bad_mask = bad_mask
    # Make a reference to the animation
    self.animation = animation
# -----------------------------------------------------------------
@property
def downsampled(self):
    """
    Whether the frame is (to be) downsampled: a downsample factor is set
    and differs from one.
    :return:
    """
    # BUGFIX: the original compared 'self.config.downsample', an attribute that is
    # never set anywhere in this class; every other access goes through
    # 'downsample_factor' (see from_arguments, setup, adjust_configs_for_downsampling)
    factor = self.config.downsample_factor
    return factor is not None and factor != 1
# -----------------------------------------------------------------
def adjust_configs_for_downsampling(self):
    """
    Rescale the pixel-based settings of the child finders to the downsampled
    resolution (lengths divide by the factor, pixel counts divide and round up).
    :return:
    """
    factor = self.config.downsample_factor
    # GALAXY FINDER
    galaxies = self.galaxy_finder.config
    galaxies.detection.initial_radius /= factor
    galaxies.detection.min_pixels = int(math.ceil(galaxies.detection.min_pixels / factor))
    galaxies.detection.kernel.fwhm /= factor
    galaxies.region.default_radius /= factor
    # STAR FINDER
    stars = self.star_finder.config
    stars.fetching.min_distance_from_galaxy.principal /= factor
    stars.fetching.min_distance_from_galaxy.companion /= factor
    stars.fetching.min_distance_from_galaxy.other /= factor
    stars.detection.initial_radius /= factor
    stars.detection.minimum_pixels = int(math.ceil(stars.detection.minimum_pixels / factor))
    stars.detection.peak_offset_tolerance /= factor
    stars.detection.convolution_fwhm /= factor
    stars.fitting.minimum_pixels = int(math.ceil(stars.fitting.minimum_pixels / factor))
    stars.fitting.max_model_offset /= factor
    stars.saturation.min_pixels = int(math.ceil(stars.saturation.min_pixels / factor))
    stars.saturation.kernel.fwhm /= factor
    stars.saturation.apertures.max_offset /= factor
    # TRAINED FINDER: no pixel-based settings to adjust
# -----------------------------------------------------------------
def find_galaxies(self):
    """
    Locate the galaxies in the frame by delegating to the galaxy finder child.
    """
    log.info("Finding the galaxies ...")
    # Run the galaxy finder, passing along all masks
    self.galaxy_finder.run(self.frame, self.galactic_catalog, special=self.special_mask,
                           ignore=self.ignore_mask, bad=self.bad_mask)
    # Remember which galaxy is the principal one
    self.galaxy_name = self.galaxy_finder.principal.name
    log.success("Finished finding the galaxies")
# -----------------------------------------------------------------
def find_stars(self):
"""
This function ...
"""
# Run the star finder if the wavelength of this image is smaller than 25 micron (or the wavelength is unknown)
if self.frame.wavelength is None or self.frame.wavelength < wavelengths.ranges.ir.mir.max:
# Inform the user
log.info("Finding the | |
or filtered through a sum of gaussians as in the original Matlab implementation.
Parameters:
wn: sorted array of wavenumbers (high-to-low or low-to-high)
app: apparent spectrum, shape (pixels, wavenumbers)
ref: reference spectrum; array (wavenumbers)
n_components: number of principal components to be calculated for the extinction matrix
iterations: number of iterations of the algorithm
clusters: if not None, cluster pixels into this many clusters in each iteration and use
a common reference spectrum for each cluster. May be given as a list with one value per
iteration, in which case 0 means to reuse clusters from the previous iteration and mix
new/old references for stable convergence.
If clusters is negative, use stable_rmiesc_clusters to generate the list.
verbose: print progress information
a: indexes of refraction to use in model
d: sphere sizes to use in model, in micrometers
bvals: number of values for the model parameter b
plot: produce plots of the cluster references, if in cluster mode
progressCallback(int a, int b): callback function called to indicated that the processing
is complete to a fraction a/b.
konevskikh: if True, use the faster method by Konevskikh et al.
linearcomponent: if True, include a linear term in the model (used in Bassan's paper only).
weighted: if true, downweight the 1800-2800 region when fitting the model.
renormalize: if True, renormalize spectra against reference in each generation.
autoiterations: if True, iterate until residuals stop improving
targetrelresiduals: if autoiterations, stop when this relative change in residuals is seen
Return: corrected apparent spectra (the best encountered if autoiterations, else the final ones)
"""
# Make a rescaled copy of d and include the factor 4*pi
d = d * 4e-4 * np.pi;
# The input can be a single spectrum or a matrix of spectra. If the former, squeeze at the end.
squeeze = False
if app.ndim == 1:
app = app[None,:]
squeeze = True
if weighted:
weights = np.ones_like(wn)
weights[range(*find_wn_ranges(wn, [[1800, 2800]])[0])] = .001 ** .5
weights = weights[:, None]
else:
weights = None
if plot:
plt.figure()
color=plt.cm.jet(np.linspace(0, 1, iterations))
plt.plot(wn, app.mean(0), 'k', linewidth=.5)
if np.isscalar(clusters):
if clusters == 0:
clusters = None
elif clusters < 0:
clusters = stable_rmiesc_clusters(iterations, -clusters)
iterations = len(clusters)
else:
clusters = np.repeat(clusters, iterations)
elif clusters is not None:
if len(clusters) != iterations:
raise ValueError('len(clusters) must match iterations')
clusters = clusters.copy()
if progressCallback:
# Compute the number of progress steps
progressA = 0
if clusters is None:
progressB = 1 + (iterations > 1) * len(app)
else:
progressB = 0
prev = 1
for cl in clusters:
if cl > 0:
prev = cl
progressB += prev
startt = monotonic()
corrected = None # Just to get rid of warnings in the editor; will be set on iteration 0
# Set parameters for automatic iteration control
if renormalize:
autoupadd = 3 # Residual going up counts as residual going down too little this many times
automax = 3 # Stop when residual has gone down too little this many times
else:
autoupadd = 1
automax = 5
if clusters is not None:
# Cluster mode: In each iteration, after correcting all the spectra, cluster them. Then take the
# mean of the corrected spectra in each cluster as the new reference for that cluster in the next
# iteration.
ref = ref.copy()[None, :] # One reference per cluster
ref = ref / (np.abs(ref).mean() / np.abs(app).mean())
labels = np.zeros(len(app)) # Cluster labels; initially all in cluster 0
# clusters[-1] = 0
progstep = 1 # Current progress bar step size
for iteration in range(iterations):
gc.collect() # Because my old laptop was unhappy with RAM usage otherwise
curc = clusters[iteration] # Current cluster size setting
if curc > 0:
progstep = curc
# Skip this iteration if every spectrum has stopped improving and the cluster settings
# are unchanged
if autoiterations:
if not iteration or curc != clusters[iteration-1]:
unimproved = np.zeros(len(app), dtype=int)
elif (unimproved <= automax).sum() == 0:
progressA += progstep
if progressCallback:
progressCallback(progressA, progressB)
# print('progX',progressA,progressB)
continue
# Possibly recluster the spectra and compute reference spectra
if iteration == 0:
pass
elif curc > 0:
if autoiterations:
notdone = unimproved <= automax
nds = notdone.sum()
curc = min(curc, int(nds))
labels = np.zeros(len(app)) - 1
if curc == nds:
labels[notdone] = range(0, nds)
elif curc > 1:
kmeans = sklearn.cluster.MiniBatchKMeans(curc)
labels[notdone] = kmeans.fit_predict(corrected[notdone,:])
else:
labels[notdone] = 0
else:
if curc > 1:
kmeans = sklearn.cluster.MiniBatchKMeans(curc)
labels = kmeans.fit_predict(corrected)
else:
labels = np.zeros(len(app), dtype=int)
if(len(ref) != curc):
ref = np.zeros((curc, len(wn)))
for cl in range(curc):
sel = labels == cl
if sel.sum() == 0:
print('Info: empty cluster at %d, %d' % (iteration, cl))
else:
ref[cl,:] = corrected[sel].mean(0)
else:
# Mix old reference and corrected spectrum. This requires the clusters
# to remain unchanged.
if autoiterations:
labels[unimproved > automax] = -1 # Exclude all that are done already
for cl in range(len(ref)):
sel = labels == cl
if sel.sum() > 0:
ref[cl,:] = .5 * corrected[sel].mean(0) + .5 * ref[cl,:]
if plot:
plt.plot(wn, ref.T, c=color[iteration], linewidth=.5)
if progressPlotCallback:
progressPlotCallback(ref, (iteration, iterations))
ref[ref < 0] = 0
if iteration == 0 :
projs = [np.dot(app[i], ref[0].T)*(ref[0]/(ref[0] @ ref[0])) for i in range(len(app))]
else :
projs = [np.dot(app[i], corrected[i].T)*(corrected[i]/(corrected[i] @ corrected[i])) for i in range(len(app))]
projs = np.array(projs)
app_deref = app - projs
for cl in range(len(ref)):
ix = np.where(labels == cl)[0] # Indexes of spectra in this cluster
if autoiterations:
ix = ix[unimproved[ix] <= automax]
if ix.size:
model0 = compute_model(wn, ref[cl], n_components, a, d, bvals,
konevskikh=konevskikh, linearcomponent=linearcomponent,
variancelimit=pcavariancelimit)
#print(np.shape(corrected), np.shape(app))
if plot:
plt.figure()
plt.plot(projs[0], label="Proj")
plt.plot(app[0, :] - projs[0], label='Difference')
plt.plot(app[0, :], label='App')
plt.plot(model0[0, :], label='Reference')
if iteration :
plt.plot(corrected[0, :], label='Prev')
plt.legend()
plt.show()
model = model0[1:, :] #Then we don't need the reference part of the model
if weights is None:
cons = np.linalg.lstsq(model.T, app_deref[ix].T, rcond=None)[0]
else:
cons = np.linalg.lstsq(model.T * weights, app_deref[ix].T * weights, rcond=None)[0]
corrs = app[ix] - cons.T @ model
if renormalize:
corrs = corrs / cons[0, :, None]
resids = ((corrs - projs[ix])**2).sum(1) #We compare to the previous correction, not the reference
if iteration == 0:
corrected = corrs
residuals = resids
nimprov = len(resids)
else:
improved = resids < residuals[ix]
iximp = ix[improved] # Indexes of improved spectra
if autoiterations:
impmore = resids[improved] < residuals[iximp] * targetrelresiduals
unimproved[iximp[impmore]] = 0
unimproved[iximp[np.logical_not(impmore)]] += 1
unimproved[ix[np.logical_not(improved)]] += autoupadd
corrected[iximp, :] = corrs[improved, :]
residuals[iximp] = resids[improved]
nimprov = improved.sum()
if verbose:
print("iter %3d, cluster %3d (%5d px): avgres %7.3g imprvd %4d time %f" %
(iteration, cl, len(ix), resids.mean(), nimprov, monotonic()-startt))
if progressCallback:
progressCallback(progressA + cl + 1, progressB)
if progressCallback:
progressA += progstep
if len(ref) < progstep:
progressCallback(progressA, progressB)
# print('progY',progressA,progressB)
else:
# For efficiency, compute the model from the input reference spectrum only once
model = compute_model(wn, ref, n_components, a, d, bvals, konevskikh=konevskikh,
linearcomponent=linearcomponent, variancelimit=pcavariancelimit)
if weights is None:
cons = np.linalg.lstsq(model.T, app.T, rcond=None)[0]
else:
cons = np.linalg.lstsq(model.T * weights, app.T * weights, rcond=None)[0]
corrected = app - cons[1:, :].T @ model[1:, :]
if renormalize:
corrected = corrected / cons[0, :, None]
if autoiterations:
residuals = ((corrected - model[0, :])**2).sum(1)
if progressPlotCallback:
progressPlotCallback(ref, (0, len(app) + 1))
if verbose:
print("all pixels, iter %2d time %f" % (0, monotonic()-startt))
if progressCallback:
progressA += 1
progressCallback(progressA, progressB)
if iterations > 1:
for s in range(len(app)):
gc.collect()
unimproved = 0
ref = corrected[s, :] # Corrected spectrum as new reference
for iteration in range(1, iterations):
ref[ref < 0] = 0. # No negative values in reference spectrum
model = compute_model(wn, ref, n_components, a, d, bvals,
konevskikh=konevskikh, linearcomponent=linearcomponent,
variancelimit=pcavariancelimit)
if weights is None:
cons = np.linalg.lstsq(model.T, app[s], rcond=None)[0]
else:
cons = np.linalg.lstsq(model.T * weights, app[s] * weights[:, 0], rcond=None)[0]
corr = app[s] - cons[1:] @ model[1:, :]
if renormalize:
corr = corr / cons[0]
print("pixel %5d: iter %3d residual %7.3g " %
(s, iteration+1, ((corr - model[0, :])**2).sum()))
if autoiterations:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.