input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<filename>layout/routing/grid.py
# -*- coding: utf-8 -*-
"""This module defines the RoutingGrid class.
"""
from typing import TYPE_CHECKING, Sequence, Union, Tuple, List, Optional, Dict, Any
import numpy as np
from ..util import BBox
from bag.util.search import BinaryIterator
from bag.math import lcm
if TYPE_CHECKING:
from bag.layout.core import TechInfo
class RoutingGrid(object):
"""A class that represents the routing grid.
This class provides various methods to convert between Cartesian coordinates and
routing tracks. This class assumes the lower-left coordinate is (0, 0)
the track numbers are at half-track pitch. That is, even track numbers corresponds
to physical tracks, and odd track numbers corresponds to middle between two tracks.
This convention is chosen so it is easy to locate a via for 2-track wide wires, for
example.
Assumptions:
1. the pitch of all layers evenly divides the largest pitch.
Parameters
----------
tech_info : bag.layout.core.TechInfo
the TechInfo instance used to create metals and vias.
layers : list[int]
list of available routing layers. Must be in increasing order.
spaces : list[float]
list of track spacings for each layer.
widths : list[float]
list of minimum track widths for each layer.
bot_dir : str
the direction of the bottom-most layer. Either 'x' for horizontal tracks or 'y' for
vertical tracks.
max_num_tr : int or list[int]
maximum track width in number of tracks. Can be given as an integer (which applies to
all layers), our a list to specify maximum width per layer.
"""
def __init__(self,  # type: RoutingGrid
             tech_info,  # type: TechInfo
             layers,  # type: Sequence[int]
             spaces,  # type: Sequence[float]
             widths,  # type: Sequence[float]
             bot_dir,  # type: str
             max_num_tr=1000,  # type: Union[int, Sequence[int]]
             width_override=None,  # type: Dict[int, Dict[int, float]]
             ):
    # type: (...) -> None
    """Initialize the routing grid from per-layer track parameters.

    Raises
    ------
    ValueError
        if spaces/widths/max_num_tr do not have one entry per layer.
    """
    # error checking: every per-layer list must line up with the layer list.
    num_layer = len(layers)
    if len(spaces) != num_layer:
        raise ValueError('spaces length = %d != %d' % (len(spaces), num_layer))
    if len(widths) != num_layer:
        # fixed: this error message previously said 'spaces length'
        raise ValueError('widths length = %d != %d' % (len(widths), num_layer))
    if isinstance(max_num_tr, int):
        # a single integer applies to every layer
        max_num_tr = [max_num_tr] * num_layer
    elif len(max_num_tr) != num_layer:
        raise ValueError('max_num_tr length = %d != %d' % (len(max_num_tr), num_layer))
    self._tech_info = tech_info
    self._resolution = tech_info.resolution
    self._layout_unit = tech_info.layout_unit
    self._flip_parity = {}
    self._ignore_layers = set()
    self.layers = []
    self.sp_tracks = {}
    self.w_tracks = {}
    self.offset_tracks = {}
    self.dir_tracks = {}
    self.max_num_tr_tracks = {}
    self.block_pitch = {}
    self.w_override = {}
    self.private_layers = []
    cur_dir = bot_dir
    for lay, sp, w, max_num in zip(layers, spaces, widths, max_num_tr):
        self.add_new_layer(lay, sp, w, cur_dir, max_num_tr=max_num, is_private=False)
        # alternate track direction between adjacent routing layers
        cur_dir = 'y' if cur_dir == 'x' else 'x'
    self.update_block_pitch()
    # add width overrides
    if width_override is not None:
        for layer_id, w_info in width_override.items():
            for width_ntr, tr_w in w_info.items():
                self.add_width_override(layer_id, width_ntr, tr_w)
def __contains__(self, layer):
# type: (int) -> bool
"""Returns True if this RoutingGrid contains the given layer. """
return layer in self.sp_tracks
@classmethod
def get_middle_track(cls, tr1, tr2, round_up=False):
    # type: (Union[float, int], Union[float, int], bool) -> Union[float, int]
    """Return the track index midway between tr1 and tr2.

    The midpoint is snapped to the half-track grid; a quarter-track
    midpoint rounds down unless round_up is True.  Integer-track results
    are returned as int, half-track results as float.
    """
    # work in doubled half-track units: htr_sum = 2 * (tr1 + tr2)
    htr_sum = int(round((tr1 + tr2) * 2))
    quot, rem = divmod(htr_sum, 4)
    if rem == 0:
        # exact integer track
        return quot
    if rem == 2:
        # exact half track
        return quot + 0.5
    # quarter track: snap to the nearest half-track grid point
    if rem == 1:
        return quot + 0.5 if round_up else quot
    return quot + 1 if round_up else quot + 0.5
def _get_track_offset(self, layer_id):
# type: (int) -> int
"""Returns the track offset in resolution units on the given layer."""
track_pitch = self.get_track_pitch(layer_id, unit_mode=True)
return self.offset_tracks.get(layer_id, track_pitch // 2)
def get_flip_parity(self):
    # type: () -> Dict[int, Tuple[int, int]]
    """Return a shallow copy of the per-layer flip parity dictionary."""
    return dict(self._flip_parity)
def get_bot_common_layer(self, inst_grid, inst_top_layer):
    # type: (RoutingGrid, int) -> int
    """Given an instance's RoutingGrid, return the bottom common layer ID.

    Scans downward from inst_top_layer.  Two grids agree on a layer when
    both (or neither) define it and, if defined, the track width, spacing,
    and direction all match; the first disagreement stops the scan.

    Parameters
    ----------
    inst_grid : RoutingGrid
        the instance's RoutingGrid object.
    inst_top_layer : int
        the instance top layer ID.

    Returns
    -------
    bot_layer : int
        the bottom common layer ID.
    """
    lowest = self.layers[0]
    cur_lay = inst_top_layer
    while cur_lay >= lowest:
        in_self = cur_lay in self.layers
        in_inst = cur_lay in inst_grid.layers
        if in_self != in_inst:
            # only one of the two grids defines this layer
            return cur_lay + 1
        if in_self:
            w_s, sp_s = self.get_track_info(cur_lay, unit_mode=True)
            w_i, sp_i = inst_grid.get_track_info(cur_lay, unit_mode=True)
            if (w_s, sp_s) != (w_i, sp_i) or \
                    self.get_direction(cur_lay) != inst_grid.get_direction(cur_lay):
                return cur_lay + 1
        cur_lay -= 1
    return lowest
def get_flip_parity_at(self,  # type: RoutingGrid
                       bot_layer,  # type: int
                       top_layer,  # type: int
                       loc,  # type: Tuple[Union[int, float], Union[int, float]]
                       orient,  # type: str
                       unit_mode=False,  # type: bool
                       ):
    # type: (...) -> Dict[int, Tuple[int, int]]
    """Compute the flip parity dictionary for an instance placed at the given location.

    Parameters
    ----------
    bot_layer : int
        the bottom layer ID, inclusive.
    top_layer : int
        the top layer ID, inclusive.
    loc : Tuple[Union[int, float], Union[int, float]]
        the instance origin location.
    orient : str
        the instance orientation.
    unit_mode : bool
        True if loc is given in resolution units.

    Returns
    -------
    flip_parity : Dict[int, Tuple[int, int]]
        the flip_parity dictionary.
    """
    if unit_mode:
        xo, yo = loc
    else:
        # convert layout-unit coordinates to resolution units
        res = self._resolution
        xo, yo = int(round(loc[0] / res)), int(round(loc[1] / res))
    # NOTE(review): the scale factors are keyed by track direction rather
    # than mirror axis: 'xscale' is applied to layers whose tracks run in
    # 'x' (selected in the tdir != 'y' branch below), 'yscale' to 'y'
    # layers.  The mapping looks intentional but verify against callers.
    if orient == 'R0':
        xscale, yscale = 1, 1
    elif orient == 'MX':
        xscale, yscale = -1, 1
    elif orient == 'MY':
        xscale, yscale = 1, -1
    elif orient == 'R180':
        xscale, yscale = -1, -1
    else:
        raise ValueError('Unknown orientation: %s' % orient)
    flip_par = {}
    for lay in range(bot_layer, top_layer + 1):
        if lay in self.layers:
            tdir = self.dir_tracks[lay]
            # find the track in top level that corresponds to the track at instance origin
            if tdir == 'y':
                # vertical tracks are indexed by the X coordinate
                coord, scale = xo, yscale
            else:
                # horizontal tracks are indexed by the Y coordinate
                coord, scale = yo, xscale
            tr_idx = self.coord_to_track(lay, coord, unit_mode=True)
            # origin track index converted to a (doubled, shifted) half-track integer
            offset_htr = int(round(tr_idx * 2 + 1))
            # compose this grid's existing (scale, offset) parity transform
            # with the instance placement; offsets live modulo 4.
            cur_scale, cur_offset = self._flip_parity.get(lay, (1, 0))
            new_scale = cur_scale * scale
            new_offset = (cur_scale * offset_htr + cur_offset) % 4
            flip_par[lay] = (new_scale, new_offset)
    return flip_par
def set_flip_parity(self, fp):
    # type: (Dict[int, Tuple[int, int]]) -> None
    """Merge the given per-layer flip parity entries into this grid."""
    self._flip_parity.update(fp)
@property
def tech_info(self):
    # type: () -> TechInfo
    """The TechInfo technology object used to create metals and vias."""
    return self._tech_info
@property
def resolution(self):
    # type: () -> float
    """The grid resolution: the length of one resolution unit in layout units."""
    return self._resolution
@property
def layout_unit(self):
    # type: () -> float
    """The layout unit length, in meters."""
    return self._layout_unit
@property
def top_private_layer(self):
    # type: () -> int
    """The highest private layer ID, or -99 when no private layers exist."""
    if self.private_layers:
        return self.private_layers[-1]
    return -99
def update_block_pitch(self):
    # type: () -> None
    """Recompute the block pitch of every non-ignored layer.

    Private layers (at or below top_private_layer) and public layers are
    pitched independently of each other.
    """
    self.block_pitch.clear()
    top_priv = self.top_private_layer
    priv_layers, pub_layers = [], []
    for lay in self.layers:
        if lay in self._ignore_layers:
            continue
        (priv_layers if lay <= top_priv else pub_layers).append(lay)
    self._update_block_pitch_helper(priv_layers)
    self._update_block_pitch_helper(pub_layers)
def _update_block_pitch_helper(self, lay_list):
    # type: (Sequence[int]) -> None
    """Compute and record the block pitch for each layer in lay_list.

    Layers are processed in list order (bottom-up); a layer's block pitch
    is the LCM of its own track pitch and the pitches of all earlier
    layers in lay_list with the same track direction.  The second tuple
    entry is the same LCM computed over half track pitches.
    """
    # (pitch, half_pitch) tuples for layers processed so far, parallel to
    # the prefix of lay_list already handled.
    pitch_list = []
    for lay in lay_list:
        cur_bp = self.get_track_pitch(lay, unit_mode=True)
        cur_bp2 = cur_bp // 2
        cur_dir = self.dir_tracks[lay]
        if pitch_list:
            # the pitch of each layer = LCM of all layers below with same direction
            # (zip stops at len(pitch_list), so only previously-seen layers count)
            for play, (bp, bp2) in zip(lay_list, pitch_list):
                if self.dir_tracks[play] == cur_dir:
                    cur_bp = lcm([cur_bp, bp])
                    cur_bp2 = lcm([cur_bp2, bp2])
        result = (cur_bp, cur_bp2)
        pitch_list.append(result)
        self.block_pitch[lay] = result
def get_direction(self, layer_id):
    # type: (int) -> str
    """Return the routing direction of the given layer.

    Parameters
    ----------
    layer_id : int
        the layer ID.

    Returns
    -------
    tdir : str
        'x' for horizontal tracks, 'y' for vertical tracks.
    """
    direction = self.dir_tracks[layer_id]
    return direction
def get_track_pitch(self, layer_id, unit_mode=False):
    # type: (int, bool) -> Union[float, int]
    """Return the routing track pitch (width plus spacing) on the given layer.

    Parameters
    ----------
    layer_id : int
        the routing layer ID.
    unit_mode : bool
        True to return the pitch in resolution units.

    Returns
    -------
    track_pitch : Union[float, int]
        the track pitch; resolution units if unit_mode, else layout units.
    """
    pitch_unit = self.w_tracks[layer_id] + self.sp_tracks[layer_id]
    if unit_mode:
        return pitch_unit
    return pitch_unit * self._resolution
def get_track_width(self, layer_id, width_ntr, unit_mode=False):
# type: (int, int, bool) -> Union[float, int]
"""Calculate track width in layout units from number of tracks.
Parameters
| |
<reponame>pulumi/pulumi-kubernetes-crds
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'IBMBlockCSISpec',
'IBMBlockCSISpecController',
'IBMBlockCSISpecControllerAffinity',
'IBMBlockCSISpecControllerAffinityNodeAffinity',
'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions',
'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields',
'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms',
'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions',
'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields',
'IBMBlockCSISpecControllerAffinityPodAffinity',
'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
'IBMBlockCSISpecControllerAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
'IBMBlockCSISpecControllerAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
'IBMBlockCSISpecControllerAffinityPodAntiAffinity',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
'IBMBlockCSISpecControllerAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
'IBMBlockCSISpecControllerTolerations',
'IBMBlockCSISpecNode',
'IBMBlockCSISpecNodeAffinity',
'IBMBlockCSISpecNodeAffinityNodeAffinity',
'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions',
'IBMBlockCSISpecNodeAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields',
'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms',
'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchExpressions',
'IBMBlockCSISpecNodeAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTermsMatchFields',
'IBMBlockCSISpecNodeAffinityPodAffinity',
'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
'IBMBlockCSISpecNodeAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
'IBMBlockCSISpecNodeAffinityPodAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
'IBMBlockCSISpecNodeAffinityPodAntiAffinity',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTerm',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelector',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionPodAffinityTermLabelSelectorMatchExpressions',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecution',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelector',
'IBMBlockCSISpecNodeAffinityPodAntiAffinityRequiredDuringSchedulingIgnoredDuringExecutionLabelSelectorMatchExpressions',
'IBMBlockCSISpecNodeTolerations',
'IBMBlockCSISpecSidecars',
'IBMBlockCSIStatus',
]
@pulumi.output_type
class IBMBlockCSISpec(dict):
    """
    IBMBlockCSISpec defines the desired state of IBMBlockCSI
    """
    def __init__(__self__, *,
                 controller: 'outputs.IBMBlockCSISpecController',
                 node: 'outputs.IBMBlockCSISpecNode',
                 image_pull_secrets: Optional[Sequence[str]] = None,
                 sidecars: Optional[Sequence['outputs.IBMBlockCSISpecSidecars']] = None):
        """
        IBMBlockCSISpec defines the desired state of IBMBlockCSI
        :param 'IBMBlockCSISpecControllerArgs' controller: IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController
        :param 'IBMBlockCSISpecNodeArgs' node: IBMBlockCSINodeSpec defines the desired state of IBMBlockCSINode
        :param Sequence[str] image_pull_secrets: optional; undocumented in the CRD schema (name suggests image pull secret names -- verify against the CRD).
        :param Sequence['IBMBlockCSISpecSidecarsArgs'] sidecars: optional; undocumented in the CRD schema.
        """
        pulumi.set(__self__, "controller", controller)
        pulumi.set(__self__, "node", node)
        # optional fields are only stored when explicitly provided
        if image_pull_secrets is not None:
            pulumi.set(__self__, "image_pull_secrets", image_pull_secrets)
        if sidecars is not None:
            pulumi.set(__self__, "sidecars", sidecars)
    @property
    @pulumi.getter
    def controller(self) -> 'outputs.IBMBlockCSISpecController':
        """
        IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController
        """
        return pulumi.get(self, "controller")
    @property
    @pulumi.getter
    def node(self) -> 'outputs.IBMBlockCSISpecNode':
        """
        IBMBlockCSINodeSpec defines the desired state of IBMBlockCSINode
        """
        return pulumi.get(self, "node")
    @property
    @pulumi.getter(name="imagePullSecrets")
    def image_pull_secrets(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "image_pull_secrets")
    @property
    @pulumi.getter
    def sidecars(self) -> Optional[Sequence['outputs.IBMBlockCSISpecSidecars']]:
        return pulumi.get(self, "sidecars")
    # Map camelCase wire names to the snake_case attribute names above.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecController(dict):
    """
    IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController
    """
    def __init__(__self__, *,
                 repository: str,
                 tag: str,
                 affinity: Optional['outputs.IBMBlockCSISpecControllerAffinity'] = None,
                 image_pull_policy: Optional[str] = None,
                 tolerations: Optional[Sequence['outputs.IBMBlockCSISpecControllerTolerations']] = None):
        """
        IBMBlockCSIControllerSpec defines the desired state of IBMBlockCSIController
        :param str repository: required; undocumented in the CRD schema (name suggests the container image repository -- verify against the CRD).
        :param str tag: required; undocumented in the CRD schema (name suggests the container image tag -- verify against the CRD).
        :param 'IBMBlockCSISpecControllerAffinityArgs' affinity: Affinity is a group of affinity scheduling rules.
        :param str image_pull_policy: PullPolicy describes a policy for if/when to pull a container image
        :param Sequence['IBMBlockCSISpecControllerTolerationsArgs'] tolerations: optional; undocumented in the CRD schema.
        """
        pulumi.set(__self__, "repository", repository)
        pulumi.set(__self__, "tag", tag)
        # optional fields are only stored when explicitly provided
        if affinity is not None:
            pulumi.set(__self__, "affinity", affinity)
        if image_pull_policy is not None:
            pulumi.set(__self__, "image_pull_policy", image_pull_policy)
        if tolerations is not None:
            pulumi.set(__self__, "tolerations", tolerations)
    @property
    @pulumi.getter
    def repository(self) -> str:
        return pulumi.get(self, "repository")
    @property
    @pulumi.getter
    def tag(self) -> str:
        return pulumi.get(self, "tag")
    @property
    @pulumi.getter
    def affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinity']:
        """
        Affinity is a group of affinity scheduling rules.
        """
        return pulumi.get(self, "affinity")
    @property
    @pulumi.getter(name="imagePullPolicy")
    def image_pull_policy(self) -> Optional[str]:
        """
        PullPolicy describes a policy for if/when to pull a container image
        """
        return pulumi.get(self, "image_pull_policy")
    @property
    @pulumi.getter
    def tolerations(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerTolerations']]:
        return pulumi.get(self, "tolerations")
    # Map camelCase wire names to the snake_case attribute names above.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinity(dict):
    """
    Affinity is a group of affinity scheduling rules.
    """
    def __init__(__self__, *,
                 node_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinity'] = None,
                 pod_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinity'] = None,
                 pod_anti_affinity: Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinity'] = None):
        """
        Affinity is a group of affinity scheduling rules.
        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityArgs' node_affinity: Describes node affinity scheduling rules for the pod.
        :param 'IBMBlockCSISpecControllerAffinityPodAffinityArgs' pod_affinity: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        :param 'IBMBlockCSISpecControllerAffinityPodAntiAffinityArgs' pod_anti_affinity: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        # all three fields are optional and only stored when provided
        if node_affinity is not None:
            pulumi.set(__self__, "node_affinity", node_affinity)
        if pod_affinity is not None:
            pulumi.set(__self__, "pod_affinity", pod_affinity)
        if pod_anti_affinity is not None:
            pulumi.set(__self__, "pod_anti_affinity", pod_anti_affinity)
    @property
    @pulumi.getter(name="nodeAffinity")
    def node_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinity']:
        """
        Describes node affinity scheduling rules for the pod.
        """
        return pulumi.get(self, "node_affinity")
    @property
    @pulumi.getter(name="podAffinity")
    def pod_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAffinity']:
        """
        Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_affinity")
    @property
    @pulumi.getter(name="podAntiAffinity")
    def pod_anti_affinity(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityPodAntiAffinity']:
        """
        Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
        """
        return pulumi.get(self, "pod_anti_affinity")
    # Map camelCase wire names to the snake_case attribute names above.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinity(dict):
    """
    Describes node affinity scheduling rules for the pod.
    """
    def __init__(__self__, *,
                 preferred_during_scheduling_ignored_during_execution: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']] = None,
                 required_during_scheduling_ignored_during_execution: Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution'] = None):
        """
        Describes node affinity scheduling rules for the pod.
        :param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionArgs'] preferred_during_scheduling_ignored_during_execution: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs' required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        # both fields are optional and only stored when provided
        if preferred_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
        if required_during_scheduling_ignored_during_execution is not None:
            pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
    @property
    @pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
    def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
        """
        The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
        """
        return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
    @property
    @pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
    def required_during_scheduling_ignored_during_execution(self) -> Optional['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution']:
        """
        If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
        """
        return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
    # Map camelCase wire names to the snake_case attribute names above.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
    """
    def __init__(__self__, *,
                 preference: 'outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
                 weight: int):
        """
        An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
        :param 'IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs' preference: A node selector term, associated with the corresponding weight.
        :param int weight: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        # both fields are required by the schema
        pulumi.set(__self__, "preference", preference)
        pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter
    def preference(self) -> 'outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference':
        """
        A node selector term, associated with the corresponding weight.
        """
        return pulumi.get(self, "preference")
    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")
    # Map camelCase wire names to the snake_case attribute names above.
    def _translate_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference(dict):
"""
A node selector term, associated with the corresponding weight.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']] = None,
match_fields: Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']] = None):
"""
A node selector term, associated with the corresponding weight.
:param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
:param Sequence['IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_fields is not None:
pulumi.set(__self__, "match_fields", match_fields)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']]:
"""
A list of node selector requirements by node's labels.
"""
return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchFields")
def match_fields(self) -> Optional[Sequence['outputs.IBMBlockCSISpecControllerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']]:
"""
A list of node selector requirements by node's fields.
"""
return pulumi.get(self, "match_fields")
def | |
ADDR_TYPES:
create_interface_in_kernel(
tgen,
"r1",
"loopback1",
LOOPBACK_1[addr_type],
"RED_A",
)
create_interface_in_kernel(
tgen,
"r1",
"loopback2",
LOOPBACK_2[addr_type],
"BLUE_A",
)
step(
"Create a static routes in vrf RED_B on router RED_1 pointing"
" next-hop as interface's IP in vrf RED_A"
)
intf_r2_r12 = topo["routers"]["r2"]["links"]["r1-link1"]["interface"]
intf_r2_r10 = topo["routers"]["r2"]["links"]["r1-link3"]["interface"]
for addr_type in ADDR_TYPES:
input_dict_1 = {
"r2": {
"static_routes": [
{
"network": LOOPBACK_2[addr_type],
"interface": intf_r2_r10,
"nexthop_vrf": "BLUE_A",
"vrf": "RED_A",
},
{
"network": LOOPBACK_1[addr_type],
"interface": intf_r2_r12,
"nexthop_vrf": "RED_A",
"vrf": "BLUE_A",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute connected..")
input_dict_3 = {}
for dut in ["r1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
VRFS = ["RED_A", "BLUE_A"]
AS_NUM = [100, 100]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "connected"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["r2"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
VRFS = ["RED_A", "BLUE_A"]
AS_NUM = [100, 100]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that static routes are installed into vrfs RED_A"
"and RED_B tables only, not in global routing table of RED_1"
)
for addr_type in ADDR_TYPES:
dut = "r2"
input_dict = {
"r2": {
"static_routes": [
{
"network": LOOPBACK_2[addr_type],
"interface": intf_r2_r10,
"nexthop_vrf": "BLUE_A",
"vrf": "RED_A",
},
{
"network": LOOPBACK_1[addr_type],
"interface": intf_r2_r12,
"nexthop_vrf": "RED_A",
"vrf": "BLUE_A",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
write_test_footer(tc_name)
def test_inter_vrf_and_intra_vrf_communication_eBGP_p0(request):
    """
    FUNC_11:
    Verify intra-vrf and inter-vrf communication
    between eBGP peers.

    Loopbacks are created on r2 (one per VRF), cross-VRF static routes
    are configured on r3, and both routers redistribute into BGP; the
    test then checks the leaked routes land in r3's VRF tables.
    """
    tgen = get_topogen()
    tc_name = request.node.name
    write_test_header(tc_name)
    reset_config_on_routers(tgen)
    if tgen.routers_have_failure():
        check_router_status(tgen)
    step(
        "Configure unique loopback IP(IPv4+IPv6) in vrf RED_A on router"
        " R2 and advertise it in BGP process using redistribute "
        "connected command."
    )
    step(
        "Configure unique loopback IP(IPv4+IPv6) in vrf BLUE_A on router"
        " R2 and advertise it in BGP process using redistribute "
        "connected command."
    )
    # Create the per-VRF loopbacks directly in the kernel on r2.
    for addr_type in ADDR_TYPES:
        create_interface_in_kernel(
            tgen,
            "r2",
            "loopback1",
            LOOPBACK_1[addr_type],
            "RED_A",
        )
        create_interface_in_kernel(
            tgen,
            "r2",
            "loopback2",
            LOOPBACK_2[addr_type],
            "BLUE_A",
        )
    # NOTE: the original step text referenced RED_B/RED_1, which do not
    # exist in this testcase; the message now matches the actual config.
    step(
        "Create static routes in vrfs RED_A and BLUE_A on router R3,"
        " each pointing at r2's loopback from the other vrf via the"
        " corresponding r3<->r2 link (inter-vrf next-hops)."
    )
    intf_r3_r21 = topo["routers"]["r3"]["links"]["r2-link1"]["interface"]
    intf_r3_r23 = topo["routers"]["r3"]["links"]["r2-link3"]["interface"]
    for addr_type in ADDR_TYPES:
        # Each static route lives in one VRF but resolves its next-hop
        # in the other VRF (nexthop_vrf != vrf), exercising route leaking.
        input_dict_1 = {
            "r3": {
                "static_routes": [
                    {
                        "network": LOOPBACK_2[addr_type],
                        "interface": intf_r3_r23,
                        "nexthop_vrf": "BLUE_A",
                        "vrf": "RED_A",
                    },
                    {
                        "network": LOOPBACK_1[addr_type],
                        "interface": intf_r3_r21,
                        "nexthop_vrf": "RED_A",
                        "vrf": "BLUE_A",
                    },
                ]
            }
        }
        result = create_static_routes(tgen, input_dict_1)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
    step("Redistribute static..")
    # r3 (AS 200) redistributes the static routes in both VRFs.
    input_dict_3 = {}
    for dut in ["r3"]:
        temp = {dut: {"bgp": []}}
        input_dict_3.update(temp)
        VRFS = ["RED_A", "BLUE_A"]
        AS_NUM = [200, 200]
        for vrf, as_num in zip(VRFS, AS_NUM):
            temp[dut]["bgp"].append(
                {
                    "local_as": as_num,
                    "vrf": vrf,
                    "address_family": {
                        "ipv4": {
                            "unicast": {"redistribute": [{"redist_type": "static"}]}
                        },
                        "ipv6": {
                            "unicast": {"redistribute": [{"redist_type": "static"}]}
                        },
                    },
                }
            )
    result = create_router_bgp(tgen, topo, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    step("Redistribute connected..")
    # r2 (AS 100) redistributes its connected loopbacks in both VRFs.
    input_dict_3 = {}
    for dut in ["r2"]:
        temp = {dut: {"bgp": []}}
        input_dict_3.update(temp)
        VRFS = ["RED_A", "BLUE_A"]
        AS_NUM = [100, 100]
        for vrf, as_num in zip(VRFS, AS_NUM):
            temp[dut]["bgp"].append(
                {
                    "local_as": as_num,
                    "vrf": vrf,
                    "address_family": {
                        "ipv4": {
                            "unicast": {"redistribute": [{"redist_type": "connected"}]}
                        },
                        "ipv6": {
                            "unicast": {"redistribute": [{"redist_type": "connected"}]}
                        },
                    },
                }
            )
    result = create_router_bgp(tgen, topo, input_dict_3)
    assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
    step(
        "Verify that the cross-vrf static routes are installed in r3's"
        " RED_A and BLUE_A tables only, not in the global routing table"
    )
    for addr_type in ADDR_TYPES:
        dut = "r3"
        input_dict = {
            "r3": {
                "static_routes": [
                    {
                        "network": LOOPBACK_2[addr_type],
                        "interface": intf_r3_r23,
                        "nexthop_vrf": "BLUE_A",
                        "vrf": "RED_A",
                    },
                    {
                        "network": LOOPBACK_1[addr_type],
                        "interface": intf_r3_r21,
                        "nexthop_vrf": "RED_A",
                        "vrf": "BLUE_A",
                    },
                ]
            }
        }
        result = verify_rib(tgen, addr_type, dut, input_dict)
        assert result is True, "Testcase {} : Failed \n Error: {}".format(
            tc_name, result
        )
    write_test_footer(tc_name)
def test_route_map_within_vrf_to_alter_bgp_attribute_nexthop_p0(request):
"""
FUNC_12_a:
Configure route-maps within a VRF, to alter BGP attributes.
Verify that route-map doesn't affect any other VRF instances'
routing on DUT.
"""
tgen = get_topogen()
tc_name = request.node.name
write_test_header(tc_name)
reset_config_on_routers(tgen)
if tgen.routers_have_failure():
check_router_status(tgen)
step(
"Advertise a set of BGP prefixes(IPv4+IPv6) from RED_1 and"
" RED_2 in vrf instances(RED_A and RED_B)."
)
for addr_type in ADDR_TYPES:
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step(
"Advertise same set of BGP prefixes(IPv4+IPv6) from BLUE_1 and"
"BLUE_2 in vrf instances(BLUE_A and BLUE_B)"
)
for addr_type in ADDR_TYPES:
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = create_static_routes(tgen, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error: {}".format(
tc_name, result
)
step("Redistribute static..")
input_dict_3 = {}
for dut in ["red1", "blue1"]:
temp = {dut: {"bgp": []}}
input_dict_3.update(temp)
if "red" in dut:
VRFS = ["RED_A", "RED_B"]
AS_NUM = [500, 500]
elif "blue" in dut:
VRFS = ["BLUE_A", "BLUE_B"]
AS_NUM = [800, 800]
for vrf, as_num in zip(VRFS, AS_NUM):
temp[dut]["bgp"].append(
{
"local_as": as_num,
"vrf": vrf,
"address_family": {
"ipv4": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
"ipv6": {
"unicast": {"redistribute": [{"redist_type": "static"}]}
},
},
}
)
result = create_router_bgp(tgen, topo, input_dict_3)
assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result)
step(
"Verify that within vrf instances, BGP best path selection"
" algorithm remains intact and doesn't affect any other VRFs"
" routing decision."
)
for addr_type in ADDR_TYPES:
dut = "r2"
input_dict_1 = {
"red1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "RED_B",
},
]
}
}
input_dict_2 = {
"blue1": {
"static_routes": [
{
"network": [NETWORK1_1[addr_type]] + [NETWORK1_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_A",
},
{
"network": [NETWORK2_1[addr_type]] + [NETWORK2_2[addr_type]],
"next_hop": NEXT_HOP_IP[addr_type],
"vrf": "BLUE_B",
},
]
}
}
result = verify_rib(tgen, addr_type, dut, input_dict_1)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
result = verify_rib(tgen, addr_type, dut, input_dict_2)
assert result is True, "Testcase {} : Failed \n Error {}".format(
tc_name, result
)
step("Delete nexthop-self configure from r1")
input_dict_4 = {
"r1": {
"bgp": [
{
"local_as": "100",
"vrf": "RED_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"next_hop_self": False}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link1": {"next_hop_self": False}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "RED_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link2": {"next_hop_self": False}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link2": {"next_hop_self": False}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_A",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link3": {"next_hop_self": False}
}
},
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link3": {"next_hop_self": False}
}
}
}
}
},
},
},
{
"local_as": "100",
"vrf": "BLUE_B",
"address_family": {
"ipv4": {
"unicast": {
"neighbor": {
"r2": {
"dest_link": {
"r1-link4": {"next_hop_self": False}
}
}
}
}
},
"ipv6": {
"unicast": {
"neighbor": {
"r2": {
| |
for OFFLINE Migrations.
:param pulumi.Input[str] compartment_id: (Updatable) OCID of the compartment where the secret containing the credentials will be created.
:param pulumi.Input[pulumi.InputType['MigrationDataTransferMediumDetailsArgs']] data_transfer_medium_details: (Updatable) Data Transfer Medium details for the Migration. If not specified, it will default to Database Link. Only one type of medium details can be specified.
:param pulumi.Input[pulumi.InputType['MigrationDatapumpSettingsArgs']] datapump_settings: (Updatable) Optional settings for Datapump Export and Import jobs
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] display_name: (Updatable) Migration Display Name
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MigrationExcludeObjectArgs']]]] exclude_objects: (Updatable) Database objects to exclude from migration.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param pulumi.Input[pulumi.InputType['MigrationGoldenGateDetailsArgs']] golden_gate_details: (Updatable) Details about Oracle GoldenGate Microservices. Required for online logical migration.
:param pulumi.Input[str] source_container_database_connection_id: (Updatable) The OCID of the Source Container Database Connection. Only used for ONLINE migrations. Only Connections of type Non-Autonomous can be used as source container databases.
:param pulumi.Input[str] source_database_connection_id: (Updatable) The OCID of the Source Database Connection.
:param pulumi.Input[str] target_database_connection_id: (Updatable) The OCID of the Target Database Connection.
:param pulumi.Input[str] type: (Updatable) Migration type.
:param pulumi.Input[pulumi.InputType['MigrationVaultDetailsArgs']] vault_details: (Updatable) Oracle Cloud Infrastructure Vault details to store migration and connection credentials secrets
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: MigrationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        This resource provides the Migration resource in Oracle Cloud Infrastructure Database Migration service.

        Create a Migration resource that contains all the details to perform the
        database migration operation like source and destination database
        details, credentials, etc.

        This overload accepts a fully-populated ``MigrationArgs`` object; see the
        other ``__init__`` overload to supply each property as a keyword argument.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_oci as oci

        test_migration = oci.databasemigration.Migration("testMigration",
            compartment_id=var["compartment_id"],
            source_database_connection_id=oci_database_migration_connection["test_connection"]["id"],
            target_database_connection_id=oci_database_migration_connection["test_connection"]["id"],
            type=var["migration_type"],
            agent_id=oci_database_migration_agent["test_agent"]["id"],
            data_transfer_medium_details=oci.databasemigration.MigrationDataTransferMediumDetailsArgs(
                database_link_details=oci.databasemigration.MigrationDataTransferMediumDetailsDatabaseLinkDetailsArgs(
                    name=var["migration_data_transfer_medium_details_database_link_details_name"],
                ),
                object_storage_details=oci.databasemigration.MigrationDataTransferMediumDetailsObjectStorageDetailsArgs(
                    bucket=var["migration_data_transfer_medium_details_object_storage_details_bucket"],
                    namespace=var["migration_data_transfer_medium_details_object_storage_details_namespace"],
                ),
            ),
            datapump_settings=oci.databasemigration.MigrationDatapumpSettingsArgs(
                data_pump_parameters=oci.databasemigration.MigrationDatapumpSettingsDataPumpParametersArgs(
                    estimate=var["migration_datapump_settings_data_pump_parameters_estimate"],
                    exclude_parameters=var["migration_datapump_settings_data_pump_parameters_exclude_parameters"],
                    export_parallelism_degree=var["migration_datapump_settings_data_pump_parameters_export_parallelism_degree"],
                    import_parallelism_degree=var["migration_datapump_settings_data_pump_parameters_import_parallelism_degree"],
                    is_cluster=var["migration_datapump_settings_data_pump_parameters_is_cluster"],
                    table_exists_action=var["migration_datapump_settings_data_pump_parameters_table_exists_action"],
                ),
                export_directory_object=oci.databasemigration.MigrationDatapumpSettingsExportDirectoryObjectArgs(
                    name=var["migration_datapump_settings_export_directory_object_name"],
                    path=var["migration_datapump_settings_export_directory_object_path"],
                ),
                import_directory_object=oci.databasemigration.MigrationDatapumpSettingsImportDirectoryObjectArgs(
                    name=var["migration_datapump_settings_import_directory_object_name"],
                    path=var["migration_datapump_settings_import_directory_object_path"],
                ),
                job_mode=var["migration_datapump_settings_job_mode"],
                metadata_remaps=[oci.databasemigration.MigrationDatapumpSettingsMetadataRemapArgs(
                    new_value=var["migration_datapump_settings_metadata_remaps_new_value"],
                    old_value=var["migration_datapump_settings_metadata_remaps_old_value"],
                    type=var["migration_datapump_settings_metadata_remaps_type"],
                )],
            ),
            defined_tags={
                "foo-namespace.bar-key": "value",
            },
            display_name=var["migration_display_name"],
            exclude_objects=[oci.databasemigration.MigrationExcludeObjectArgs(
                object=var["migration_exclude_objects_object"],
                owner=var["migration_exclude_objects_owner"],
            )],
            freeform_tags={
                "bar-key": "value",
            },
            golden_gate_details=oci.databasemigration.MigrationGoldenGateDetailsArgs(
                hub=oci.databasemigration.MigrationGoldenGateDetailsHubArgs(
                    rest_admin_credentials=oci.databasemigration.MigrationGoldenGateDetailsHubRestAdminCredentialsArgs(
                        password=var["migration_golden_gate_details_hub_rest_admin_credentials_password"],
                        username=var["migration_golden_gate_details_hub_rest_admin_credentials_username"],
                    ),
                    source_db_admin_credentials=oci.databasemigration.MigrationGoldenGateDetailsHubSourceDbAdminCredentialsArgs(
                        password=var["migration_golden_gate_details_hub_source_db_admin_credentials_password"],
                        username=var["migration_golden_gate_details_hub_source_db_admin_credentials_username"],
                    ),
                    source_microservices_deployment_name=oci_apigateway_deployment["test_deployment"]["name"],
                    target_db_admin_credentials=oci.databasemigration.MigrationGoldenGateDetailsHubTargetDbAdminCredentialsArgs(
                        password=var["migration_golden_gate_details_hub_target_db_admin_credentials_password"],
                        username=var["migration_golden_gate_details_hub_target_db_admin_credentials_username"],
                    ),
                    target_microservices_deployment_name=oci_apigateway_deployment["test_deployment"]["name"],
                    url=var["migration_golden_gate_details_hub_url"],
                    compute_id=oci_database_migration_compute["test_compute"]["id"],
                    source_container_db_admin_credentials=oci.databasemigration.MigrationGoldenGateDetailsHubSourceContainerDbAdminCredentialsArgs(
                        password=var["migration_golden_gate_details_hub_source_container_db_admin_credentials_password"],
                        username=var["migration_golden_gate_details_hub_source_container_db_admin_credentials_username"],
                    ),
                ),
                settings=oci.databasemigration.MigrationGoldenGateDetailsSettingsArgs(
                    acceptable_lag=var["migration_golden_gate_details_settings_acceptable_lag"],
                    extract=oci.databasemigration.MigrationGoldenGateDetailsSettingsExtractArgs(
                        long_trans_duration=var["migration_golden_gate_details_settings_extract_long_trans_duration"],
                        performance_profile=var["migration_golden_gate_details_settings_extract_performance_profile"],
                    ),
                    replicat=oci.databasemigration.MigrationGoldenGateDetailsSettingsReplicatArgs(
                        map_parallelism=var["migration_golden_gate_details_settings_replicat_map_parallelism"],
                        max_apply_parallelism=var["migration_golden_gate_details_settings_replicat_max_apply_parallelism"],
                        min_apply_parallelism=var["migration_golden_gate_details_settings_replicat_min_apply_parallelism"],
                    ),
                ),
            ),
            source_container_database_connection_id=oci_database_migration_connection["test_connection"]["id"],
            vault_details=oci.databasemigration.MigrationVaultDetailsArgs(
                compartment_id=var["compartment_id"],
                key_id=oci_kms_key["test_key"]["id"],
                vault_id=oci_kms_vault["test_vault"]["id"],
            ))
        ```

        ## Import

        Migrations can be imported using the `id`, e.g.

        ```sh
         $ pulumi import oci:databasemigration/migration:Migration test_migration "id"
        ```

        :param str resource_name: The name of the resource.
        :param MigrationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MigrationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_id: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
data_transfer_medium_details: Optional[pulumi.Input[pulumi.InputType['MigrationDataTransferMediumDetailsArgs']]] = None,
datapump_settings: Optional[pulumi.Input[pulumi.InputType['MigrationDatapumpSettingsArgs']]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
exclude_objects: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MigrationExcludeObjectArgs']]]]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
golden_gate_details: Optional[pulumi.Input[pulumi.InputType['MigrationGoldenGateDetailsArgs']]] = None,
source_container_database_connection_id: Optional[pulumi.Input[str]] = None,
source_database_connection_id: Optional[pulumi.Input[str]] = None,
target_database_connection_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
vault_details: Optional[pulumi.Input[pulumi.InputType['MigrationVaultDetailsArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = MigrationArgs.__new__(MigrationArgs)
__props__.__dict__["agent_id"] = agent_id
if compartment_id is None and not opts.urn:
raise TypeError("Missing required property 'compartment_id'")
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["data_transfer_medium_details"] = data_transfer_medium_details
__props__.__dict__["datapump_settings"] = datapump_settings
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["display_name"] = display_name
__props__.__dict__["exclude_objects"] = exclude_objects
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["golden_gate_details"] = golden_gate_details
__props__.__dict__["source_container_database_connection_id"] = source_container_database_connection_id
if source_database_connection_id is None and not opts.urn:
raise TypeError("Missing required property 'source_database_connection_id'")
__props__.__dict__["source_database_connection_id"] = source_database_connection_id
if target_database_connection_id is None and not opts.urn:
raise TypeError("Missing required property 'target_database_connection_id'")
__props__.__dict__["target_database_connection_id"] = target_database_connection_id
if type is None and not opts.urn:
raise TypeError("Missing required property 'type'")
__props__.__dict__["type"] = type
__props__.__dict__["vault_details"] = vault_details
__props__.__dict__["credentials_secret_id"] = None
__props__.__dict__["executing_job_id"] = None
__props__.__dict__["lifecycle_details"] = None
__props__.__dict__["state"] = None
__props__.__dict__["system_tags"] = None
__props__.__dict__["time_created"] = None
__props__.__dict__["time_last_migration"] = None
__props__.__dict__["time_updated"] = None
__props__.__dict__["wait_after"] = None
super(Migration, __self__).__init__(
'oci:databasemigration/migration:Migration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
agent_id: Optional[pulumi.Input[str]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
credentials_secret_id: Optional[pulumi.Input[str]] = None,
data_transfer_medium_details: Optional[pulumi.Input[pulumi.InputType['MigrationDataTransferMediumDetailsArgs']]] = None,
datapump_settings: Optional[pulumi.Input[pulumi.InputType['MigrationDatapumpSettingsArgs']]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
display_name: Optional[pulumi.Input[str]] = None,
exclude_objects: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MigrationExcludeObjectArgs']]]]] = None,
executing_job_id: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
golden_gate_details: Optional[pulumi.Input[pulumi.InputType['MigrationGoldenGateDetailsArgs']]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
source_container_database_connection_id: Optional[pulumi.Input[str]] = None,
source_database_connection_id: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input[str]] = None,
system_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
target_database_connection_id: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_last_migration: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
vault_details: Optional[pulumi.Input[pulumi.InputType['MigrationVaultDetailsArgs']]] = None,
wait_after: Optional[pulumi.Input[str]] = None) -> 'Migration':
"""
Get an existing Migration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] agent_id: (Updatable) The OCID of the registered ODMS Agent. Required for OFFLINE Migrations.
:param pulumi.Input[str] compartment_id: (Updatable) OCID of the compartment where the secret containing the credentials will be created.
:param pulumi.Input[str] credentials_secret_id: OCID of the Secret in the Oracle Cloud Infrastructure vault containing the Migration credentials. Used to store Golden Gate admin user credentials.
:param pulumi.Input[pulumi.InputType['MigrationDataTransferMediumDetailsArgs']] data_transfer_medium_details: (Updatable) Data Transfer Medium details for the Migration. If not specified, it will default to Database Link. Only one type of medium details can be specified.
:param pulumi.Input[pulumi.InputType['MigrationDatapumpSettingsArgs']] datapump_settings: (Updatable) Optional settings for Datapump Export and Import jobs
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] display_name: (Updatable) Migration Display Name
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MigrationExcludeObjectArgs']]]] exclude_objects: (Updatable) Database objects to exclude from migration.
:param pulumi.Input[str] executing_job_id: OCID of the current ODMS Job in execution for the Migration, if any.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param pulumi.Input[pulumi.InputType['MigrationGoldenGateDetailsArgs']] golden_gate_details: (Updatable) Details about Oracle GoldenGate Microservices. Required for online logical migration.
:param pulumi.Input[str] lifecycle_details: Additional status related to the execution and current state of the Migration.
:param pulumi.Input[str] source_container_database_connection_id: (Updatable) The OCID of the Source Container Database Connection. Only used for ONLINE migrations. Only Connections of type Non-Autonomous can be used as source container databases.
:param pulumi.Input[str] source_database_connection_id: (Updatable) The OCID of the Source Database Connection.
:param pulumi.Input[str] state: The current state of the Migration Resource.
:param pulumi.Input[Mapping[str, Any]] system_tags: Usage of system tag keys. These predefined keys are scoped to namespaces. Example: `{"orcl-cloud.free-tier-retained": "true"}`
:param pulumi.Input[str] target_database_connection_id: (Updatable) The OCID of the Target Database Connection.
:param pulumi.Input[str] time_created: The time the Migration was created. An RFC3339 formatted datetime string.
:param pulumi.Input[str] time_last_migration: The time of last Migration. An RFC3339 formatted datetime string.
:param pulumi.Input[str] time_updated: The time of the last Migration details update. An RFC3339 formatted datetime string.
:param pulumi.Input[str] type: (Updatable) Migration type.
:param pulumi.Input[pulumi.InputType['MigrationVaultDetailsArgs']] vault_details: (Updatable) Oracle Cloud Infrastructure Vault details to store migration and connection credentials secrets
:param pulumi.Input[str] wait_after: Name of a migration phase. The Job will wait after executing this phase until the Resume Job endpoint is called.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _MigrationState.__new__(_MigrationState)
__props__.__dict__["agent_id"] = agent_id
__props__.__dict__["compartment_id"] = compartment_id
__props__.__dict__["credentials_secret_id"] = credentials_secret_id
__props__.__dict__["data_transfer_medium_details"] = data_transfer_medium_details
__props__.__dict__["datapump_settings"] = datapump_settings
__props__.__dict__["defined_tags"] = defined_tags
__props__.__dict__["display_name"] = display_name
__props__.__dict__["exclude_objects"] = exclude_objects
__props__.__dict__["executing_job_id"] = executing_job_id
__props__.__dict__["freeform_tags"] = freeform_tags
__props__.__dict__["golden_gate_details"] = golden_gate_details
__props__.__dict__["lifecycle_details"] = lifecycle_details
__props__.__dict__["source_container_database_connection_id"] = source_container_database_connection_id
__props__.__dict__["source_database_connection_id"] = source_database_connection_id
__props__.__dict__["state"] = state
__props__.__dict__["system_tags"] = system_tags
__props__.__dict__["target_database_connection_id"] = target_database_connection_id
__props__.__dict__["time_created"] | |
u('\u6d59\u6c5f\u7701\u6e56\u5dde\u5e02')},
'861385721':{'en': 'Zhoushan, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u821f\u5c71\u5e02')},
'861385720':{'en': 'Zhoushan, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u821f\u5c71\u5e02')},
'861385723':{'en': 'Zhoushan, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u821f\u5c71\u5e02')},
'861385722':{'en': 'Zhoushan, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u821f\u5c71\u5e02')},
'861450602':{'en': 'Songyuan, Jilin', 'zh': u('\u5409\u6797\u7701\u677e\u539f\u5e02')},
'86138442':{'en': 'Jilin, Jilin', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'86138443':{'en': 'Yanbian, Jilin', 'zh': u('\u5409\u6797\u7701\u5ef6\u8fb9\u671d\u9c9c\u65cf\u81ea\u6cbb\u5dde')},
'86138440':{'en': 'Changchun, Jilin', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'86138441':{'en': 'Changchun, Jilin', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'86138446':{'en': 'Jilin, Jilin', 'zh': u('\u5409\u6797\u7701\u5409\u6797\u5e02')},
'86138447':{'en': 'Yanbian, Jilin', 'zh': u('\u5409\u6797\u7701\u5ef6\u8fb9\u671d\u9c9c\u65cf\u81ea\u6cbb\u5dde')},
'86138444':{'en': 'Siping, Jilin', 'zh': u('\u5409\u6797\u7701\u56db\u5e73\u5e02')},
'86138445':{'en': 'Tonghua, Jilin', 'zh': u('\u5409\u6797\u7701\u901a\u5316\u5e02')},
'86138448':{'en': 'Changchun, Jilin', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'86138449':{'en': 'Changchun, Jilin', 'zh': u('\u5409\u6797\u7701\u957f\u6625\u5e02')},
'861454566':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u4e50\u5c71\u5e02')},
'861454136':{'en': '<NAME>', 'zh': u('\u6cb3\u5317\u7701\u5eca\u574a\u5e02')},
'861450608':{'en': '<NAME>', 'zh': u('\u5409\u6797\u7701\u767d\u5c71\u5e02')},
'861380393':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6fee\u9633\u5e02')},
'861380392':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9e64\u58c1\u5e02')},
'861380391':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u7126\u4f5c\u5e02')},
'861380390':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5e73\u9876\u5c71\u5e02')},
'861380397':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861380396':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u9a7b\u9a6c\u5e97\u5e02')},
'861380395':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u6f2f\u6cb3\u5e02')},
'861380394':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5468\u53e3\u5e02')},
'861380399':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861380398':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u4e09\u95e8\u5ce1\u5e02')},
'861384894':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861384895':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861384896':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861384897':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861384890':{'en': 'Hulun, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u4f26\u8d1d\u5c14\u5e02')},
'861384891':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861384892':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')},
'861384893':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861384898':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861384899':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861452478':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861452479':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861452474':{'en': 'Hegang, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e64\u5c97\u5e02')},
'861452475':{'en': 'Shuangyashan, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u53cc\u9e2d\u5c71\u5e02')},
'861452476':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861452477':{'en': '<NAME>', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u54c8\u5c14\u6ee8\u5e02')},
'861452470':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861452471':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861452472':{'en': 'Daqing, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u5927\u5e86\u5e02')},
'861452473':{'en': 'Jixi, Heilongjiang', 'zh': u('\u9ed1\u9f99\u6c5f\u7701\u9e21\u897f\u5e02')},
'861454831':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'86138910':{'en': 'Xianyang, Shaanxi', 'zh': u('\u9655\u897f\u7701\u54b8\u9633\u5e02')},
'86138911':{'en': 'YanAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u5ef6\u5b89\u5e02')},
'86138912':{'en': 'Yulin, Shaanxi', 'zh': u('\u9655\u897f\u7701\u6986\u6797\u5e02')},
'86138913':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861450186':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'86138915':{'en': 'An<NAME>', 'zh': u('\u9655\u897f\u7701\u5b89\u5eb7\u5e02')},
'86138916':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'86138917':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u5b9d\u9e21\u5e02')},
'86138918':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'86138919':{'en': 'XiAn, Shaanxi', 'zh': u('\u9655\u897f\u7701\u897f\u5b89\u5e02')},
'861450188':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'861450189':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'861387271':{'en': 'Enshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861387270':{'en': 'Enshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861387273':{'en': 'Enshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861387272':{'en': 'Enshi, Hubei', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861387275':{'en': 'Shiyan, Hubei', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861387274':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u6069\u65bd\u571f\u5bb6\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861387277':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861387276':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861387279':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861387278':{'en': '<NAME>', 'zh': u('\u6e56\u5317\u7701\u5341\u5830\u5e02')},
'861391456':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861391457':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861391454':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861391455':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861391452':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861391453':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861391450':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861391451':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u6cf0\u5dde\u5e02')},
'861380977':{'en': 'Guangzhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u5e7f\u5dde\u5e02')},
'861380976':{'en': 'Maoming, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'861380975':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861380974':{'en': 'Heyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6cb3\u6e90\u5e02')},
'861380973':{'en': 'Zhanjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e5b\u6c5f\u5e02')},
'861380972':{'en': 'Yangjiang, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u9633\u6c5f\u5e02')},
'861391458':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861391459':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u9547\u6c5f\u5e02')},
'861379906':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861379907':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'861379904':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861379905':{'en': 'Zhangzhou, Fujian', 'zh': u('\u798f\u5efa\u7701\u6f33\u5dde\u5e02')},
'861379902':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u4e09\u660e\u5e02')},
'861379903':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u4e09\u660e\u5e02')},
'861379900':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861379901':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u8386\u7530\u5e02')},
'861379466':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'861379908':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'861379909':{'en': '<NAME>', 'zh': u('\u798f\u5efa\u7701\u9f99\u5ca9\u5e02')},
'861389707':{'en': 'Haixi, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u897f\u8499\u53e4\u65cf\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389706':{'en': 'Haixi, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u897f\u8499\u53e4\u65cf\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389705':{'en': 'Haixi, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u897f\u8499\u53e4\u65cf\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389704':{'en': 'Hainan, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u5357\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389703':{'en': 'Huangnan, Qinghai', 'zh': u('\u9752\u6d77\u7701\u9ec4\u5357\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389702':{'en': 'Haidong, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u4e1c\u5730\u533a')},
'861389701':{'en': 'Haibei, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u5317\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389700':{'en': 'Haibei, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u5317\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861453987':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861453986':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'861453985':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861453984':{'en': 'Zhaoqing, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u8087\u5e86\u5e02')},
'861453983':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861453982':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'861389709':{'en': 'Haixi, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u897f\u8499\u53e4\u65cf\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'861389708':{'en': 'Haixi, Qinghai', 'zh': u('\u9752\u6d77\u7701\u6d77\u897f\u8499\u53e4\u65cf\u85cf\u65cf\u81ea\u6cbb\u5dde')},
'86139751':{'en': '<NAME>', 'zh': u('\u6e56\u5357\u7701\u957f\u6c99\u5e02')},
'861458409':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861399163':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861458407':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861458406':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861458405':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u90d1\u5dde\u5e02')},
'861458404':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861458403':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861399160':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861458401':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861458400':{'en': '<NAME>', 'zh': u('\u6cb3\u5357\u7701\u5546\u4e18\u5e02')},
'861399161':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6c49\u4e2d\u5e02')},
'861399166':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861399167':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861454458':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u666f\u5fb7\u9547\u5e02')},
'861454459':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u840d\u4e61\u5e02')},
'861399164':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861454450':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u9e70\u6f6d\u5e02')},
'861454451':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861454452':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u4e5d\u6c5f\u5e02')},
'861399165':{'en': '<NAME>', 'zh': u('\u9655\u897f\u7701\u6e2d\u5357\u5e02')},
'861454454':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u629a\u5dde\u5e02')},
'861454455':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5b9c\u6625\u5e02')},
'861454456':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5409\u5b89\u5e02')},
'861454457':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u8d63\u5dde\u5e02')},
'861386636':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861386637':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861386634':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861386635':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861386632':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861386633':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861386630':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861386631':{'en': 'Huainan, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5357\u5e02')},
'861386638':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861386639':{'en': 'Wuhu, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u829c\u6e56\u5e02')},
'861381949':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861381948':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861381945':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861381944':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861381947':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861381946':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861381941':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861381940':{'en': 'Jiaxing, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5609\u5174\u5e02')},
'861381943':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861381942':{'en': 'Ningbo, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u5b81\u6ce2\u5e02')},
'861390886':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861390887':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861390884':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861390885':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861390882':{'en': '<NAME>', 'zh': u('\u4e91\u5357\u7701\u5fb7\u5b8f\u50a3\u65cf\u666f\u9887\u65cf\u81ea\u6cbb\u5dde')},
'861390883':{'en': 'Lincang, Yunnan', 'zh': u('\u4e91\u5357\u7701\u4e34\u6ca7\u5e02')},
'861390880':{'en': 'Kunming, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861390881':{'en': 'Xishuangbanna, Yunnan', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861390888':{'en': 'Lijiang, Yunnan', 'zh': u('\u4e91\u5357\u7701\u4e3d\u6c5f\u5e02')},
'861390889':{'en': 'Yuxi, Yunnan', 'zh': u('\u4e91\u5357\u7701\u7389\u6eaa\u5e02')},
'861378974':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861378975':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861378976':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861378977':{'en': 'Ordos, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u9102\u5c14\u591a\u65af\u5e02')},
'861378970':{'en': 'Hulun, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u4f26\u8d1d\u5c14\u5e02')},
'861378971':{'en': 'Tongliao, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u901a\u8fbd\u5e02')},
'861378972':{'en': 'Baotou, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5305\u5934\u5e02')},
'861378973':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861378978':{'en': 'Bayannur, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5df4\u5f66\u6dd6\u5c14\u5e02')},
'861378979':{'en': 'Chifeng, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u8d64\u5cf0\u5e02')},
'861397218':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861397219':{'en': 'Wuhan, Hubei', 'zh': u('\u6e56\u5317\u7701\u6b66\u6c49\u5e02')},
'861452276':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u57ce\u5e02')},
'861452277':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u4e34\u6c7e\u5e02')},
'861452274':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861452275':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u957f\u6cbb\u5e02')},
'861452272':{'en': '<NAME>', 'zh': u('\u5c71\u897f\u7701\u664b\u4e2d\u5e02')},
'861452273':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861452270':{'en': 'Xinzhou, Shanxi', 'zh': u('\u5c71\u897f\u7701\u5ffb\u5dde\u5e02')},
'861452271':{'en': 'Taiyuan, Shanxi', 'zh': u('\u5c71\u897f\u7701\u592a\u539f\u5e02')},
'861457183':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861457182':{'en': 'Wenzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u6e29\u5dde\u5e02')},
'861457181':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861457180':{'en': 'Hangzhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u676d\u5dde\u5e02')},
'861457187':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861457186':{'en': 'Taizhou, Zhejiang', 'zh': u('\u6d59\u6c5f\u7701\u53f0\u5dde\u5e02')},
'861452278':{'en': u('L\u00fcliang, Shanxi'), 'zh': u('\u5c71\u897f\u7701\u5415\u6881\u5e02')},
'861452279':{'en': 'Yuncheng, Shanxi', 'zh': u('\u5c71\u897f\u7701\u8fd0\u57ce\u5e02')},
'861457201':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861457202':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861381369':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861381368':{'en': 'Changzhou, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861381361':{'en': 'Nantong, Jiangsu', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381360':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381363':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381362':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381365':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861381364':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u901a\u5e02')},
'861381367':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861381366':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5e38\u5dde\u5e02')},
'861457204':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861394749':{'en': 'Alxa, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u963f\u62c9\u5584\u76df')},
'861394748':{'en': 'Hinggan, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u5174\u5b89\u76df')},
'861450287':{'en': 'Hohhot, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u547c\u548c\u6d69\u7279\u5e02')},
'861394741':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861394740':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861394743':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861394742':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861394745':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861394744':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861394747':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'861394746':{'en': 'Ulanqab, Inner Mongolia', 'zh': u('\u5185\u8499\u53e4\u4e4c\u5170\u5bdf\u5e03\u5e02')},
'86139276':{'en': 'Qingyuan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6e05\u8fdc\u5e02')},
'86139277':{'en': 'Foshan, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'86139274':{'en': 'Shenzhen, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u6df1\u5733\u5e02')},
'86139275':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u8302\u540d\u5e02')},
'86139272':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4f5b\u5c71\u5e02')},
'86139273':{'en': 'Huizhou, Guangdong', 'zh': u('\u5e7f\u4e1c\u7701\u60e0\u5dde\u5e02')},
'86139270':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u63ed\u9633\u5e02')},
'86139271':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u4e91\u6d6e\u5e02')},
'861457205':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'86139278':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u97f6\u5173\u5e02')},
'86139279':{'en': '<NAME>', 'zh': u('\u5e7f\u4e1c\u7701\u6c55\u5c3e\u5e02')},
'861390088':{'en': 'Xishuangbanna, Yunnan', 'zh': u('\u4e91\u5357\u7701\u897f\u53cc\u7248\u7eb3\u50a3\u65cf\u81ea\u6cbb\u5dde')},
'861390089':{'en': 'Qamdo, Tibet', 'zh': u('\u897f\u85cf\u660c\u90fd\u5730\u533a')},
'861390086':{'en': 'Jingmen, Hubei', 'zh': u('\u6e56\u5317\u7701\u8346\u95e8\u5e02')},
'861390087':{'en': 'Kunming, Yunnan', 'zh': u('\u4e91\u5357\u7701\u6606\u660e\u5e02')},
'861390084':{'en': 'Yueyang, Hunan', 'zh': u('\u6e56\u5357\u7701\u5cb3\u9633\u5e02')},
'861390085':{'en': 'Beijing', 'zh': u('\u5317\u4eac\u5e02')},
'861390082':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')},
'861390083':{'en': 'Chongqing', 'zh': u('\u91cd\u5e86\u5e02')},
'861390080':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861390081':{'en': '<NAME>', 'zh': u('\u56db\u5ddd\u7701\u6210\u90fd\u5e02')},
'861380708':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861380709':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861390785':{'en': '<NAME>', 'zh': u('\u5e7f\u897f\u7389\u6797\u5e02')},
'861380700':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861380701':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u9e70\u6f6d\u5e02')},
'861380702':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u4e5d\u6c5f\u5e02')},
'861380703':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861380704':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861380705':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861380706':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u5357\u660c\u5e02')},
'861380707':{'en': '<NAME>', 'zh': u('\u6c5f\u897f\u7701\u8d63\u5dde\u5e02')},
'861398598':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'861398599':{'en': '<NAME>', 'zh': u('\u8d35\u5dde\u7701\u9ed4\u897f\u5357\u5e03\u4f9d\u65cf\u82d7\u65cf\u81ea\u6cbb\u5dde')},
'86138561':{'en': 'Huaibei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u6dee\u5317\u5e02')},
'861457207':{'en': 'Mianyang, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u7ef5\u9633\u5e02')},
'861398925':{'en': 'Liangshan, Sichuan', 'zh': u('\u56db\u5ddd\u7701\u51c9\u5c71\u5f5d\u65cf\u81ea\u6cbb\u5dde')},
'861379208':{'en': '<NAME>', 'zh': u('\u5c71\u4e1c\u7701\u4e1c\u8425\u5e02')},
'861453600':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861453601':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861453602':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861453603':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861453604':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861453605':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u5357\u4eac\u5e02')},
'861453606':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861453607':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861453608':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'861453609':{'en': '<NAME>', 'zh': u('\u6c5f\u82cf\u7701\u65e0\u9521\u5e02')},
'86138560':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861380568':{'en': '<NAME>', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
'861380569':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861380564':{'en': 'LuAn, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u516d\u5b89\u5e02')},
'861380565':{'en': 'Hefei, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5408\u80a5\u5e02')},
'861380566':{'en': 'Anqing, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u5b89\u5e86\u5e02')},
'861380567':{'en': 'Fuyang, Anhui', 'zh': u('\u5b89\u5fbd\u7701\u961c\u9633\u5e02')},
| |
from touchworks.logger import Logger
import json
import uuid
import requests
import time
# Module-level logger; Logger.get_logger is a project wrapper (imported above) — presumably
# around the stdlib logging module, TODO confirm against touchworks.logger.
logger = Logger.get_logger(__name__)
class TouchWorksException(Exception):
    """Base exception for failures reported by the TouchWorks web service API."""
class TouchWorksErrorMessages(object):
    """Canonical error-message strings used when reporting TouchWorks API failures."""
    # Message for a failed json/GetToken call (no usable security token returned).
    GET_TOKEN_FAILED_ERROR = 'unable to acquire the token from web service'
    # Message for a failed json/MagicJson call.
    MAGIC_JSON_FAILED = 'magic json api failed'
class SecurityToken(object):
    """A TouchWorks security token together with the time it was acquired.

    The acquisition timestamp lets callers decide when a token is stale and
    must be refreshed.
    """

    def __init__(self, token, acquired_time=None):
        """Store *token* and its acquisition time.

        Parameters
        ----------
        token : str
            The security token string. Must be non-empty.
        acquired_time : float, optional
            Epoch seconds when the token was obtained. Defaults to the
            current time. NOTE: a falsy value (0, None) is treated as
            "not provided", matching the original behavior.

        Raises
        ------
        ValueError
            If *token* is empty or None. (ValueError subclasses Exception,
            so existing callers catching Exception are unaffected.)
        """
        if not token:
            raise ValueError('token can not be empty')
        # `or` preserves the original falsy check: 0/None fall back to now().
        self.acquired_time = acquired_time or time.time()
        self.token = token
class TouchWorksEndPoints(object):
    """Relative URL paths for the TouchWorks JSON web-service endpoints."""
    # Endpoint that issues a security token.
    GET_TOKEN = 'json/GetToken'
    # Endpoint that executes "magic" JSON actions (see TouchWorksMagicConstants).
    MAGIC_JSON = 'json/MagicJson'
class TouchWorksMagicConstants(object):
    """Action names and result keys for the MagicJson endpoint.

    Each ACTION_* value is the action string sent in the request; the
    matching RESULT_* value is the key under which the response payload
    is returned.

    NOTE(review): several action strings carry a trailing space
    (e.g. 'GetPatientPharmacies ', 'GetUserID ') and two identifiers look
    misspelled ('SaveTaskComent', 'getpatienlLocationsinfo').  They are
    left untouched here because they are sent verbatim to the server --
    confirm against the Allscripts Unity API docs before "fixing" them.
    """
    ACTION_SEARCH_PATIENTS = 'SearchPatients'
    RESULT_SEARCH_PATIENTS = 'searchpatientsinfo'
    ACTION_GET_DOCUMENTS = 'GetDocuments'
    RESULT_GET_DOCUMENTS = 'getdocumentsinfo'
    ACTION_GET_SCHEDULE = 'GetSchedule'
    RESULT_GET_SCHEDULE = 'getscheduleinfo'
    ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT = 'GetEncounterListForPatient'
    RESULT_GET_ENCOUNTER_LIST_FOR_PATIENT = 'getencounterlistforpatientinfo'
    ACTION_GET_PATIENT_INFO = 'GetPatient'
    RESULT_GET_PATIENT_INFO = 'getpatientinfo'
    ACTION_GET_ENCOUNTER = 'GetEncounter'
    RESULT_GET_ENCOUNTER = 'getencounterinfo'
    ACTION_SAVE_UNSTRUCTURED_DATA = 'SaveUnstructuredDocument'
    RESULT_SAVE_UNSTRUCTURED_DATA = 'saveunstructureddocument'
    ACTION_GET_DOCUMENT_TYPE = 'GetDocumentType'
    RESULT_GET_DOCUMENT_TYPE = 'getdocumenttypeinfo'
    ACTION_GET_DICTIONARY = 'GetDictionary'
    RESULT_GET_DICTIONARY = 'getdictionaryinfo'
    ACTION_SAVE_NOTE = 'SaveNote'
    RESULT_SAVE_NOTE = 'savenoteinfo'
    ACTION_GET_TASKLIST_BY_VIEW = 'GetTaskListByView'
    # NOTE(review): identifier spelling is inconsistent with the ACTION_
    # counterpart (TASKLISTBY vs TASKLIST_BY) -- kept as-is for callers.
    RESULT_GET_TASKLISTBY_VIEW = 'gettasklistbyviewinfo'
    ACTION_GET_DELEGATES = 'GetDelegates'
    RESULT_GET_DELEGATES = 'getdelegatesinfo'
    ACTION_GET_TASK_COMMENTS = 'GetTaskComments'
    RESULT_GET_TASK_COMMENTS = 'gettaskcommentsinfo'
    # NOTE(review): lowercase unlike every other action string -- confirm.
    ACTION_SAVE_TASK = 'savetask'
    RESULT_SAVE_TASK = 'savetaskinfo'
    ACTION_SEARCH_TASK_VIEWS = 'SearchTaskViews'
    RESULT_SEARCH_TASK_VIEWS = 'searchtaskviewsinfo'
    ACTION_SAVE_TASK_STATUS = 'SaveTaskStatus'
    RESULT_SAVE_TASK_STATUS = 'savetaskstatusinfo'
    ACTION_GET_TASK = 'GetTask'
    RESULT_GET_TASK = 'gettaskinfo'
    # NOTE(review): 'SaveTaskComent' looks like a typo for 'SaveTaskComment',
    # but it is sent verbatim to the API -- verify before changing.
    ACTION_SAVE_TASK_COMMENT = 'SaveTaskComent'
    RESULT_SAVE_TASK_COMMENT = 'savetaskcommentinfo'
    ACTION_SAVE_MSG_FROM_PAT_PORTAL = 'SaveMsgFromPatPortal'
    RESULT_SAVE_MSG_FROM_PAT_PORTAL = 'savemsgfrompatportalinfo'
    ACTION_GET_TASK_LIST = 'GetTaskList'
    RESULT_GET_TASK_LIST = 'gettasklistinfo'
    ACTION_SET_PATIENT_LOCATION_AND_STATUS = 'SetPatientLocationAndStatus'
    RESULT_SET_PATIENT_LOCATION_AND_STATUS = 'setpatientlocationandstatusinfo'
    ACTION_GET_CLINICAL_SUMMARY = 'GetClinicalSummary'
    RESULT_GET_CLINICAL_SUMMARY = 'getclinicalsummaryinfo'
    ACTION_GET_PATIENT_ACTIVITY = 'GetPatientActivity'
    RESULT_GET_PATIENT_ACTIVITY = 'getpatientactivityinfo'
    # NOTE(review): trailing space in the action string, and 'PHARAMCIES'
    # in the identifier -- kept verbatim.
    ACTION_GET_PATIENT_PHARAMCIES = 'GetPatientPharmacies '
    RESULT_GET_PATIENT_PHARAMCIES = 'getpatientpharmaciesinfo'
    ACTION_SET_PATIENT_MEDHX_FLAG = 'SetPatientMedHXFlag '
    RESULT_SET_PATIENT_MEDHX_FLAG = 'setpatientmedhxflaginfo'
    ACTION_GET_CHANGED_PATIENTS = 'GetChangedPatients '
    RESULT_GET_CHANGED_PATIENTS = 'getchangedpatientsinfo'
    ACTION_GET_PATIENT_LOCATIONS = 'GetPatientLocations '
    # NOTE(review): 'getpatienlLocationsinfo' looks garbled -- confirm the
    # response key actually used by the service.
    RESULT_GET_PATIENT_LOCATIONS = 'getpatienlLocationsinfo'
    ACTION_GET_USER_ID = 'GetUserID '
    RESULT_GET_USER_ID = 'getuseridinfo'
    ACTION_GET_PROVIDER = 'GetProvider'
    RESULT_GET_PROVIDER = 'getproviderinfo'
    ACTION_GET_PROVIDER_INFO = 'GetProviderInfo'
    RESULT_GET_PROVIDER_INFO = 'getproviderinfoinfo'
    ACTION_GET_PROVIDERS = 'GetProviders'
    RESULT_GET_PROVIDERS = 'getprovidersinfo'
    ACTION_GET_USER_PREFERENCES = 'GetUserPreferences'
    RESULT_GET_USER_PREFERENCES = 'getuserpreferencesinfo'
class TouchWorks(object):
TOKEN_DEFAULT_TIMEOUT_IN_SECS = 20 * 60
def __init__(self, base_url, username,
password, app_name, cache_token=True,
token_timeout=TOKEN_DEFAULT_TIMEOUT_IN_SECS,
app_username=None):
"""
creates an instance of TouchWorks, connects to the TouchWorks Web Service
and caches username, password, app_name
:param base_url: required
:param username: required
:param password: required
:param app_name: required
:param cache_token: optional
:param token_timeout: optional
:param app_username: optional
:return:
"""
if not base_url:
raise ValueError('base_url can not be null')
if not username:
raise ValueError('username can not be null')
if not password:
raise ValueError('password can not be null')
if not app_name:
raise ValueError('app_name can not be null')
self._base_url = base_url
self._app_name = app_name
self._username = username
# FIXME: store username, password only if user decided to cache token
self._password = password
self._token_timeout = token_timeout
self._ehr_username = app_username
self._cache_token = cache_token
self._token = self.get_token(self._app_name, self._username, self._password)
    def get_token(self, appname, username, password):
        """Acquire a security token from the TouchWorks GetToken endpoint.

        :param appname: application name.  NOTE(review): not included in the
            request payload -- confirm whether the API expects it here.
        :param username: TouchWorks account user name
        :param password: TouchWorks account password
        :return: a SecurityToken wrapping the UUID string returned by the API
        :raises TouchWorksException: if the request fails or the response
            body is not a valid version-4 UUID string
        """
        ext_exception = TouchWorksException(
            TouchWorksErrorMessages.GET_TOKEN_FAILED_ERROR)
        data = {'Username': username,
                'Password': password}
        resp = self._http_request(TouchWorksEndPoints.GET_TOKEN, data)
        try:
            logger.debug('token : %s' % resp)
            if not resp.text:
                raise ext_exception
            try:
                # The service returns the token as a bare UUID4 string;
                # parsing it doubles as validation of the response body.
                uuid.UUID(resp.text, version=4)
                return SecurityToken(resp.text)
            except ValueError:
                logger.error('response was not valid uuid string. %s' % resp.text)
                raise ext_exception
        except Exception as ex:
            # NOTE(review): the inner `raise ext_exception` paths are caught
            # again here, so a failure is logged twice before propagating.
            logger.exception(ex)
            raise ext_exception
def _token_valid(self):
"""
checks if the token cached is valid or has expired by comparing
the time token was created with current time
:return: True if token has not expired yet and False is token is empty or
it has expired
"""
if not self._cache_token:
return False
now = time.time()
if now - self._token.acquired_time > self._token_timeout:
logger.debug('token needs to be reset')
return False
return True
def _http_request(self, api, data, headers=None):
"""
internal method for handling request and response
and raising an exception is http return status code is not success
:rtype : response object from requests.post()
"""
if not headers:
headers = {'Content-Type': 'application/json'}
if not self._token_valid:
self._token = self.get_token(self._app_name, self._username, self._password)
response = requests.post(self._base_url + '/' + api, data=json.dumps(data),
headers=headers)
# raise an exception if the status was not 200
logger.debug(json.dumps(data))
logger.debug(response.text)
response.raise_for_status()
return response
def save_note(self, note_text, patient_id,
document_type,
document_status='Unsigned', wrapped_in_rtf='N'):
"""
invokes TouchWorksMagicConstants.ACTION_SAVE_NOTE action
:return: JSON response
"""
allowed_document_status = ['Unsigned', 'Final']
if document_status not in ['Unsigned', 'Final']:
raise ValueError('document_status was invalid. allowed values are %s' %
allowed_document_status)
magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_SAVE_NOTE,
patient_id=patient_id,
parameter1=note_text,
parameter2=document_type,
parameter3=document_status,
parameter4=wrapped_in_rtf)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SAVE_NOTE)
return result
def search_patients(self, search_criteria,
include_picture='N', organization_id=None):
"""
invokes TouchWorksMagicConstants.ACTION_SEARCH_PATIENTS action
:return: JSON response
"""
include_picture = include_picture or ''
organization_id = organization_id or ''
magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_SEARCH_PATIENTS,
app_name=self._app_name,
token=self._token.token,
parameter1=search_criteria,
parameter2=include_picture,
parameter3=organization_id)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SEARCH_PATIENTS)
return result
def get_document_type(self, ehr_username, doc_type):
"""
invokes TouchWorksMagicConstants.ACTION_GET_DOCUMENT_TYPE action
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_DOCUMENT_TYPE,
app_name=self._app_name,
user_id=ehr_username,
token=self._token.token,
parameter1=doc_type
)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_DOCUMENT_TYPE)
return result
def get_patient(self, ehr_username, patient_id):
"""
invokes TouchWorksMagicConstants.ACTION_GET_PATIENT_INFO action
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_PATIENT_INFO,
app_name=self._app_name,
user_id=ehr_username,
token=self._token.token,
patient_id=patient_id
)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_PATIENT_INFO)
return result
def get_encounter(self, ehr_username, patient_id):
"""
invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER action
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_ENCOUNTER,
app_name=self._app_name,
user_id=ehr_username,
token=self._token.token,
patient_id=patient_id
)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_ENCOUNTER)
return result
def get_dictionary(self, dictionary_name):
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_DICTIONARY,
parameter1=dictionary_name,
app_name=self._app_name,
token=self._token.token)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_DICTIONARY)
return result
def find_document_type_by_name(self, entity_name, active='Y',
match_case=True):
"""
search document types by name and active(Y/N) status
:param entity_name: entity name
:return:
"""
all_types = self.get_dictionary('Document_Type_DE')
if match_case:
filtered = filter(
lambda x: x['Active'] == active and x['EntryName'].find(entity_name) >= 0,
all_types)
else:
token = entity_name.lower()
filtered = filter(
lambda x: x['Active'] == active and x['EntryName'].lower().find(token) >= 0,
all_types)
return filtered
def get_encounter_list_for_patient(self, patient_id):
"""
invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT action
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT,
app_name=self._app_name,
token=self._token.token,
patient_id=patient_id)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_ENCOUNTER_LIST_FOR_PATIENT)
return result
def save_unstructured_document(self, ehr_username,
patient_id,
encounter_id,
document_content):
"""
invokes TouchWorksMagicConstants.ACTION_SAVE_UNSTRUCTURED_DATA action
:return: JSON response
"""
doc_xml = "<docParams><item name='documentCommand' value='I'/>" + \
"<item name='documentType' value='Chart'/>" + \
"<item name='authorCode' value='ResLet'/>" + \
"<item name='ahsEncounterID' value='@@ENCOUNTERID@@'/>" + \
"<item name='OrganizationID' value=''/>" + \
"<item name='accessionValue' value=''/>" + \
"<item name='appGroup' value='TouchWorks'/></docParams>"
doc_xml = doc_xml.replace("@@ENCOUNTERID@@", str(encounter_id))
print(doc_xml)
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SAVE_UNSTRUCTURED_DATA,
patient_id=patient_id,
user_id=ehr_username,
parameter1=doc_xml,
parameter2=document_content)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SAVE_UNSTRUCTURED_DATA)
return result
def set_patient_location_and_status(self, patient_id,
encounter_status,
patient_location):
"""
invokes TouchWorksMagicConstants.ACTION_SET_PATIENT_LOCATION_AND_STATUS action
:param encounter_status - EntryName from the Encounter_Status_DE dictionary.
The desired entryname can be looked up with the GetDictionary action.
:param patient_location - EntryName from the Site_Location_DE dictionary.
The desired entryname can be looked up with the GetDictionary action.
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_SET_PATIENT_LOCATION_AND_STATUS,
patient_id=patient_id,
parameter1=encounter_status,
parameter2=patient_location)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_SET_PATIENT_LOCATION_AND_STATUS)
return result
def get_clinical_summary(self, patient_id,
section,
encounter_id_identifer,
verbose=''):
"""
invokes TouchWorksMagicConstants.ACTION_GET_CLINICAL_SUMMARY action
:param patient_id:
:param section - if one of the following values is specified, Section indicates
which section of clinical data to return. If no Section is specified,
all sections with data are returned. You can specify multiple sections
using a pipe-delimited list. For example, "Vitals|Results."
List
ChiefComplaint
Vitals
Activities
Alerts
Problems
Results
History
Medications
Allergies
Immunizations
Orders
:param encounter_id_identifer - identifier for the encounter. Used in conjunction with
the "ChiefComplaint" when called in Parameter1. EncounterID can be acquired
with the Unity call GetEncounterList.
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_CLINICAL_SUMMARY,
patient_id=patient_id,
parameter1=section,
parameter2=encounter_id_identifer,
parameter3=verbose)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_CLINICAL_SUMMARY)
return result
    def get_patient_activity(self, patient_id, since=''):
        """
        invokes TouchWorksMagicConstants.ACTION_GET_PATIENT_ACTIVITY action
        (previous docstring incorrectly referenced
        ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT)
        :param patient_id: internal patient identifier
        :param since: activity cutoff passed as Parameter1 -- presumably a
            timestamp filter, empty for all activity; TODO confirm format
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_GET_PATIENT_ACTIVITY,
            patient_id=patient_id,
            parameter1=since)
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_GET_PATIENT_ACTIVITY)
        return result
    def set_patient_medhx_flag(self, patient_id,
                               medhx_status):
        """
        invokes TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG action
        (previous docstring incorrectly referenced
        ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT)
        :param patient_id: internal patient identifier
        :param medhx_status - Field in EEHR expects U, G, or D. SP defaults to Null and
        errors out if included.
        U=Unknown
        G=Granted
        D=Declined
        :return: JSON response
        """
        magic = self._magic_json(
            action=TouchWorksMagicConstants.ACTION_SET_PATIENT_MEDHX_FLAG,
            patient_id=patient_id,
            parameter1=medhx_status
        )
        response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
        result = self._get_results_or_raise_if_magic_invalid(
            magic,
            response,
            TouchWorksMagicConstants.RESULT_SET_PATIENT_MEDHX_FLAG)
        return result
def get_changes_patients(self, patient_id,
since,
clinical_data_only='Y',
verbose='Y',
quick_scan='Y',
which_field='',
what_value=''):
"""
invokes TouchWorksMagicConstants.ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT action
:return: JSON response
"""
magic = self._magic_json(
action=TouchWorksMagicConstants.ACTION_GET_CHANGED_PATIENTS,
patient_id=patient_id,
parameter1=since,
parameter2=clinical_data_only,
parameter3=verbose,
parameter4=quick_scan,
parameter5=which_field,
parameter6=what_value
)
response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
result = self._get_results_or_raise_if_magic_invalid(
magic,
response,
TouchWorksMagicConstants.RESULT_GET_CHANGED_PATIENTS)
return | |
# Dense (DenseNet-style) conv stage 7 of the second ("x"-prefixed) U-Net
# branch: every 3x3 conv receives the concatenation of the stage input xup7
# and all feature maps produced so far in the stage.
# NOTE(review): xup7, bn, kernel_size, l2_lambda (and the Conv2D producing
# xconv7a) are defined earlier in the file, outside this excerpt.
xconv7a = bn()(xconv7a)
merge0 = concatenate([xup7, xconv7a])
xconv7b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge0)
xconv7b = bn()(xconv7b)
merge1 = concatenate([xup7, xconv7a, xconv7b])
xconv7c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge1)
xconv7c = bn()(xconv7c)
merge2 = concatenate([xup7, xconv7a, xconv7b, xconv7c])
xconv7d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge2)
xconv7d = bn()(xconv7d)
merge3 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d])
xconv7e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge3)
xconv7e = bn()(xconv7e)
merge4 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e])
xconv7f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge4)
xconv7f = bn()(xconv7f)
merge5 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f])
xconv7g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge5)
xconv7g = bn()(xconv7g)
merge6 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g])
xconv7h = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge6)
xconv7h = bn()(xconv7h)
merge7 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h])
xconv7i = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge7)
xconv7i = bn()(xconv7i)
merge8 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i])
xconv7j = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge8)
xconv7j = bn()(xconv7j)
merge9 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j])
xconv7k = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge9)
xconv7k = bn()(xconv7k)
merge10 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k])
xconv7l = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge10)
xconv7l = bn()(xconv7l)
merge11 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l])
xconv7m = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge11)
xconv7m = bn()(xconv7m)
merge12 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l, xconv7m])
xconv7n = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge12)
xconv7n = bn()(xconv7n)
merge13 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l, xconv7m, xconv7n])
xconv7o = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge13)
xconv7o = bn()(xconv7o)
merge14 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l, xconv7m, xconv7n, xconv7o])
xconv7p = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge14)
xconv7p = bn()(xconv7p)
merge15 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l, xconv7m, xconv7n, xconv7o, xconv7p])
xconv7q = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge15)
xconv7q = bn()(xconv7q)
merge16 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l, xconv7m, xconv7n, xconv7o, xconv7p, xconv7q])
xconv7r = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge16)
xconv7r = bn()(xconv7r)
merge17 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l, xconv7m, xconv7n, xconv7o, xconv7p, xconv7q, xconv7r])
xconv7s = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge17)
xconv7s = bn()(xconv7s)
merge18 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l, xconv7m, xconv7n, xconv7o, xconv7p, xconv7q, xconv7r, xconv7s])
xconv7t = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge18)
xconv7t = bn()(xconv7t)
merge19 = concatenate([xup7, xconv7a, xconv7b, xconv7c, xconv7d, xconv7e, xconv7f, xconv7g, xconv7h, xconv7i, xconv7j, xconv7k, xconv7l, xconv7m, xconv7n, xconv7o, xconv7p, xconv7q, xconv7r, xconv7s, xconv7t])
# Stage transition conv widens to 128 channels before the next upsample.
xconv7u = Conv2D(128, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge19)
xconv7u = bn()(xconv7u)
# Upsample to stage 8 and merge with the encoder skip connection conv2j
# (defined earlier in the file, outside this excerpt).
xup8 = concatenate([Conv2DTranspose(12, (2, 2), strides=(2, 2), padding='same')(xconv7u), conv2j], name='xup8', axis=3)
xup8 = Dropout(DropP)(xup8)
#add third xoutxout here
# xoutput3 appears to be a side output head at this resolution (deep
# supervision) -- TODO confirm against the model's loss wiring.
xout8 = Conv2DTranspose(12, (2, 2), strides=(2, 2), padding='same')(xup8)
xout8 = bn()(xout8)
xoutput3 = Conv2D(1, (1, 1), activation='sigmoid', name='xoutput3')(xout8)
# Dense conv stage 8: same DenseNet-style connectivity as stage 7.
xconv8a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(xup8)
xconv8a = bn()(xconv8a)
merge0 = concatenate([xup8, xconv8a])
xconv8b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge0)
xconv8b = bn()(xconv8b)
merge1 = concatenate([xup8, xconv8a, xconv8b])
xconv8c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge1)
xconv8c = bn()(xconv8c)
merge2 = concatenate([xup8, xconv8a, xconv8b, xconv8c])
xconv8d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge2)
xconv8d = bn()(xconv8d)
merge3 = concatenate([xup8, xconv8a, xconv8b, xconv8c, xconv8d])
xconv8e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge3)
xconv8e = bn()(xconv8e)
merge4 = concatenate([xup8, xconv8a, xconv8b, xconv8c, xconv8d, xconv8e])
xconv8f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge4)
xconv8f = bn()(xconv8f)
merge5 = concatenate([xup8, xconv8a, xconv8b, xconv8c, xconv8d, xconv8e, xconv8f])
xconv8g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge5)
xconv8g = bn()(xconv8g)
merge6 = concatenate([xup8, xconv8a, xconv8b, xconv8c, xconv8d, xconv8e, xconv8f, xconv8g])
xconv8h = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge6)
xconv8h = bn()(xconv8h)
merge7 = concatenate([xup8, xconv8a, xconv8b, xconv8c, xconv8d, xconv8e, xconv8f, xconv8g, xconv8h])
xconv8i = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge7)
xconv8i = bn()(xconv8i)
merge8 = concatenate([xup8, xconv8a, xconv8b, xconv8c, xconv8d, xconv8e, xconv8f, xconv8g, xconv8h, xconv8i])
# Stage transition conv widens to 64 channels before the next upsample.
xconv8j = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge8)
xconv8j = bn()(xconv8j)
# Upsample to stage 9 (full resolution) and merge with encoder skip conv1d.
xup9 = concatenate([Conv2DTranspose(12, (2, 2), strides=(2, 2), padding='same')(xconv8j), conv1d], name='xup9', axis=3)
xup9 = Dropout(DropP)(xup9)
# NOTE(review): strides=(1, 1) here, unlike the (2, 2) used for the other
# side-output transposes -- looks deliberate (xup9 is already at full
# resolution) but worth confirming.
xout9 = Conv2DTranspose(12, (2, 2), strides=(1, 1), padding='same')(xup9)
xout9 = bn()(xout9)
xoutput4 = Conv2D(1, (1, 1), activation='sigmoid', name='xoutput4')(xout9)
# Final dense conv stage of the x-branch decoder.
xconv9a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(xup9)
xconv9a = bn()(xconv9a)
merge0 = concatenate([xup9, xconv9a])
xconv9b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge0)
xconv9b = bn()(xconv9b)
merge1 = concatenate([xup9, xconv9a, xconv9b])
xconv9c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge1)
xconv9c = bn()(xconv9c)
merge2 = concatenate([xup9, xconv9a, xconv9b, xconv9c])
xconv9d = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
                 kernel_regularizer=regularizers.l2(l2_lambda))(merge2)
xconv9d = bn()(xconv9d)
# 1x1 sigmoid head of the x-branch at full resolution.
xconv10 = Conv2D(1, (1, 1), activation='sigmoid', name='xconv10')(xconv9d)
# Fuse the x-branch side outputs into one more sigmoid head, then add each
# x-branch output element-wise to the corresponding output of the first
# U-Net branch (final_op, conv10, output1..output4 are defined earlier in
# the file, outside this excerpt).
xfinalmerge = concatenate([xout6, xout7, xout8, xout9, xconv9d])
xfinal_op = Conv2D(1, (1, 1), activation='sigmoid', name='xfinal_op')(xfinalmerge)
u_net_op0 = keras.layers.add([final_op, xfinal_op])
u_net_op1 = keras.layers.add([conv10, xconv10])
u_net_op2 = keras.layers.add([output4, xoutput4])
u_net_op3 = keras.layers.add([output3, xoutput3])
u_net_op4 = keras.layers.add([output2, xoutput2])
u_net_op5 = keras.layers.add([output1, xoutput1])
#Concatenation fed to the reconstruction layer
u_net_op_merge = concatenate([u_net_op0, u_net_op1, u_net_op2, u_net_op3, u_net_op4, u_net_op5])
xxconv1a = Conv2D( 12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(u_net_op_merge)
xxconv1a = bn()(xxconv1a)
xxconv1b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxconv1a)
xxconv1b = bn()(xxconv1b)
merge1=concatenate([xxconv1a,xxconv1b])
xxconv1c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
xxconv1c = bn()(xxconv1c)
merge2=concatenate([xxconv1a,xxconv1b,xxconv1c])
xxconv1d = Conv2D(32, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
xxconv1d = bn()(xxconv1d)
xxpool1 = MaxPooling2D(pool_size=(2, 2))(xxconv1d)
xxpool1 = Dropout(DropP)(xxpool1)
xxconv2a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxpool1)
xxconv2a = bn()(xxconv2a)
xxconv2b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxconv2a)
xxconv2b = bn()(xxconv2b)
merge1=concatenate([xxconv2a,xxconv2b])
xxconv2c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
xxconv2c = bn()(xxconv2c)
merge2=concatenate([xxconv2a,xxconv2b,xxconv2c])
xxconv2d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
xxconv2d = bn()(xxconv2d)
merge3=concatenate([xxconv2a,xxconv2b,xxconv2c,xxconv2d])
xxconv2e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge3)
xxconv2e = bn()(xxconv2e)
merge4=concatenate([xxconv2a,xxconv2b,xxconv2c,xxconv2d,xxconv2e])
xxconv2f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge4)
xxconv2f = bn()(xxconv2f)
merge5=concatenate([xxconv2a,xxconv2b,xxconv2c,xxconv2d,xxconv2e,xxconv2f])
xxconv2g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge5)
xxconv2g = bn()(xxconv2g)
merge6=concatenate([xxconv2a,xxconv2b,xxconv2c,xxconv2d,xxconv2e,xxconv2f,xxconv2g])
xxconv2h = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge6)
xxconv2h = bn()(xxconv2h)
merge7=concatenate([xxconv2a,xxconv2b,xxconv2c,xxconv2d,xxconv2e,xxconv2f,xxconv2g,xxconv2h])
# Last two convs of the level-2 dense block, then downsample.
xxconv2i = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge7)
# BUG FIX: normalize xxconv2i itself.  The original read `bn()(xxconv2g)`,
# which discarded the freshly computed xxconv2i and re-used the stale
# xxconv2g activation (copy-paste error; every sibling layer in this file
# normalizes its own output).
xxconv2i = bn()(xxconv2i)
merge8=concatenate([xxconv2a,xxconv2b,xxconv2c,xxconv2d,xxconv2e,xxconv2f,xxconv2g,xxconv2h,xxconv2i])
xxconv2j = Conv2D(64, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge8)
# BUG FIX: same copy-paste error — normalize xxconv2j, not xxconv2g.
# Without this fix the 64-channel transition conv was thrown away and the
# pooling below operated on a 12-channel tensor instead.
xxconv2j = bn()(xxconv2j)
xxpool2 = MaxPooling2D(pool_size=(2, 2))(xxconv2j)
xxpool2 = Dropout(DropP)(xxpool2)
# ---- Reconstruction network, encoder level 3 (dense block of 21 convs) ----
# Each conv consumes the concatenation of all previous activations of
# this level (DenseNet-style connectivity).
xxconv3a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxpool2)
xxconv3a = bn()(xxconv3a)
xxconv3b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxconv3a)
xxconv3b = bn()(xxconv3b)
merge1=concatenate([xxconv3a,xxconv3b])
xxconv3c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
xxconv3c = bn()(xxconv3c)
merge2=concatenate([xxconv3a,xxconv3b,xxconv3c])
xxconv3d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
xxconv3d = bn()(xxconv3d)
merge3=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d])
xxconv3e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge3)
xxconv3e = bn()(xxconv3e)
merge4=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e])
xxconv3f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge4)
xxconv3f = bn()(xxconv3f)
merge5=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f])
xxconv3g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge5)
xxconv3g = bn()(xxconv3g)
merge6=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g])
xxconv3h = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge6)
xxconv3h = bn()(xxconv3h)
merge7=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h])
xxconv3i = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge7)
xxconv3i = bn()(xxconv3i)
merge8=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i])
xxconv3j = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge8)
xxconv3j = bn()(xxconv3j)
merge9=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j])
xxconv3k = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge9)
xxconv3k = bn()(xxconv3k)
merge10=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k])
xxconv3l=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge10)
xxconv3l = bn()(xxconv3l)
merge11=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l])
xxconv3m=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge11)
xxconv3m = bn()(xxconv3m)
merge12=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l,xxconv3m])
xxconv3n=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge12)
xxconv3n = bn()(xxconv3n)
merge13=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l,xxconv3m,xxconv3n])
xxconv3o=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge13)
xxconv3o = bn()(xxconv3o)
merge14=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l,xxconv3m,xxconv3n,xxconv3o])
xxconv3p=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge14)
xxconv3p = bn()(xxconv3p)
merge15=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l,xxconv3m,xxconv3n,xxconv3o,xxconv3p])
xxconv3q=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge15)
xxconv3q = bn()(xxconv3q)
merge16=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l,xxconv3m,xxconv3n,xxconv3o,xxconv3p,xxconv3q])
xxconv3r=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge16)
xxconv3r = bn()(xxconv3r)
merge17=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l,xxconv3m,xxconv3n,xxconv3o,xxconv3p,xxconv3q,xxconv3r])
xxconv3s=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge17)
xxconv3s = bn()(xxconv3s)
merge18=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l,xxconv3m,xxconv3n,xxconv3o,xxconv3p,xxconv3q,xxconv3r,xxconv3s])
xxconv3t=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge18)
xxconv3t = bn()(xxconv3t)
merge19=concatenate([xxconv3a,xxconv3b,xxconv3c,xxconv3d,xxconv3e,xxconv3f,xxconv3g,xxconv3h,xxconv3i,xxconv3j,xxconv3k,xxconv3l,xxconv3m,xxconv3n,xxconv3o,xxconv3p,xxconv3q,xxconv3r,xxconv3s,xxconv3t])
# Transition conv widens to 128 channels before downsampling.
xxconv3u=Conv2D(128, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge19)
xxconv3u = bn()(xxconv3u)
xxpool3 = MaxPooling2D(pool_size=(2, 2))(xxconv3u)
xxpool3 = Dropout(DropP)(xxpool3)
# ---- Reconstruction network, encoder level 4 (dense block of 21 convs) ----
xxconv4a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxpool3)
xxconv4a = bn()(xxconv4a)
xxconv4b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxconv4a)
xxconv4b = bn()(xxconv4b)
merge1=concatenate([xxconv4a,xxconv4b])
xxconv4c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
xxconv4c = bn()(xxconv4c)
merge2=concatenate([xxconv4a,xxconv4b,xxconv4c])
xxconv4d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
xxconv4d = bn()(xxconv4d)
merge3=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d])
xxconv4e = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge3)
xxconv4e = bn()(xxconv4e)
merge4=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e])
xxconv4f = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge4)
xxconv4f = bn()(xxconv4f)
merge5=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f])
xxconv4g = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge5)
xxconv4g = bn()(xxconv4g)
merge6=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g])
xxconv4h = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge6)
xxconv4h = bn()(xxconv4h)
merge7=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h])
xxconv4i = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge7)
xxconv4i = bn()(xxconv4i)
merge8=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i])
xxconv4j = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge8)
xxconv4j = bn()(xxconv4j)
merge9=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j])
xxconv4k = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge9)
xxconv4k = bn()(xxconv4k)
merge10=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k])
xxconv4l=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge10)
xxconv4l = bn()(xxconv4l)
merge11=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l])
xxconv4m=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge11)
xxconv4m = bn()(xxconv4m)
merge12=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l,xxconv4m])
xxconv4n=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge12)
xxconv4n = bn()(xxconv4n)
merge13=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l,xxconv4m,xxconv4n])
xxconv4o=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge13)
xxconv4o = bn()(xxconv4o)
merge14=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l,xxconv4m,xxconv4n,xxconv4o])
xxconv4p=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge14)
xxconv4p = bn()(xxconv4p)
merge15=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l,xxconv4m,xxconv4n,xxconv4o,xxconv4p])
xxconv4q=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge15)
xxconv4q = bn()(xxconv4q)
merge16=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l,xxconv4m,xxconv4n,xxconv4o,xxconv4p,xxconv4q])
xxconv4r=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge16)
xxconv4r = bn()(xxconv4r)
merge17=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l,xxconv4m,xxconv4n,xxconv4o,xxconv4p,xxconv4q,xxconv4r])
xxconv4s=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge17)
xxconv4s = bn()(xxconv4s)
merge18=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l,xxconv4m,xxconv4n,xxconv4o,xxconv4p,xxconv4q,xxconv4r,xxconv4s])
xxconv4t=Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge18)
xxconv4t = bn()(xxconv4t)
merge19=concatenate([xxconv4a,xxconv4b,xxconv4c,xxconv4d,xxconv4e,xxconv4f,xxconv4g,xxconv4h,xxconv4i,xxconv4j,xxconv4k,xxconv4l,xxconv4m,xxconv4n,xxconv4o,xxconv4p,xxconv4q,xxconv4r,xxconv4s,xxconv4t])
# Transition conv widens to 256 channels before downsampling.
xxconv4u=Conv2D(256, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge19)
xxconv4u = bn()(xxconv4u)
xxpool4 = MaxPooling2D(pool_size=(2, 2))(xxconv4u)
xxpool4 = Dropout(DropP)(xxpool4)
# ---- Reconstruction network, level 5 (dense block; continues past this section) ----
xxconv5a = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxpool4)
xxconv5a = bn()(xxconv5a)
xxconv5b = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(xxconv5a)
xxconv5b = bn()(xxconv5b)
merge1=concatenate([xxconv5a,xxconv5b])
xxconv5c = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge1)
xxconv5c = bn()(xxconv5c)
merge2=concatenate([xxconv5a,xxconv5b,xxconv5c])
xxconv5d = Conv2D(12, (kernel_size, kernel_size), activation='relu', padding='same',
kernel_regularizer=regularizers.l2(l2_lambda) )(merge2)
xxconv5d = bn()(xxconv5d)
| |
= 2
elif LA2 in {STATE}:
alt2 = 3
elif LA2 in {DATA}:
alt2 = 4
elif LA2 in {NID, SET}:
alt2 = 5
else:
nvae = NoViableAltException("", 2, 0, self.input)
raise nvae
if alt2 == 1:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:171:16: int_decl
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_int_decl_in_declarations937)
int_decl10 = self.int_decl()
self._state.following.pop()
self._adaptor.addChild(root_0, int_decl10.tree)
elif alt2 == 2:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:171:27: bool_decl
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_bool_decl_in_declarations941)
bool_decl11 = self.bool_decl()
self._state.following.pop()
self._adaptor.addChild(root_0, bool_decl11.tree)
elif alt2 == 3:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:171:39: state_decl
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_state_decl_in_declarations945)
state_decl12 = self.state_decl()
self._state.following.pop()
self._adaptor.addChild(root_0, state_decl12.tree)
elif alt2 == 4:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:171:52: data_decl
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_data_decl_in_declarations949)
data_decl13 = self.data_decl()
self._state.following.pop()
self._adaptor.addChild(root_0, data_decl13.tree)
elif alt2 == 5:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:171:64: id_decl
pass
root_0 = self._adaptor.nil()
self._state.following.append(self.FOLLOW_id_decl_in_declarations953)
id_decl14 = self.id_decl()
self._state.following.pop()
self._adaptor.addChild(root_0, id_decl14.tree)
retval.stop = self.input.LT(-1)
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException as re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "declarations"
    class const_decl_return(ParserRuleReturnScope):
        # Return scope for rule "const_decl": carries the start/stop tokens
        # (from ParserRuleReturnScope) plus the rewritten AST in `tree`.
        def __init__(self):
            super().__init__()
            self.tree = None
    # $ANTLR start "const_decl"
    # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:173:5: const_decl : CONSTANT ID INT -> ^( CONSTANT_ ID INT ) ;
    def const_decl(self, ):
        """Parse rule ``const_decl : CONSTANT ID INT`` and rewrite the match
        into the AST ``^( CONSTANT_ ID INT )``.

        ANTLR-generated code; edit the grammar (ProtoCC.g), not this method.
        """
        retval = self.const_decl_return()
        retval.start = self.input.LT(1)

        root_0 = None

        CONSTANT15 = None
        ID16 = None
        INT17 = None

        CONSTANT15_tree = None
        ID16_tree = None
        INT17_tree = None
        # token streams buffer matched tokens for the AST rewrite below
        stream_CONSTANT = RewriteRuleTokenStream(self._adaptor, "token CONSTANT")
        stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
        stream_INT = RewriteRuleTokenStream(self._adaptor, "token INT")

        try:
            try:
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:173:16: ( CONSTANT ID INT -> ^( CONSTANT_ ID INT ) )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:173:18: CONSTANT ID INT
                pass
                CONSTANT15 = self.match(self.input, CONSTANT, self.FOLLOW_CONSTANT_in_const_decl965)
                stream_CONSTANT.add(CONSTANT15)

                ID16 = self.match(self.input, ID, self.FOLLOW_ID_in_const_decl967)
                stream_ID.add(ID16)

                INT17 = self.match(self.input, INT, self.FOLLOW_INT_in_const_decl969)
                stream_INT.add(INT17)

                # AST Rewrite
                # elements: ID, INT
                # token labels:
                # rule labels: retval
                # token list labels:
                # rule list labels:
                # wildcard labels:
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                root_0 = self._adaptor.nil()
                # 173:34: -> ^( CONSTANT_ ID INT )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:173:37: ^( CONSTANT_ ID INT )
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(CONSTANT_, "CONSTANT_")
                    , root_1)

                self._adaptor.addChild(root_1,
                                       stream_ID.nextNode()
                                       )

                self._adaptor.addChild(root_1,
                                       stream_INT.nextNode()
                                       )

                self._adaptor.addChild(root_0, root_1)

                retval.tree = root_0

                retval.stop = self.input.LT(-1)

                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

            except RecognitionException as re:
                # standard ANTLR error recovery: report, resync, emit error node
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

        finally:
            pass
        return retval
    # $ANTLR end "const_decl"
    class int_decl_return(ParserRuleReturnScope):
        # Return scope for rule "int_decl": start/stop tokens plus the
        # rewritten AST in `tree`.
        def __init__(self):
            super().__init__()
            self.tree = None
    # $ANTLR start "int_decl"
    # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:175:5: int_decl : INTID range ID ( EQUALSIGN INT )* SEMICOLON -> ^( INT_ range ID ( INITVAL_ INT )* ) ;
    def int_decl(self, ):
        """Parse rule ``int_decl : INTID range ID ( EQUALSIGN INT )* SEMICOLON``
        and rewrite it into ``^( INT_ range ID ( INITVAL_ INT )* )``.

        ANTLR-generated code; edit the grammar (ProtoCC.g), not this method.
        """
        retval = self.int_decl_return()
        retval.start = self.input.LT(1)

        root_0 = None

        INTID18 = None
        ID20 = None
        EQUALSIGN21 = None
        INT22 = None
        SEMICOLON23 = None
        range19 = None

        INTID18_tree = None
        ID20_tree = None
        EQUALSIGN21_tree = None
        INT22_tree = None
        SEMICOLON23_tree = None
        # token/subtree streams buffer matches for the AST rewrite below
        stream_EQUALSIGN = RewriteRuleTokenStream(self._adaptor, "token EQUALSIGN")
        stream_INTID = RewriteRuleTokenStream(self._adaptor, "token INTID")
        stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
        stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
        stream_INT = RewriteRuleTokenStream(self._adaptor, "token INT")
        stream_range = RewriteRuleSubtreeStream(self._adaptor, "rule range")
        try:
            try:
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:175:14: ( INTID range ID ( EQUALSIGN INT )* SEMICOLON -> ^( INT_ range ID ( INITVAL_ INT )* ) )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:175:16: INTID range ID ( EQUALSIGN INT )* SEMICOLON
                pass
                INTID18 = self.match(self.input, INTID, self.FOLLOW_INTID_in_int_decl991)
                stream_INTID.add(INTID18)

                # descend into sub-rule `range`
                self._state.following.append(self.FOLLOW_range_in_int_decl993)
                range19 = self.range()

                self._state.following.pop()
                stream_range.add(range19.tree)

                ID20 = self.match(self.input, ID, self.FOLLOW_ID_in_int_decl995)
                stream_ID.add(ID20)

                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:175:31: ( EQUALSIGN INT )*
                # zero-or-more initial values: `= INT` repeated
                while True: #loop3
                    alt3 = 2
                    LA3_0 = self.input.LA(1)

                    if (LA3_0 == EQUALSIGN) :
                        alt3 = 1

                    if alt3 == 1:
                        # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:175:32: EQUALSIGN INT
                        pass
                        EQUALSIGN21 = self.match(self.input, EQUALSIGN, self.FOLLOW_EQUALSIGN_in_int_decl998)
                        stream_EQUALSIGN.add(EQUALSIGN21)

                        INT22 = self.match(self.input, INT, self.FOLLOW_INT_in_int_decl1000)
                        stream_INT.add(INT22)

                    else:
                        break #loop3

                SEMICOLON23 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_int_decl1004)
                stream_SEMICOLON.add(SEMICOLON23)

                # AST Rewrite
                # elements: range, ID, INT
                # token labels:
                # rule labels: retval
                # token list labels:
                # rule list labels:
                # wildcard labels:
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                root_0 = self._adaptor.nil()
                # 175:58: -> ^( INT_ range ID ( INITVAL_ INT )* )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:175:61: ^( INT_ range ID ( INITVAL_ INT )* )
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(INT_, "INT_")
                    , root_1)

                self._adaptor.addChild(root_1, stream_range.nextTree())

                self._adaptor.addChild(root_1,
                                       stream_ID.nextNode()
                                       )

                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:175:77: ( INITVAL_ INT )*
                # one INITVAL_ marker node per matched initial value
                while stream_INT.hasNext():
                    self._adaptor.addChild(root_1,
                                           self._adaptor.createFromType(INITVAL_, "INITVAL_")
                                           )

                    self._adaptor.addChild(root_1,
                                           stream_INT.nextNode()
                                           )

                stream_INT.reset();

                self._adaptor.addChild(root_0, root_1)

                retval.tree = root_0

                retval.stop = self.input.LT(-1)

                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

            except RecognitionException as re:
                # standard ANTLR error recovery: report, resync, emit error node
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

        finally:
            pass
        return retval
    # $ANTLR end "int_decl"
    class bool_decl_return(ParserRuleReturnScope):
        # Return scope for rule "bool_decl": start/stop tokens plus the
        # rewritten AST in `tree`.
        def __init__(self):
            super().__init__()
            self.tree = None
    # $ANTLR start "bool_decl"
    # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:176:5: bool_decl : BOOLID ID ( EQUALSIGN BOOL )* SEMICOLON -> ^( BOOL_ ID ( INITVAL_ BOOL )* ) ;
    def bool_decl(self, ):
        """Parse rule ``bool_decl : BOOLID ID ( EQUALSIGN BOOL )* SEMICOLON``
        and rewrite it into ``^( BOOL_ ID ( INITVAL_ BOOL )* )``.

        ANTLR-generated code; edit the grammar (ProtoCC.g), not this method.
        """
        retval = self.bool_decl_return()
        retval.start = self.input.LT(1)

        root_0 = None

        BOOLID24 = None
        ID25 = None
        EQUALSIGN26 = None
        BOOL27 = None
        SEMICOLON28 = None

        BOOLID24_tree = None
        ID25_tree = None
        EQUALSIGN26_tree = None
        BOOL27_tree = None
        SEMICOLON28_tree = None
        # token streams buffer matches for the AST rewrite below
        stream_EQUALSIGN = RewriteRuleTokenStream(self._adaptor, "token EQUALSIGN")
        stream_BOOL = RewriteRuleTokenStream(self._adaptor, "token BOOL")
        stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
        stream_BOOLID = RewriteRuleTokenStream(self._adaptor, "token BOOLID")
        stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
        try:
            try:
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:176:15: ( BOOLID ID ( EQUALSIGN BOOL )* SEMICOLON -> ^( BOOL_ ID ( INITVAL_ BOOL )* ) )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:176:17: BOOLID ID ( EQUALSIGN BOOL )* SEMICOLON
                pass
                BOOLID24 = self.match(self.input, BOOLID, self.FOLLOW_BOOLID_in_bool_decl1032)
                stream_BOOLID.add(BOOLID24)

                ID25 = self.match(self.input, ID, self.FOLLOW_ID_in_bool_decl1034)
                stream_ID.add(ID25)

                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:176:27: ( EQUALSIGN BOOL )*
                # zero-or-more initial values: `= BOOL` repeated
                while True: #loop4
                    alt4 = 2
                    LA4_0 = self.input.LA(1)

                    if (LA4_0 == EQUALSIGN) :
                        alt4 = 1

                    if alt4 == 1:
                        # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:176:28: EQUALSIGN BOOL
                        pass
                        EQUALSIGN26 = self.match(self.input, EQUALSIGN, self.FOLLOW_EQUALSIGN_in_bool_decl1037)
                        stream_EQUALSIGN.add(EQUALSIGN26)

                        BOOL27 = self.match(self.input, BOOL, self.FOLLOW_BOOL_in_bool_decl1039)
                        stream_BOOL.add(BOOL27)

                    else:
                        break #loop4

                SEMICOLON28 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_bool_decl1043)
                stream_SEMICOLON.add(SEMICOLON28)

                # AST Rewrite
                # elements: ID, BOOL
                # token labels:
                # rule labels: retval
                # token list labels:
                # rule list labels:
                # wildcard labels:
                retval.tree = root_0
                if retval is not None:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                else:
                    stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                root_0 = self._adaptor.nil()
                # 176:55: -> ^( BOOL_ ID ( INITVAL_ BOOL )* )
                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:176:58: ^( BOOL_ ID ( INITVAL_ BOOL )* )
                root_1 = self._adaptor.nil()
                root_1 = self._adaptor.becomeRoot(
                    self._adaptor.createFromType(BOOL_, "BOOL_")
                    , root_1)

                self._adaptor.addChild(root_1,
                                       stream_ID.nextNode()
                                       )

                # /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:176:69: ( INITVAL_ BOOL )*
                # one INITVAL_ marker node per matched initial value
                while stream_BOOL.hasNext():
                    self._adaptor.addChild(root_1,
                                           self._adaptor.createFromType(INITVAL_, "INITVAL_")
                                           )

                    self._adaptor.addChild(root_1,
                                           stream_BOOL.nextNode()
                                           )

                stream_BOOL.reset();

                self._adaptor.addChild(root_0, root_1)

                retval.tree = root_0

                retval.stop = self.input.LT(-1)

                retval.tree = self._adaptor.rulePostProcessing(root_0)
                self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

            except RecognitionException as re:
                # standard ANTLR error recovery: report, resync, emit error node
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

        finally:
            pass
        return retval
    # $ANTLR end "bool_decl"
    class state_decl_return(ParserRuleReturnScope):
        # Return scope for rule "state_decl": start/stop tokens plus the
        # rewritten AST in `tree`.
        def __init__(self):
            super().__init__()
            self.tree = None
# $ANTLR start "state_decl"
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:178:5: state_decl : STATE ID SEMICOLON -> ^( INITSTATE_ ID ) ;
def state_decl(self, ):
retval = self.state_decl_return()
retval.start = self.input.LT(1)
root_0 = None
STATE29 = None
ID30 = None
SEMICOLON31 = None
STATE29_tree = None
ID30_tree = None
SEMICOLON31_tree = None
stream_SEMICOLON = RewriteRuleTokenStream(self._adaptor, "token SEMICOLON")
stream_STATE = RewriteRuleTokenStream(self._adaptor, "token STATE")
stream_ID = RewriteRuleTokenStream(self._adaptor, "token ID")
try:
try:
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:178:16: ( STATE ID SEMICOLON -> ^( INITSTATE_ ID ) )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:178:18: STATE ID SEMICOLON
pass
STATE29 = self.match(self.input, STATE, self.FOLLOW_STATE_in_state_decl1070)
stream_STATE.add(STATE29)
ID30 = self.match(self.input, ID, self.FOLLOW_ID_in_state_decl1072)
stream_ID.add(ID30)
SEMICOLON31 = self.match(self.input, SEMICOLON, self.FOLLOW_SEMICOLON_in_state_decl1074)
stream_SEMICOLON.add(SEMICOLON31)
# AST Rewrite
# elements: ID
# token labels:
# rule labels: retval
# token list labels:
# rule list labels:
# wildcard labels:
retval.tree = root_0
if retval is not None:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
else:
stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)
root_0 = self._adaptor.nil()
# 178:37: -> ^( INITSTATE_ ID )
# /home/tux/PycharmProjects/ProtoGen_public/Parser/ProtoCC.g:178:40: | |
""" Analysis of the content of BioModels
:Author: <NAME> <<EMAIL>>
:Date: 2017-05-31
:Copyright: 2017, Karr Lab
:License: MIT
"""
import bioservices
import ete3
import glob
import datanator.data_source.bio_portal
import libsbml
import os
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
import subprocess
import wc_utils.util.list
import wc_utils.workbook.core
import wc_utils.workbook.io
# BioModels release (2016-05-10 snapshot) analyzed by this module
RELEASE_NAME = 'BioModels_Database-r30_pub-sbml_files'
# FTP location of the release's SBML archive
SBML_FILES_URL = 'ftp://ftp.ebi.ac.uk/pub/databases/biomodels/releases/2016-05-10/{}.tar.bz2'.format(RELEASE_NAME)
# local cache layout: everything lives under ./data next to this module
DATA_DIRNAME = os.path.join(os.path.dirname(__file__), 'data')
SBML_FILES_ARCHIVE_FILENAME = os.path.join(DATA_DIRNAME, 'sbml_files.tar.bz2')
SBML_FILES_DIRNAME = os.path.join(DATA_DIRNAME, 'sbml_files')
SBML_FILES_DATABASE_FILENAME = os.path.join(DATA_DIRNAME, 'sbml_files.sqlite')
ANNOTATIONS_EXCEL_FILENAME = os.path.join(DATA_DIRNAME, 'models.xlsx')
SUMMARY_EXCEL_FILENAME = os.path.join(DATA_DIRNAME, 'summary.xlsx')
def create_data_directory():
    """ Create the local data directory if it does not already exist. """
    # exist_ok avoids the check-then-create race of isdir() + makedirs()
    os.makedirs(DATA_DIRNAME, exist_ok=True)
def download_biomodels():
    """ Download the BioModels release archive and extract its content.

    Both steps are skipped when their output already exists.  Uses
    :obj:`subprocess.check_call` so a failed download/unpack raises instead
    of silently leaving a partial file that would make the next run skip
    the download (the original ``subprocess.call`` ignored exit codes).
    """
    # download release
    if not os.path.isfile(SBML_FILES_ARCHIVE_FILENAME):
        print('Downloading BioModels ...')
        try:
            subprocess.check_call(['wget', SBML_FILES_URL, '-O', SBML_FILES_ARCHIVE_FILENAME])
        except Exception:
            # remove the partial download so the next run retries it
            if os.path.isfile(SBML_FILES_ARCHIVE_FILENAME):
                os.remove(SBML_FILES_ARCHIVE_FILENAME)
            raise
        print('  done.')

    # extract release archive
    if not os.path.isdir(SBML_FILES_DIRNAME):
        print('Unpacking BioModels ...')
        subprocess.check_call(['tar', '-xvjf', SBML_FILES_ARCHIVE_FILENAME, '-C', DATA_DIRNAME])
        os.rename(os.path.join(DATA_DIRNAME, RELEASE_NAME), SBML_FILES_DIRNAME)
        print('  done.')
def get_database_engine():
    """ Build a SQLAlchemy engine for the local SQLite database file.

    Returns:
        :obj:`sqlalchemy.engine.Engine`: database engine
    """
    db_url = 'sqlite:///' + SBML_FILES_DATABASE_FILENAME
    return sqlalchemy.create_engine(db_url)
def get_database_session():
    """ Open a new session bound to the local database engine.

    Returns:
        :obj:`sqlalchemy.orm.session.Session`: sqlalchemy session
    """
    session_factory = sqlalchemy.orm.sessionmaker(bind=get_database_engine())
    return session_factory()
def setup_database(clear=False):
    """ Ensure the database schema exists.

    Args:
        clear (:obj:`bool`, optional): if :obj:`True`, rebuild the schema
            even when the database file already exists
    """
    # (re)create the schema when forced or when the file is missing;
    # os.path.isfile has no side effects, so the reordered short-circuit
    # is equivalent to the original condition
    if clear or not os.path.isfile(SBML_FILES_DATABASE_FILENAME):
        clear_database()
def clear_database():
    """ Drop and re-create every table declared on ``Base``. """
    db_engine = get_database_engine()
    # drop first, then create, so the schema ends up empty but present
    Base.metadata.drop_all(db_engine)
    Base.metadata.create_all(db_engine)
def load_database():
    """ Parse all curated and non-curated SBML files of the release into the
    database.

    No-op when models were already loaded.  The two previously duplicated
    loops are factored into one parameterized loop, and the unused
    ``model = ...`` assignments are dropped.
    """
    sbml_reader = libsbml.SBMLReader()
    session = get_database_session()
    if session.query(Model).count() > 0:
        return

    print('Loading models ...')
    for subdir, curated, label in (('curated', True, 'curated'),
                                   ('non_curated', False, 'non-curated')):
        filenames = sorted(glob.glob(os.path.join(SBML_FILES_DIRNAME, subdir, '*.xml')))
        for i_model, filename in enumerate(filenames):
            if i_model % 100 == 0:
                # progress messages are identical to the original output
                print('  Loading {} model {} of {}'.format(label, i_model + 1, len(filenames)))
            load_model_into_database(filename, curated, sbml_reader, session)
    print('  done.')

    session.commit()
def load_model_into_database(filename, curated, sbml_reader, session):
    """Parse one SBML file and upsert its metadata into the database.

    Args:
        filename (:obj:`str`): path to a SBML file
        curated (:obj:`bool`): :obj:`True`, the model has been curated
        sbml_reader (:obj:`libsbml.SBMLReader`): SBML file reader
        session (:obj:`sqlalchemy.orm.session.Session`): sqlalchemy session

    Returns:
        :obj:`Model`: model, or :obj:`None` when the file contains no model
    """
    # todo: detect mathematical type (ODE, SSA, logical, FBA, spatial, rule-based)
    doc = sbml_reader.readSBMLFromFile(filename)
    sbml_model = doc.getModel()
    if not sbml_model:
        return None

    # Use the file basename (sans '.xml') as the stable database id.
    # Renamed from `id`/`type` to avoid shadowing the builtins.
    model_id, _, _ = os.path.basename(filename).partition('.xml')
    annotations = parse_model_annotations(sbml_model, session)
    model_type = parse_model_type(doc, annotations)

    # Parameters may be declared on each reaction's kinetic law, both as
    # (deprecated) global-style parameters and as local parameters.
    num_reaction_parameters = 0
    reactions_sbml = sbml_model.getListOfReactions()
    for i_reaction in range(sbml_model.getNumReactions()):
        kinetic_law_sbml = reactions_sbml.get(i_reaction).getKineticLaw()
        if kinetic_law_sbml:
            num_reaction_parameters += kinetic_law_sbml.getNumParameters()
            num_reaction_parameters += kinetic_law_sbml.getNumLocalParameters()

    model = get_or_create_object(session, Model, id=model_id)
    model.label = sbml_model.getId()
    model.name = sbml_model.getName()
    model.type = model_type
    model.compartments = sbml_model.getNumCompartments()
    model.species = sbml_model.getNumSpecies()
    model.rules = sbml_model.getNumRules()
    model.reactions = sbml_model.getNumReactions()
    model.global_parameters = sbml_model.getNumParameters()
    model.reaction_parameters = num_reaction_parameters
    model.curated = curated
    model.annotations.extend(annotations)
    session.add(model)
    return model
def _kinetic_law_has_fba_parameters(kinetic_law):
    """Return :obj:`True` when `kinetic_law` declares all four FBA marker
    parameters (LOWER_BOUND, UPPER_BOUND, FLUX_VALUE, OBJECTIVE_COEFFICIENT),
    looking through both its global-style and local parameter lists."""
    found = set()
    param_lists = (
        (kinetic_law.getListOfParameters(), kinetic_law.getNumParameters()),
        (kinetic_law.getListOfLocalParameters(), kinetic_law.getNumLocalParameters()),
    )
    for parameters, n_parameters in param_lists:
        for i_parameter in range(n_parameters):
            found.add(parameters.get(i_parameter).getId())
    return {'LOWER_BOUND', 'UPPER_BOUND', 'FLUX_VALUE', 'OBJECTIVE_COEFFICIENT'} <= found


def parse_model_type(doc, annotations):
    """Infer the mathematical framework of an SBML model.

    Checks, in order: required SBML packages (spatial, qual, multi), a MAMO
    ODE annotation, the fbc package, and finally the legacy FBA convention of
    bound/objective parameters on a reaction's kinetic law.

    Args:
        doc (:obj:`libsbml.SBMLDocument`): SBML document
        annotations (:obj:`list` of :obj:`Annotation`): list of annotations

    Returns:
        :obj:`str`: model type, or :obj:`None` when it cannot be determined
    """
    model = doc.getModel()

    if doc.getPackageRequired('spatial'):
        return 'spatial'
    if doc.getPackageRequired('qual'):
        return 'logical'
    if doc.getPackageRequired('multi'):
        return 'rule-based'

    # MAMO_0000046 is the MAMO term annotating ODE models.
    for annotation in annotations:
        if annotation.namespace == 'mamo' and annotation.id == 'MAMO_0000046':
            return 'ordinary differential equation'

    if doc.getPackageRequired('fbc'):
        return 'flux balance analysis'

    # Legacy FBA models (pre-fbc) mark bounds/objective as kinetic-law
    # parameters; one fully-marked reaction is enough.
    reactions = model.getListOfReactions()
    for i_reaction in range(model.getNumReactions()):
        kinetic_law = reactions.get(i_reaction).getKineticLaw()
        if kinetic_law and _kinetic_law_has_fba_parameters(kinetic_law):
            return 'flux balance analysis'

    return None
def parse_model_annotations(model, session):
    """Extract the RDF resource annotations of an SBML model.

    Args:
        model (:obj:`libsbml.Model`): model
        session (:obj:`sqlalchemy.orm.session.Session`): sqlalchemy session

    Returns:
        :obj:`list` of :obj:`Annotation`: list of annotations

    Raises:
        :obj:`ValueError`: if the RDF annotation structure is not the
            expected relationship/Bag/li nesting
    """
    if not model.isSetAnnotation():
        # Bug fix: return an empty list (the original returned `{}`), matching
        # the documented return type and the callers that extend/iterate it.
        return []
    annotations_sbml = model.getAnnotation().getChild('RDF').getChild('Description')
    annotations = []
    attr = libsbml.XMLTriple('resource', 'http://www.w3.org/1999/02/22-rdf-syntax-ns#', 'rdf')
    for i_child in range(annotations_sbml.getNumChildren()):
        child = annotations_sbml.getChild(i_child)
        relationship = child.getName()
        # Authorship/date metadata is not a cross-reference; skip it.
        if relationship in ['creator', 'created', 'modified']:
            continue
        for i_bag in range(child.getNumChildren()):
            bag = child.getChild(i_bag)
            if bag.getName() != 'Bag':
                raise ValueError('Expected Bag, got {0}.{1} for model {2}'.format(child.getName(), bag.getName(), model.getId()))
            for i_li in range(bag.getNumChildren()):
                li = bag.getChild(i_li)
                if li.getName() != 'li':
                    raise ValueError('Expected {0}.{1}.li, got {0}.{1}.{2} for model {3}'.format(
                        child.getName(), bag.getName(), li.getName(), model.getId()))
                resource = li.getAttrValue(attr)
                # identifiers.org URIs split as /<namespace>/<id...>; anything
                # else is stored verbatim under the 'url' namespace.
                if resource.startswith('http://identifiers.org/'):
                    tmp = resource.split('/')
                    namespace = tmp[3]
                    term_id = '/'.join(tmp[4:])
                else:
                    namespace = 'url'
                    term_id = resource
                annotations.append(get_or_create_object(session, Annotation, namespace=namespace, id=term_id, relationship=relationship))
    return annotations
def get_or_create_object(session, cls, **kwargs):
    """Fetch the first `cls` row matching `kwargs`, or construct a new instance.

    A newly constructed instance is *not* added to the session; that is the
    caller's responsibility.

    Args:
        session (:obj:`sqlalchemy.orm.session.Session`): sqlalchemy session
        cls (:obj:`type`): class to search or create
        **kwargs (:obj:`dict`): dictionary of keyword arguments to pass to filter_by and the class constructor

    Returns:
        :obj:`Base`: existing or newly constructed object
    """
    # Single query instead of the original count()-then-first() pair,
    # which issued two round trips to the database.
    existing = session.query(cls).filter_by(**kwargs).first()
    if existing is not None:
        return existing
    return cls(**kwargs)
def model_to_str(model):
    """Render a model and its annotations as a multi-line string.

    Args:
        model (:obj:`Model`): model

    Returns:
        :obj:`str`: string representation of model
    """
    # Build the parts and join once; also avoids shadowing the builtin `str`
    # as the original accumulator variable did.
    parts = [model.id]
    for annotation in model.annotations:
        parts.append(' {}: {}:{}'.format(annotation.relationship, annotation.namespace, annotation.id))
    return '\n'.join(parts)
def export_annotations_to_excel():
    """Export all model metadata and annotations to ANNOTATIONS_EXCEL_FILENAME.

    Builds a workbook with four sheets -- Models, Model annotations,
    Annotations and Namespaces -- resolving human-readable descriptions for
    annotation terms via BioPortal, KEGG, Reactome and NCBI Taxonomy.
    Skips all work when the spreadsheet already exists.
    """
    # Idempotent: never overwrite a previously generated workbook.
    if os.path.isfile(ANNOTATIONS_EXCEL_FILENAME):
        return
    session = get_database_session()
    wb = wc_utils.workbook.core.Workbook()
    ws_models = wb['Models'] = wc_utils.workbook.core.Worksheet()
    ws_model_annotations = wb['Model annotations'] = wc_utils.workbook.core.Worksheet()
    ws_annotations = wb['Annotations'] = wc_utils.workbook.core.Worksheet()
    ws_namespaces = wb['Namespaces'] = wc_utils.workbook.core.Worksheet()
    # All four sheets share the same bold grey header-row/column style.
    style = wc_utils.workbook.io.WorkbookStyle()
    style['Models'] = wc_utils.workbook.io.WorksheetStyle(
        head_rows=1, head_columns=1,
        head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
    style['Model annotations'] = wc_utils.workbook.io.WorksheetStyle(
        head_rows=1, head_columns=1,
        head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
    style['Annotations'] = wc_utils.workbook.io.WorksheetStyle(
        head_rows=1, head_columns=1,
        head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
    style['Namespaces'] = wc_utils.workbook.io.WorksheetStyle(
        head_rows=1, head_columns=1,
        head_row_font_bold=True, head_row_fill_fgcolor='CCCCCC', row_height=15)
    # Header rows.
    ws_models.append(wc_utils.workbook.core.Row([
        'ID', 'Label', 'Name', 'Type',
        'Compartments', 'Species', 'Rules', 'Reactions', 'Global parameters', 'Reaction parameters',
        'Superkingdom', 'Kingdom', 'Phylum', 'Species',
        'Is curated', 'Number annotations']))
    ws_model_annotations.append(wc_utils.workbook.core.Row(['Model', 'Relationship', 'Namespace', 'ID', 'Description']))
    ws_annotations.append(wc_utils.workbook.core.Row(['Relationship', 'Namespace', 'Frequency']))
    ws_namespaces.append(wc_utils.workbook.core.Row(['Relationship', 'Namespace', 'Frequency']) if False else wc_utils.workbook.core.Row(['Namespace', 'Frequency']))
    # External services used to resolve term descriptions.
    bio_portal = datanator.data_source.bio_portal.BioPortal()
    bio_portal_ontologies = bio_portal.get_ontologies()
    del(bio_portal_ontologies['MAMO'])  # remove MAMO because its OWL can't be parsed by pronto
    kegg = bioservices.kegg.KEGG()
    reactome = bioservices.reactome.Reactome()
    loaded_ontologies = {}  # cache of BioPortal ontologies, loaded on demand
    ncbi_taxa = ete3.NCBITaxa()
    n_model = session.query(Model).count()
    print('Annotating models ...')
    for i_model, model in enumerate(session.query(Model).order_by(Model.id).all()):
        if i_model % 100 == 0:
            print(' Annotating model {} of {}'.format(i_model + 1, n_model))

        # Resolve taxonomy rank names from the model's first taxonomy
        # annotation (int(float(...)) tolerates ids stored as '562.0').
        species_name = None
        phylum_name = None
        kingdom_name = None
        superkingdom_name = None
        taxon_id = next((int(float(a.id)) for a in model.annotations if a.namespace == 'taxonomy'), None)
        if taxon_id:
            # NOTE: the loop variable deliberately reuses/shadows `taxon_id`
            # while walking the lineage.
            for taxon_id, rank in ncbi_taxa.get_rank(ncbi_taxa.get_lineage(taxon_id)).items():
                if rank == 'species':
                    species_name = ncbi_taxa.translate_to_names([taxon_id])[0]
                if rank == 'phylum':
                    phylum_name = ncbi_taxa.translate_to_names([taxon_id])[0]
                if rank == 'kingdom':
                    kingdom_name = ncbi_taxa.translate_to_names([taxon_id])[0]
                if rank == 'superkingdom':
                    superkingdom_name = ncbi_taxa.translate_to_names([taxon_id])[0]

        # One row per model; zero counts are exported as blank cells
        # via `or None`.
        ws_models.append(wc_utils.workbook.core.Row([
            model.id, model.label, model.name, model.type,
            model.compartments or None, model.species or None, model.rules or None, model.reactions or None,
            model.global_parameters or None, model.reaction_parameters or None,
            superkingdom_name, kingdom_name, phylum_name, species_name,
            model.curated, len(model.annotations)
        ]))

        # One row per annotation, with a description resolved from the
        # matching external service where possible.
        for annotation in sorted(model.annotations, key=lambda ann: (ann.relationship, ann.namespace, ann.id)):
            onto_id = annotation.namespace.upper()
            if onto_id.startswith('OBO.'):
                onto_id = onto_id[4:]
            term_id = annotation.id
            if onto_id in bio_portal_ontologies and term_id.startswith(onto_id + ':'):
                # Ontology term: load the ontology once, then look up the term.
                if onto_id not in loaded_ontologies:
                    loaded_ontologies[onto_id] = bio_portal.get_ontology(onto_id)
                onto = loaded_ontologies[onto_id]
                if term_id in onto:
                    description = onto[term_id].name
                else:
                    description = None
            elif annotation.namespace == 'kegg.pathway':
                # kegg.parse returns a dict on success, a string on failure.
                md = kegg.parse(kegg.get(annotation.id))
                if isinstance(md, dict):
                    description = md['NAME'][0]
                else:
                    description = None
            elif annotation.namespace == 'reactome':
                md = reactome.query_by_id('Pathway', annotation.id)
                if 'displayName' in md:
                    description = md['displayName']
                else:
                    description = None
            elif annotation.namespace == 'taxonomy':
                description = ncbi_taxa.translate_to_names([int(float(annotation.id))])[0]
            else:
                description = None
            ws_model_annotations.append(wc_utils.workbook.core.Row(
                [model.id, annotation.relationship, annotation.namespace, annotation.id, description]))
    print(' done')

    # Frequency of each (relationship, namespace) pair across models.
    q = session \
        .query(Annotation.relationship, Annotation.namespace, sqlalchemy.func.count(Model._id)) \
        .join(Model, Annotation.models) \
        .group_by(Annotation.relationship, Annotation.namespace) \
        .order_by(sqlalchemy.func.count(Model._id).desc())
    for relationship, namespace, count in q.all():
        ws_annotations.append(wc_utils.workbook.core.Row([relationship, namespace, count]))

    # Frequency of each namespace across models.
    q = session \
        .query(Annotation.namespace, sqlalchemy.func.count(Model._id)) \
        .join(Model, Annotation.models) \
        .group_by(Annotation.namespace) \
        .order_by(sqlalchemy.func.count(Model._id).desc())
    for namespace, count in q.all():
        ws_namespaces.append(wc_utils.workbook.core.Row([namespace, count]))

    wc_utils.workbook.io.ExcelWriter(ANNOTATIONS_EXCEL_FILENAME).run(wb, style=style)
def summarize_models():
    """Write pathway, taxonomy and mathematical-type summary sheets to
    SUMMARY_EXCEL_FILENAME."""
    def _header_style():
        # Shared look: one bold grey header row/column, 15pt rows.
        return wc_utils.workbook.io.WorksheetStyle(
            head_rows=1, head_columns=1, head_row_font_bold=True,
            head_row_fill_fgcolor='CCCCCC', row_height=15)

    wb = wc_utils.workbook.core.Workbook()
    style = wc_utils.workbook.io.WorkbookStyle()

    ws = wb['Pathways'] = wc_utils.workbook.core.Worksheet()
    style['Pathways'] = _header_style()
    summarize_models_by_pathway(ws)

    ws_species = wb['Species'] = wc_utils.workbook.core.Worksheet()
    ws_phyla = wb['Phyla'] = wc_utils.workbook.core.Worksheet()
    style['Species'] = _header_style()
    style['Phyla'] = _header_style()
    summarize_models_by_taxonomy(ws_species, ws_phyla)

    ws = wb['Mathematical types'] = wc_utils.workbook.core.Worksheet()
    style['Mathematical types'] = _header_style()
    summarize_models_by_mathematical_type(ws)

    wc_utils.workbook.io.ExcelWriter(SUMMARY_EXCEL_FILENAME).run(wb, style=style)
def summarize_models_by_pathway(ws):
    """Populate the 'Pathways' summary worksheet (currently a stub).

    Args:
        ws (:obj:`wc_utils.workbook.core.Worksheet`): worksheet to fill
    """
    session = get_database_session()
    # Pathway summarization is not implemented yet; the BioPortal lookup
    # below is kept as a sketch of the intended approach.
    # bio_portal = datanator.data_source.bio_portal.BioPortal()
    # onto = bio_portal.get_ontology('EFO')
    # self.assertEqual(onto['SBO:0000001'].name, 'rate law')
def summarize_models_by_taxonomy(ws_species, ws_phyla):
"""
Args:
wc_utils.workbook.core.Worksheet
"""
session = get_database_session()
q_annotated = session \
.query(Annotation.id, Model.curated, sqlalchemy.func.count(Model._id)) \
.join(Model, Annotation.models) \
.filter(Annotation.namespace == 'taxonomy') \
.group_by(Annotation.id, Model.curated)
annotated_model_ids = [m[0] for m in session
.query(Model._id)
.join(Annotation, Model.annotations)
.filter(Annotation.namespace == 'taxonomy')
.group_by(Model._id)
.all()]
q_unannotated = session \
.query(Model.curated, sqlalchemy.func.count(Model._id)) \
.filter(~Model._id.in_(annotated_model_ids)) \
.group_by(Model.curated)
count_unannotated = {}
for curated, count in q_unannotated.all():
count_unannotated[curated] = count
ncbi_taxa = ete3.NCBITaxa()
| |
# -*- coding: UTF-8 -*-
# Copyright 2009-2019 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""This defines the :class:`Action` class and the :func:`action`
decorator, and some of the standard actions. See :ref:`dev.actions`.
"""
import six
from builtins import str
import logging ; logger = logging.getLogger(__name__)
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext as gettext
from django.utils.encoding import python_2_unicode_compatible
from django.utils.text import format_lazy
from django.utils.encoding import force_text
from django.conf import settings
from django.db import models
from django.apps import apps
get_models = apps.get_models
from lino.core import constants
from lino.core import layouts
from lino.core import fields
from lino.core import keyboard
from lino.modlib.users.utils import get_user_profile
from lino.utils.choosers import Chooser
from lino.core.fields import set_default_verbose_name
from .permissions import Permittable
from .utils import obj2unicode
from .utils import resolve_model
from .utils import navinfo
from .utils import Parametrizable
from .requests import InstanceAction
def check_for_chooser(holder, field):
    """Register a :class:`Chooser` on `holder` for `field` when a
    ``<fieldname>_choices`` method is defined.

    `holder` is either a Model, an Actor or an Action.  The chooser is stored
    in the holder's own ``_choosers_dict`` (created on first use), keyed by
    field name.

    Raises:
        Exception: if a chooser for the same field name was already registered.
    """
    if isinstance(field, fields.DummyField):
        return
    methname = field.name + "_choices"
    m = getattr(holder, methname, None)
    if m is not None:
        ch = Chooser(holder, field, m)
        d = holder.__dict__.get('_choosers_dict', None)
        if d is None:
            d = dict()
            setattr(holder, '_choosers_dict', d)
        # Bug fix: the dict is keyed by field name, so test membership of the
        # name; the original `ch in d` compared a Chooser instance against
        # string keys and could never detect a redefinition.
        if field.name in d:
            raise Exception("Redefinition of chooser %s" % field)
        d[field.name] = ch
    # if field.name == 'city':
    #     logger.info("20140822 chooser for %s.%s", holder, field.name)
def discover_choosers():
    """Walk every installed model's fields and register their choosers."""
    logger.debug("Discovering choosers for model fields...")
    for model in get_models():
        for field in model._meta.fields:
            check_for_chooser(model, field)
def install_layout(cls, k, layout_class, **options):
    """Normalize and install a layout attribute on an actor.

    - `cls` is the actor (a class object)
    - `k` is one of 'detail_layout', 'insert_layout', 'params_layout'
    - `layout_class` is the class to instantiate when the attribute is given
      as a template string or a Panel

    The attribute may currently be a string (an inline template or a dotted
    name to resolve), a :class:`layouts.Panel`, or an already instantiated
    layout; each case is normalized to a layout instance bound to `cls`.
    """
    # if str(cls) == 'courses.Pupils':
    #     print("20160329 install_layout", k, layout_class)
    dl = cls.__dict__.get(k, None)
    if dl is None:  # and not cls._class_init_done:
        # Not defined on the class itself; fall back to the inherited value.
        dl = getattr(cls, k)
    if dl is None:
        return
    if isinstance(dl, six.string_types):
        # A multi-line string or an undotted name is an inline layout
        # template; otherwise it is a dotted name of a layout class to be
        # resolved through the site's model registry.
        if '\n' in dl or not '.' in dl:
            setattr(cls, k, layout_class(dl, cls, **options))
        else:
            layout_class = settings.SITE.models.resolve(dl)
            if layout_class is None:
                raise Exception("Unresolved {} {!r} for {}".format(k, dl, cls))
            setattr(cls, k, layout_class(None, cls, **options))
    elif isinstance(dl, layouts.Panel):
        # A Panel carries its own options; they override the defaults.
        options.update(dl.options)
        setattr(cls, k, layout_class(dl.desc, cls, **options))
    else:
        # Already a layout instance: verify its type and its datasource.
        if not isinstance(dl, layout_class):
            if not isinstance(cls, type):
                # cls is an action instance
                cls = cls.__class__
            msg = "{}.{}.{} must be a string, " \
                  "a Panel or an instance of {} (not {!r})"
            raise Exception(msg.format(
                cls.__module__, cls.__name__, k, layout_class.__name__, dl))
        if dl._datasource is None:
            # First use of a shared layout instance: bind it to this actor.
            dl.set_datasource(cls)
            setattr(cls, k, dl)
        elif not issubclass(cls, dl._datasource):
            # A layout instance may only be reused by subclasses of its
            # original datasource.
            raise Exception(
                "Cannot reuse %s instance (%s of %r) for %r" %
                (dl.__class__, k, dl._datasource, cls))
def register_params(cls):
    """Bind parameter fields to their owner and install the params layout.

    `cls` is either an actor (a class object) or an action (an instance).

    Raises:
        Exception: when a `params_layout` is declared without any parameters.
    """
    if not cls.parameters:
        # A layout without parameters is a configuration error.
        if cls.params_layout is not None:
            raise Exception(
                "{} has a params_layout but no parameters".format(
                    cls))
        return

    for k, v in cls.parameters.items():
        v.set_attributes_from_name(k)
        v.table = cls
        # v.model = cls # 20181023 experimentally

    # Default layout: all parameters joined in declaration order.
    if cls.params_layout is None:
        cls.params_layout = cls._layout_class.join_str.join(
            cls.parameters.keys())
    install_layout(cls, 'params_layout', cls._layout_class)
def setup_params_choosers(self):
    """Resolve ForeignKey targets among `parameters` and register a chooser
    for every parameter field.

    `self` is either an Action instance or an Actor class object.
    """
    if self.parameters:
        for k, fld in self.parameters.items():
            if isinstance(fld, models.ForeignKey):
                # NOTE(review): the '%s' placeholder looks deliberately left
                # unfilled here -- presumably resolve_model() substitutes the
                # unresolved model name into the `strict` template; only the
                # two '{}' are filled now. Confirm against resolve_model.
                msg = "Invalid target %s in parameter {} of {}".format(
                    k, self)
                fld.remote_field.model = resolve_model(fld.remote_field.model, strict=msg)
                set_default_verbose_name(fld)
            check_for_chooser(self, fld)
def make_params_layout_handle(self):
    """Return the layout handle of `params_layout` for the site's default UI.

    `self` is either an Action instance or an Actor class object.
    """
    ui = settings.SITE.kernel.default_ui
    return self.params_layout.get_layout_handle(ui)
@python_2_unicode_compatible
class Action(Parametrizable, Permittable):
"""
Abstract base class for all actions.
The first argument is the optional `label`, other arguments should
be specified as keywords and can be any of the existing class
attributes.
"""
#~ __metaclass__ = ActionMetaClass
_layout_class = layouts.ActionParamsLayout
label = None
"""
The label of this action. A short descriptive text in user
language. Used e.g. on menu items. Also on toolbar buttons if they
have neither :attr:`icon_name` nor :attr:`button_text`.
"""
button_text = None
"""
The text to appear on buttons for this action. If this is not
defined, the :attr:`label` is used.
"""
button_color = None
"""
The color to be used on icon-less buttons for this action
(i.e. which have no :attr:`icon_name`). See also
:attr:`lino.core.site.Site.use_silk_icons`.
Not yet implemented. This is currently being ignored.
"""
debug_permissions = False
save_action_name = None
disable_primary_key = True
"""
Whether primary key fields should be disabled when using this
action. This is `True` for all actions except :class:`ShowInsert`.
"""
keep_user_values = False
"""
Whether the parameter window should keep its values between
different calls. If this is True, Lino does not fill any default
values and leaves those from a previous call.
"""
icon_name = None
"""
The class name of an icon to be used for this action when
rendered as toolbar button. Allowed icon names are defined in
:data:`lino.core.constants.ICON_NAMES`.
"""
ui5_icon_name = None
react_icon_name = None
hidden_elements = frozenset()
combo_group = None
"""
The name of another action to which to "attach" this action.
Both actions will then be rendered as a single combobutton.
"""
parameters = None
use_param_panel = False
"""
Used internally. This is True for window actions whose window use
the parameter panel: grid and emptytable (but not showdetail)
"""
no_params_window = False
"""
Set this to `True` if your action has :attr:`parameters` but you
do *not* want it to open a window where the user can edit these
parameters before calling the action.
Setting this attribute to `True` means that the calling code must
explicitly set all parameter values. Usage example is the
:attr:`lino_xl.lib.polls.models.AnswersByResponse.answer_buttons`
virtual field.
"""
sort_index = 90
"""
Determines the sort order in which the actions will be presented
to the user.
List actions are negative and come first.
Predefined `sort_index` values are:
===== =================================
value action
===== =================================
-1 :class:`as_pdf <lino_xl.lib.appypod.PrintTableAction>`
10 :class:`ShowInsert`
11 :attr:`duplicate <lino.mixins.duplicable.Duplicable.duplicate>`
20 :class:`detail <ShowDetail>`
30 :class:`delete <DeleteSelected>`
31 :class:`merge <lino.core.merge.MergeAction>`
50 :class:`Print <lino.mixins.printable.BasePrintAction>`
51 :class:`Clear Cache <lino.mixins.printable.ClearCacheAction>`
52 :attr:`lino.modlib.users.UserPlan.start_plan`
53 :attr:`lino.modlib.users.UserPlan.update_plan`
60 :class:`ShowSlaveTable`
90 default for all custom row actions
100 :class:`SubmitDetail`
200 default for all workflow actions (:class:`ChangeStateAction <lino.core.workflows.ChangeStateAction>`)
===== =================================
"""
help_text = None
"""
A help text that shortly explains what this action does. In a
graphical user interface this will be rendered as a **tooltip**
text.
If this is not given by the code, Lino will potentially set it at
startup when loading the :xfile:`help_texts.py` files.
"""
submit_form_data = False
"""
Should the running of the action include all known form values in
the request.
"""
auto_save = True
"""
What to do when this action is being called while the user is on a
dirty record.
- `False` means: forget any changes in current record and run the
action.
- `True` means: save any changes in current record before running
the action. `None` means: ask the user.
"""
extjs_main_panel = None
"""
Used by :mod:`lino_xl.lib.extensible` and
:mod:`lino.modlib.awesome_uploader`.
Example::
class CalendarAction(dd.Action):
extjs_main_panel = "Lino.CalendarApp().get_main_panel()"
...
"""
js_handler = None
"""
This is usually `None`. Otherwise it is the name of a Javascript
callable to be called without arguments. That callable must have
been defined in a :attr:`lino.core.plugin.Plugin.site_js_snippets`
of the plugin.
"""
action_name = None
"""
Internally used to store the name of this action within the
defining Actor's namespace.
"""
defining_actor = None
"""
The :class:`lino.core.actors.Actor` who uses this action for the
first time. This is set during :meth:`attach_to_actor`. This is
used internally e.g. by :mod:`lino.modlib.extjs` when generating
JavaScript code for certain actions.
"""
parameters = None
"""
See :attr:`lino.core.utils.Parametrizable.parameters`.
"""
key = None
"""
Not used. The keyboard hotkey to associate to this action in a
user interface.
"""
default_format = 'html'
"""
Used internally.
"""
editable = True
"""
Whether the parameter fields should be editable.
Setting this to False seems nonsense.
"""
readonly = True
"""
Whether this action is readonly, i.e. does not change any data in
the current data object.
Setting this to `False` will (1) disable the action for
`readonly` user types or when
:attr:`lino.core.site.Site.readonly` is True, and (2) will
cause it to be logged when :attr:`log_each_action_request
<lino.core.site.Site.log_each_action_request>` is set to
`True`.
Note that :class:`ShowInsert` is readonly because it does not
modify the current data object. For example the button would
be disabled on a registered | |
import re
import uuid
from urllib import urlopen
from datetime import datetime, timedelta
from flask import request, jsonify
from models import AccountDetails, TaskLog, UserProfile, UserSkill, TaskDetails, ProjectDetails, \
WorkspaceDetails, SkillData
from routes.authenticated.utils import send_invitation_email, check_project_access
def return_json(data, message=None):
    """Build the standard JSON success envelope.

    Args:
        data: payload returned under the ``data`` key
        message (str, optional): human-readable note, included only when set

    Returns:
        flask.Response: JSON response with ``code`` fixed at 200
    """
    payload = {}
    if message:
        payload['message'] = message
    payload['data'] = data
    payload['code'] = 200
    return jsonify(payload)
def get_workspace(wk_id):
    """Look up a workspace entity by datastore id (None when absent)."""
    return WorkspaceDetails.get_by_id(wk_id)
def get_all_workspace_projects(wks_key):
    """Return every project in the workspace as a dict, augmented with its
    id and developer list."""
    rows = []
    for project in ProjectDetails.query(ProjectDetails.Wks == wks_key).fetch():
        entry = project.to_dict()
        entry['ProjectID'] = project.key.id()
        entry['Developers'] = project.get_developers()
        rows.append(entry)
    return rows
def get_project(wks_key, project_id):
    """Fetch a project, verifying it belongs to the workspace.

    Returns:
        ProjectDetails on success, otherwise an error dict with ``code``/
        ``message`` (403 wrong workspace, 404 unknown id).
    """
    project = ProjectDetails.get_by_id(project_id)
    if not project:
        return {'code': 404, 'message': "Project not found: " + str(project_id)}
    if project.Wks == wks_key:
        return project
    return {'code': 403, 'message': "Project (" + str(project_id) + ") not part of workspace. Forbidden access."}
def get_account(ProfileID):
    """Look up a user profile by datastore id (None when absent)."""
    return UserProfile.get_by_id(ProfileID)
def get_skills(wks_key):
    """List every skill in the workspace with its id and usage count."""
    rows = []
    for skill in SkillData.query(SkillData.Wks == wks_key).fetch():
        entry = skill.to_dict()
        entry['SkillID'] = skill.key.id()
        entry['usage'] = skill.usage()
        rows.append(entry)
    return rows
def get_user_skill(wks_key, account_key):
    """List the user's skills in the workspace, each with its resolved name."""
    records = UserSkill.query(UserSkill.Wks == wks_key, UserSkill.User == account_key).fetch()
    rows = []
    for record in records:
        entry = record.to_dict()
        entry['name'] = record.skill_name()
        rows.append(entry)
    return rows
def get_user_projects(wks_key, ProfileID):
    """List the workspace projects that the given profile may access."""
    account = get_account(ProfileID)
    accessible = []
    for project in ProjectDetails.query(ProjectDetails.Wks == wks_key).fetch():
        # Filter by per-project access for this user's email and role.
        if check_project_access(project, account.UserEmail, account.role):
            entry = project.to_dict()
            entry['ProjectID'] = project.key.id()
            accessible.append(entry)
    return accessible
def validate_profile_update(wks_key, current_role, body):
    """Validate a profile-update request body.

    Args:
        wks_key: workspace key the profile belongs to
        current_role (str): the user's current role
        body (dict): request body; may contain 'role' and/or 'disabled'

    Returns:
        dict: empty when valid, otherwise ``{'code': 400, 'message': ...}``
    """
    valid_choices = ['admin', 'manager', 'developer']
    if 'role' in body:
        role = body['role'].lower()
        if not validate_choices(role, valid_choices):
            return {'code': 400, 'message': 'Invalid role provided. Must be one of ' + str(valid_choices)}
        if role == current_role:
            return {'code': 400, 'message': 'User already owns this role.'}
        # Never demote the workspace's only admin.
        if current_role == 'admin' and count_system_admins(wks_key) == 1:
            return {'code': 400, 'message': "Can not remove only admin in system."}
        # Carry the prospective role into the disabled check below.
        current_role = role
    if 'disabled' in body:
        # Never disable the workspace's only admin. The cheap flag check runs
        # before the admin-count query so the query is skipped when possible.
        if current_role == 'admin' and body['disabled'] == True and count_system_admins(wks_key) == 1:
            return {'code': 400, 'message': "Can not disable only admin in system."}
    return {}
def count_system_admins(wks_key):
    """Return how many profiles in the workspace hold the admin role."""
    query = UserProfile.query(UserProfile.Wks == wks_key, UserProfile.role == "admin")
    return query.count()
def create_skill(wks_key, body):
    """Create a new skill in the workspace, rejecting duplicate names.

    Args:
        wks_key: workspace key
        body (dict): request body containing 'skill_name'

    Returns:
        dict: ``{'SkillID': ..., 'information': ...}`` on success, or a
        ``{'code': 400, ...}`` error when the name already exists.
    """
    duplicate = SkillData.query(SkillData.Wks == wks_key, SkillData.skill_name == body['skill_name']).get()
    if duplicate:
        return {'code': 400, 'message': "Skill already exists in this workspace: " + body['skill_name']}
    skill_key = SkillData(
        Wks=wks_key,
        skill_name=body['skill_name']
    ).put()
    # Bug fix: the response key was misspelled 'infomation'; every other
    # create/update helper in this module uses 'information'.
    return {
        'SkillID': skill_key.id(),
        "information": "Skill created successfully.",
    }
def get_users(wks_key):
    """List every profile in the workspace with its ids and display name."""
    rows = []
    for user in UserProfile.query(UserProfile.Wks == wks_key).fetch():
        entry = user.to_dict()
        entry['ProfileID'] = user.key.id()
        entry['name'] = user.get_name()
        entry['AccountID'] = user.get_id()
        rows.append(entry)
    return rows
def get_user(wks_key, ProfileID):
    """Fetch a profile, verifying it belongs to the workspace.

    Returns:
        UserProfile on success, otherwise an error dict with ``code``/
        ``message`` (403 wrong workspace, 404 unknown id).
    """
    user = UserProfile.get_by_id(ProfileID)
    if not user:
        return {'code': 404, 'message': "User not found: " + str(ProfileID)}
    if user.Wks == wks_key:
        return user
    return {'code': 403, 'message': "User (" + str(ProfileID) + ") not part of workspace. Forbidden access."}
def get_task(wks_key, TaskID):
    """Fetch a task, verifying it belongs to the workspace.

    Returns:
        TaskDetails on success, otherwise an error dict with ``code``/
        ``message`` (403 wrong workspace, 404 unknown id).
    """
    task = TaskDetails.get_by_id(TaskID)
    if not task:
        return {'code': 404, 'message': "Task not found: " + str(TaskID)}
    if task.get_wks() == wks_key:
        return task
    return {'code': 403, 'message': "Task (" + str(TaskID) + ") not part of workspace. Forbidden access."}
def get_tasks(project_key):
    """Return all tasks of a project, converted for API output."""
    tasks = TaskDetails.query(TaskDetails.Project == project_key).fetch()
    return convert_tasks(tasks)
def mandatory(allowed_items, body):
    """Check that every required item is present and non-empty in `body`.

    Returns:
        True when all are present; otherwise a ``{'code': 400, ...}`` error
        dict naming the first missing or empty property.
    """
    missing = next((item for item in allowed_items
                    if item not in body or body[item] == ""), None)
    if missing is not None:
        return {'code': 400, 'message': 'Property missing or null provided: ' + missing}
    return True
def parse_email(email):
    """Return True when `email` looks like a plausible address (x@y.z).

    Args:
        email (str): candidate email address

    Returns:
        bool: True when the value matches the loose one-@-plus-dot pattern
    """
    # Raw string avoids the invalid '\.' escape in a plain string literal;
    # bool() keeps the documented True/False return instead of a Match object.
    return bool(re.match(r"[^@]+@[^@]+\.[^@]+", email))
def invite_user(wks, body):
    """Invite a user by email into a workspace with the given role.

    Args:
        wks: WorkspaceDetails entity the user is invited to
        body (dict): request body with 'UserEmail' and 'role'

    Returns:
        dict: ``{'ProfileID': ..., 'information': ...}`` on success, or a
        ``{'code': 400, ...}`` error dict.
    """
    key = wks.key
    name = wks.workspace_name
    user_email = body['UserEmail']
    role = body['role']
    if not parse_email(user_email):
        return {'code': 400, 'message': 'Invalid email format provided.'}
    valid_choices = ['admin', 'manager', 'developer']
    if not validate_choices(role, valid_choices):
        return {'code': 400, 'message': 'Invalid role provided. Must be one of ' + str(valid_choices)}
    if UserProfile.query(UserProfile.Wks == key, UserProfile.UserEmail == user_email).get():
        return {'code': 400, 'message': 'User: ' + user_email + ' already invited to this workspace!'}
    # Bug fix: this line was the unparseable placeholder `token = <KEY>`;
    # generate an unguessable invitation token instead (uuid is imported at
    # module level).
    token = str(uuid.uuid4())
    profile_key = UserProfile(
        Wks=key,
        workspace_name=name,
        UserEmail=user_email,
        role=role,
        invitation_token=token,
        invitation_accepted=False,
        disabled=False
    ).put()
    send_invitation_email(token, user_email)
    return {
        'ProfileID': profile_key.id(),
        "information": 'User: ' + user_email + ' invited!',
    }
def format_date(date):
    """Parse a dd/mm/YYYY value into a `datetime.date`.

    Args:
        date: value convertible to str in dd/mm/YYYY form

    Returns:
        datetime.date on success, or False when the value does not parse
        (the falsy sentinel callers test for).
    """
    try:
        return datetime.strptime(str(date), '%d/%m/%Y').date()
    except ValueError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; strptime signals bad input
        # with ValueError.
        return False
def validate_choices(given, valid_choices):
    """Return True when `given` is one of `valid_choices`."""
    # The membership test already yields a bool; no if/else needed.
    return given in valid_choices
def validate_project(body):
    """Validate a project create/update body.

    Checks the manager's role, the date formats, and the date ordering.

    Returns:
        True when valid, otherwise a ``{'code': 400, ...}`` error dict.
    """
    if is_manager(body['project_manager']) == False:
        return {'code': 400, 'message': body['project_manager'] + " is not an active admin or manager."}
    start = format_date(body['project_start'])
    deadline = format_date(body['project_deadline'])
    if start == False or deadline == False:
        return {'code': 400, 'message': "Dates must be in dd/mm/YYYY format."}
    if start > deadline:
        return {'code': 400, 'message': "project_start must be lower than project_deadline."}
    today = datetime.today().date()
    if today > start:
        return {'code': 400, 'message': "project_start must be after or on the current date."}
    if today > deadline:
        return {'code': 400, 'message': "project_deadline must be after or on the current date."}
    return True
def update_project(allowed_items, project, body):
    """Apply a partial update to `project`.

    Missing keys in `body` are backfilled from the current entity so that
    validation always sees a complete record.

    Args:
        allowed_items (list): updatable attribute names
        project: ProjectDetails entity to mutate
        body (dict): partial update payload (mutated in place by backfill)

    Returns:
        dict: ``{'information': ...}`` on success, or a ``{'code': 400, ...}``
        error dict.
    """
    resp = {}
    old_body = body.copy()
    for item in allowed_items:
        if item not in body:
            body[item] = getattr(project, item)
    # Validate once and reuse the result; the original called
    # validate_project(body) twice (once for the check, once for the return).
    validation = validate_project(body)
    if validation != True:
        return validation
    valid_choices = ['Running', 'Closed', 'On Hold']
    # NOTE(review): this reads body['task_status'] but the choices look like
    # project statuses -- confirm whether 'project_status' was intended.
    if not validate_choices(body['task_status'], valid_choices):
        return {'code': 400, 'message': 'Invalid task_status provided. Must be one of ' + str(valid_choices)}
    for item in body:
        setattr(project, item, body[item])
    project.put()
    resp["information"] = str(old_body.keys()) + " updated."
    return resp
def create_project(wks_key, body):
    """Create a project in the workspace after validating the request body.

    Args:
        wks_key: workspace key
        body (dict): project fields (manager, name, description, dates)

    Returns:
        dict: ``{'ProjectID': ..., 'information': ...}`` on success, or a
        ``{'code': 400, ...}`` error dict.
    """
    # Validate once and reuse the result; the original called
    # validate_project(body) twice.
    validation = validate_project(body)
    if validation != True:
        return validation
    # NOTE(review): the dates are stored as the raw dd/mm/YYYY strings here,
    # while tasks store parsed dates -- confirm this is intended.
    project_key = ProjectDetails(
        Wks=wks_key,
        project_manager=body['project_manager'],
        project_name=body['project_name'],
        project_description=body['project_description'],
        project_start=body['project_start'],
        project_deadline=body['project_deadline'],
        project_status="Running",
        project_stage="Planning"
    ).put()
    return {
        'ProjectID': project_key.id(),
        "information": "Project created successfully.",
    }
def profile_from_account(wks_key, account_id):
    """Resolve an AccountDetails id to the matching UserProfile in a workspace.

    Args:
        wks_key: workspace key
        account_id: AccountDetails datastore id

    Returns:
        UserProfile on success, or a ``{'code': ..., 'message': ...}`` error
        dict (403 not in workspace, 404 unknown account).
    """
    account = AccountDetails.get_by_id(account_id)
    if not account:
        # Bug fix: the error messages referenced an undefined `ProfileID`
        # (a NameError at runtime); report the account_id that was looked up.
        return {'code': 404, 'message': "User not found: " + str(account_id)}
    user = UserProfile.query(UserProfile.Wks == wks_key, UserProfile.UserEmail == account.email).get()
    if user:
        return user
    return {'code': 403, 'message': "User (" + str(account_id) + ") not part of workspace. Forbidden access."}
def validate_task(wks_key, body):
    """Validate a task create/update body against the workspace.

    Checks that every referenced skill exists, every developer is an active
    workspace member holding at least one of the required skills, the dates
    are well-formed and ordered, and the time estimate is positive.

    Returns:
        True when valid, otherwise a ``{'code': 400, ...}`` error dict.
    """
    skills = body['task_skills']
    for skill_id in skills:
        skill_check = check_skill_exists(wks_key, skill_id)
        if skill_check != True:
            return skill_check

    for account_id in body['task_developers']:
        profile = profile_from_account(wks_key, account_id)
        # A dict is an error response; propagate it unchanged.
        if isinstance(profile, dict):
            return profile
        user_check = get_user(wks_key, profile.key.id())
        if isinstance(user_check, dict):
            return user_check
        if user_check.invitation_accepted == False:
            return {'code': 400, 'message': "User has not accepted invite: " + str(account_id)}
        if user_check.disabled == True:
            return {'code': 400, 'message': "User is disabled: " + str(account_id)}
        if developer_has_skill(wks_key, user_check, skills) != True:
            return {'code': 400, 'message': "User does not have any of the required skills: " + str(account_id)}

    start = format_date(body['task_startdate'])
    deadline = format_date(body['task_finishbydate'])
    if start == False or deadline == False:
        return {'code': 400, 'message': "Dates must be in dd/mm/YYYY format."}
    if start > deadline:
        return {'code': 400, 'message': "task_startdate must be lower than task_finishbydate."}
    today = datetime.today().date()
    if today > start:
        return {'code': 400, 'message': "task_startdate must be after or on the current date."}
    if today > deadline:
        return {'code': 400, 'message': "task_finishbydate must be after or on the current date."}
    if body['task_aminutes'] <= 0:
        return {'code': 400, 'message': "task_aminutes must be greater than 0."}
    return True
def developer_has_skill(wks_key, user, skill_list):
    """Return True if `user` holds at least one skill from `skill_list`."""
    return any(
        user_has_skill(wks_key, user.get_user_key(), skill) != False
        for skill in skill_list
    )
def user_has_skill(wks_key, account_key, skill_id):
    """Return the UserSkill entity linking the account to `skill_id`, or False."""
    match = UserSkill.query(
        UserSkill.Wks == wks_key,
        UserSkill.User == account_key,
        UserSkill.skill_id == skill_id,
    ).get()
    return match if match else False
def update_task(allowed_items, key, task, body):
    """Update `task` in place from `body` after validating the merged payload.

    Args:
        allowed_items: iterable of field names that may be updated.
        key: workspace ndb key used by the validation lookups.
        task: the TaskDetails entity to mutate and persist.
        body: dict of new values; allowed fields missing from the request
            are backfilled from the current task state before validation.

    Returns:
        dict with an 'information' message on success, otherwise the error
        dict produced by the failed validation.
    """
    resp = {}
    old_body = body.copy()
    # Backfill every allowed-but-absent field so validate_task always sees
    # a complete payload (dates are serialized back to dd/mm/YYYY strings).
    for item in allowed_items:
        if item not in body:
            if item == 'task_startdate' or item == 'task_finishbydate':
                body[item] = getattr(task, item).strftime('%d/%m/%Y')
            else:
                body[item] = getattr(task, item)
    # BUG FIX: validate once and reuse the result; the original called
    # validate_task twice, re-running every datastore query it performs.
    validation = validate_task(key, body)
    if validation != True:
        return validation
    valid_choices = task.get_all_other_tasks()
    valid_choices.append("None")
    if validate_choices(body['parent_task'], valid_choices) == False:
        return {'code': 400, 'message': 'Invalid parent_task provided. Must be one of ' + str(valid_choices)}
    valid_choices = ['Open', 'Closed']
    if validate_choices(body['task_status'], valid_choices) == False:
        return {'code': 400, 'message': 'Invalid task_status provided. Must be one of ' + str(valid_choices)}
    # Convert wire values to their stored types before assignment.
    for item in body:
        if item == 'task_startdate' or item == 'task_finishbydate':
            new_value = format_date(body[item])
        elif item == 'parent_task':
            new_value = None if body[item] == 'None' else int(body[item])
        else:
            new_value = body[item]
        setattr(task, item, new_value)
    task.put()
    resp["information"] = str(old_body.keys()) + " updated."
    return resp
def create_task(wks_key, project, body):
    """Validate `body` and persist a new TaskDetails entity under `project`.

    Returns a dict with the new 'TaskID' and an information message, or the
    validation error dict when the payload is invalid.
    """
    validation = validate_task(wks_key, body)
    if validation != True:
        return validation
    new_key = TaskDetails(
        Project=project.key,
        task_name=body['task_name'],
        task_description=body['task_description'],
        task_aminutes=body['task_aminutes'],
        task_skills=body['task_skills'],
        task_developers=body['task_developers'],
        task_status="Open",
        task_startdate=format_date(body['task_startdate']),
        task_finishbydate=format_date(body['task_finishbydate']),
    ).put()
    return {
        'TaskID': new_key.id(),
        'information': "Task created successfully.",
    }
def is_manager(email):
    """Return True if `email` belongs to an active manager or admin profile."""
    user = UserProfile.query(UserProfile.UserEmail == email).get()
    if not user:
        return False
    is_active = user.invitation_accepted == True and user.disabled == False
    return bool(is_active and user.role in ("manager", "admin"))
def convert_string_to_bool(str):
    """Return False for the string "false" (any casing), True otherwise.

    NOTE(review): the parameter shadows the builtin `str`; the name is kept
    for backward compatibility with any keyword-argument callers.
    """
    # Simplified: the original lower-cased the literal "False" on every call
    # and built the result through a temporary variable.
    return str.lower() != "false"
def is_number(number):
    """Return True if `number` can be parsed as a float."""
    try:
        float(number)
    except ValueError:
        return False
    return True
def is_number_list(number_list):
    """Return True if `number_list` is a comma-separated list of integers.

    BUG FIX: `map` is lazy on Python 3, so the original's
    `map(int, number_list.split(','))` never raised ValueError and every
    input validated as True. Materialising the conversion restores the
    intended check (and is behaviorally identical on Python 2).
    """
    try:
        [int(part) for part in number_list.split(',')]
        return True
    except ValueError:
        return False
def check_body(accepted_items):
body = request.form
new_values = {}
for item in body:
value = request.form[item]
if item not | |
source=source,
destination=destination,
send_asset=send_asset,
send_max=send_max,
dest_asset=dest_asset,
dest_amount=dest_amount,
path=path)
class ChangeTrust(Operation):
    """The :class:`ChangeTrust` object, which represents a ChangeTrust
    operation on Stellar's network.

    Creates, updates, or deletes a trustline. For more on trustlines, please
    refer to the `assets documentation
    <https://www.stellar.org/developers/guides/concepts/assets.html>_`.

    Threshold: Medium

    :param Asset asset: The asset for the trust line.
    :param str limit: The limit for the asset, defaults to max int64.
        If the limit is set to "0" it deletes the trustline.
    :param str source: The source account (defaults to transaction source).
    """
    # Largest representable trustline limit (max int64 scaled by 1e-7).
    default_limit = "92233720368547.75807"

    @classmethod
    def type_code(cls):
        return Xdr.const.CHANGE_TRUST

    def __init__(self, asset, limit=None, source=None):
        super(ChangeTrust, self).__init__(source)
        self.line = asset
        # Any falsy limit (None, empty string) falls back to the default.
        self.limit = limit if limit else self.default_limit

    def to_xdr_object(self):
        """Creates an XDR Operation object that represents this
        :class:`ChangeTrust`.
        """
        xdr_line = self.line.to_xdr_object()
        xdr_limit = Operation.to_xdr_amount(self.limit)
        self.body.type = Xdr.const.CHANGE_TRUST
        self.body.changeTrustOp = Xdr.types.ChangeTrustOp(xdr_line, xdr_limit)
        return super(ChangeTrust, self).to_xdr_object()

    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Creates a :class:`ChangeTrust` object from an XDR Operation
        object.
        """
        source = None
        if op_xdr_object.sourceAccount:
            source = encode_check(
                'account', op_xdr_object.sourceAccount[0].ed25519).decode()
        trust_op = op_xdr_object.body.changeTrustOp
        return cls(
            source=source,
            asset=Asset.from_xdr_object(trust_op.line),
            limit=Operation.from_xdr_amount(trust_op.limit))
class AllowTrust(Operation):
    """The :class:`AllowTrust` object, which represents a AllowTrust operation
    on Stellar's network.

    Updates the authorized flag of an existing trustline. This can only be
    called by the issuer of a trustline's `asset
    <https://www.stellar.org/developers/guides/concepts/assets.html>`_.
    The issuer can only clear the authorized flag if the issuer has the
    AUTH_REVOCABLE_FLAG set. Otherwise, the issuer can only set the
    authorized flag.

    Threshold: Low

    :param str trustor: The trusting account (the one being authorized)
    :param str asset_code: The asset code being authorized.
    :param str source: The source account (defaults to transaction source).
    """

    @classmethod
    def type_code(cls):
        return Xdr.const.ALLOW_TRUST

    def __init__(self, trustor, asset_code, authorize, source=None):
        super(AllowTrust, self).__init__(source)
        self.trustor = trustor
        self.asset_code = asset_code
        self.authorize = authorize

    def to_xdr_object(self):
        """Creates an XDR Operation object that represents this
        :class:`AllowTrust`.
        """
        trustor = account_xdr_object(self.trustor)
        code_len = len(self.asset_code)
        assert code_len <= 12
        # XDR asset codes are fixed-width: NUL-pad to 4 or 12 bytes.
        width = 4 if code_len <= 4 else 12
        padded_code = (bytearray(self.asset_code, 'ascii')
                       + b'\x00' * (width - code_len))
        asset = Xdr.nullclass()
        if len(padded_code) == 4:
            asset.type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4
            asset.assetCode4 = padded_code
        else:
            asset.type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12
            asset.assetCode12 = padded_code
        self.body.type = Xdr.const.ALLOW_TRUST
        self.body.allowTrustOp = Xdr.types.AllowTrustOp(
            trustor, asset, self.authorize)
        return super(AllowTrust, self).to_xdr_object()

    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Creates a :class:`AllowTrust` object from an XDR Operation
        object.
        """
        source = None
        if op_xdr_object.sourceAccount:
            source = encode_check(
                'account', op_xdr_object.sourceAccount[0].ed25519).decode()
        op = op_xdr_object.body.allowTrustOp
        trustor = encode_check('account', op.trustor.ed25519).decode()
        asset_type = op.asset.type
        if asset_type == Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4:
            asset_code = op.asset.assetCode4.decode()
        elif asset_type == Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12:
            asset_code = op.asset.assetCode12.decode()
        else:
            raise NotImplementedError(
                "Operation of asset_type={} is not implemented"
                ".".format(asset_type.type))
        return cls(
            source=source,
            trustor=trustor,
            authorize=op.authorize,
            asset_code=asset_code)
class SetOptions(Operation):
    """The :class:`SetOptions` object, which represents a SetOptions operation
    on Stellar's network.
    This operation sets the options for an account.
    For more information on the signing options, please refer to the `multi-sig
    doc <https://www.stellar.org/developers/guides/concepts/multi-sig.html>`_.
    When updating signers or other thresholds, the threshold of this operation
    is high.
    Threshold: Medium or High
    :param str inflation_dest: Set this account ID as the account's inflation destination.
    :param int clear_flags: Bitmap integer for which account flags to clear.
    :param int set_flags: Bitmap integer for which account flags to set.
    :param int master_weight: The master key weight.
    :param int low_threshold: The sum weight for the low threshold.
    :param int med_threshold: The sum weight for the medium threshold.
    :param int high_threshold: The sum weight for the high threshold.
    :param str home_domain: sets the home domain used for reverse federation lookup.
    :param signer_address: signer
    :type signer_address: str, bytes
    :param str signer_type: The type of signer, it should be 'ed25519PublicKey',
        'hashX' or 'preAuthTx'
    :param int signer_weight: The weight of the new signer (0 to delete or 1-255)
    :param str source: The source account (defaults to transaction source).
    """
    @classmethod
    def type_code(cls):
        return Xdr.const.SET_OPTIONS
    def __init__(self,
                 inflation_dest=None,
                 clear_flags=None,
                 set_flags=None,
                 master_weight=None,
                 low_threshold=None,
                 med_threshold=None,
                 high_threshold=None,
                 home_domain=None,
                 signer_address=None,
                 signer_type=None,
                 signer_weight=None,
                 source=None):
        super(SetOptions, self).__init__(source)
        self.inflation_dest = inflation_dest
        self.clear_flags = clear_flags
        self.set_flags = set_flags
        self.master_weight = master_weight
        self.low_threshold = low_threshold
        self.med_threshold = med_threshold
        self.high_threshold = high_threshold
        # XDR strings are byte sequences: normalise a str home_domain to a
        # UTF-8 bytearray; anything else is stored as given.
        if isinstance(home_domain, str):
            self.home_domain = bytearray(home_domain, encoding='utf-8')
        else:
            self.home_domain = home_domain
        self.signer_address = signer_address
        self.signer_type = signer_type
        self.signer_weight = signer_weight
        # If only an address was supplied, infer the signer type: a valid
        # Stellar address implies an ed25519 public-key signer.
        if self.signer_address is not None and self.signer_type is None:
            try:
                is_valid_address(self.signer_address)
            except StellarAddressInvalidError:
                raise StellarAddressInvalidError('Must be a valid stellar address if not give signer_type')
            self.signer_type = 'ed25519PublicKey'
        signer_is_invalid_type = (
            self.signer_type is not None and
            self.signer_type not in ('ed25519PublicKey', 'hashX', 'preAuthTx'))
        if signer_is_invalid_type:
            raise NotValidParamError('Invalid signer type, sign_type should '
                                     'be ed25519PublicKey, hashX or preAuthTx')
        # hashX / preAuthTx signer payloads are supplied hex-encoded; convert
        # them to raw bytes up front.
        if self.signer_type in ('hashX', 'preAuthTx'):
            self.signer_address = convert_hex_to_bytes(self.signer_address)
    def to_xdr_object(self):
        """Creates an XDR Operation object that represents this
        :class:`SetOptions`.
        """
        # XDR optional fields are encoded as 0- or 1-element lists; wrap any
        # scalar option accordingly (None -> [], already-a-list -> unchanged).
        def assert_option_array(x):
            if x is None:
                return []
            if not isinstance(x, list):
                return [x]
            return x
        if self.inflation_dest is not None:
            inflation_dest = [account_xdr_object(self.inflation_dest)]
        else:
            inflation_dest = []
        # NOTE(review): these assignments mutate the operation's attributes in
        # place; calling to_xdr_object a second time is still safe because
        # assert_option_array passes existing lists through unchanged.
        self.clear_flags = assert_option_array(self.clear_flags)
        self.set_flags = assert_option_array(self.set_flags)
        self.master_weight = assert_option_array(self.master_weight)
        self.low_threshold = assert_option_array(self.low_threshold)
        self.med_threshold = assert_option_array(self.med_threshold)
        self.high_threshold = assert_option_array(self.high_threshold)
        self.home_domain = assert_option_array(self.home_domain)
        # A signer is only emitted when address, type and weight are all set.
        req_signer_fields = (self.signer_address, self.signer_type,
                             self.signer_weight)
        if all(signer_field is not None for signer_field in req_signer_fields):
            signer = [
                Xdr.types.Signer(
                    signer_key_xdr_object(self.signer_type,
                                          self.signer_address),
                    self.signer_weight)
            ]
        else:
            signer = []
        set_options_op = Xdr.types.SetOptionsOp(
            inflation_dest, self.clear_flags, self.set_flags,
            self.master_weight, self.low_threshold, self.med_threshold,
            self.high_threshold, self.home_domain, signer)
        self.body.type = Xdr.const.SET_OPTIONS
        self.body.setOptionsOp = set_options_op
        return super(SetOptions, self).to_xdr_object()
    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Creates a :class:`SetOptions` object from an XDR Operation
        object.
        """
        if not op_xdr_object.sourceAccount:
            source = None
        else:
            source = encode_check(
                'account', op_xdr_object.sourceAccount[0].ed25519).decode()
        if not op_xdr_object.body.setOptionsOp.inflationDest:
            inflation_dest = None
        else:
            inflation_dest = encode_check(
                'account', op_xdr_object.body.setOptionsOp.inflationDest[0]
                .ed25519).decode()
        clear_flags = op_xdr_object.body.setOptionsOp.clearFlags  # list
        set_flags = op_xdr_object.body.setOptionsOp.setFlags
        master_weight = op_xdr_object.body.setOptionsOp.masterWeight
        low_threshold = op_xdr_object.body.setOptionsOp.lowThreshold
        med_threshold = op_xdr_object.body.setOptionsOp.medThreshold
        high_threshold = op_xdr_object.body.setOptionsOp.highThreshold
        home_domain = op_xdr_object.body.setOptionsOp.homeDomain
        # Decode the (at most one) signer back to its address/type/weight
        # form; ed25519 keys are re-encoded as Stellar addresses, hashX and
        # preAuthTx payloads are passed through as raw bytes.
        if op_xdr_object.body.setOptionsOp.signer:
            key = op_xdr_object.body.setOptionsOp.signer[0].key
            if key.type == Xdr.const.SIGNER_KEY_TYPE_ED25519:
                signer_address = encode_check('account', key.ed25519).decode()
                signer_type = 'ed25519PublicKey'
            if key.type == Xdr.const.SIGNER_KEY_TYPE_PRE_AUTH_TX:
                signer_address = key.preAuthTx
                signer_type = 'preAuthTx'
            if key.type == Xdr.const.SIGNER_KEY_TYPE_HASH_X:
                signer_address = key.hashX
                signer_type = 'hashX'
            signer_weight = op_xdr_object.body.setOptionsOp.signer[0].weight
        else:
            signer_address = None
            signer_type = None
            signer_weight = None
        return cls(
            source=source,
            inflation_dest=inflation_dest,
            clear_flags=clear_flags,
            set_flags=set_flags,
            master_weight=master_weight,
            low_threshold=low_threshold,
            med_threshold=med_threshold,
            high_threshold=high_threshold,
            home_domain=home_domain,
            signer_address=signer_address,
            signer_type=signer_type,
            signer_weight=signer_weight)
class ManageOffer(Operation):
    """The :class:`ManageOffer` object, which represents a ManageOffer
    operation on Stellar's network.

    Creates, updates, or deletes an offer. Set `offer_id` to 0 to create a
    new offer, to an existing offer ID to update that offer, or to an
    existing offer ID with `amount` set to 0 to delete it.

    Threshold: Medium

    :param Asset selling: What you're selling.
    :param Asset buying: What you're buying.
    :param str amount: The total amount you're selling. If 0,
        deletes the offer.
    :param price: Price of 1 unit of `selling` in
        terms of `buying`.
    :type price: str, dict
    :param int offer_id: If `0`, will create a new offer (default). Otherwise,
        edits an existing offer.
    :param str source: The source account (defaults to transaction source).
    """

    @classmethod
    def type_code(cls):
        return Xdr.const.MANAGE_OFFER

    def __init__(self, selling, buying, amount, price, offer_id=0,
                 source=None):
        super(ManageOffer, self).__init__(source)
        self.selling = selling  # Asset
        self.buying = buying  # Asset
        self.amount = amount
        self.price = price
        self.offer_id = offer_id

    def to_xdr_object(self):
        """Creates an XDR Operation object that represents this
        :class:`ManageOffer`.
        """
        price_fraction = Operation.to_xdr_price(self.price)
        xdr_price = Xdr.types.Price(price_fraction['n'], price_fraction['d'])
        manage_offer_op = Xdr.types.ManageOfferOp(
            self.selling.to_xdr_object(),
            self.buying.to_xdr_object(),
            Operation.to_xdr_amount(self.amount),
            xdr_price,
            self.offer_id)
        self.body.type = Xdr.const.MANAGE_OFFER
        self.body.manageOfferOp = manage_offer_op
        return super(ManageOffer, self).to_xdr_object()

    @classmethod
    def from_xdr_object(cls, op_xdr_object):
        """Creates a :class:`ManageOffer` object from an XDR Operation
        object.
        """
        source = None
        if op_xdr_object.sourceAccount:
            source = encode_check(
                'account', op_xdr_object.sourceAccount[0].ed25519).decode()
        offer = op_xdr_object.body.manageOfferOp
        return cls(
            source=source,
            selling=Asset.from_xdr_object(offer.selling),
            buying=Asset.from_xdr_object(offer.buying),
            amount=Operation.from_xdr_amount(offer.amount),
            price=division(offer.price.n, offer.price.d),
            offer_id=offer.offerID)
class CreatePassiveOffer(Operation):
"""The :class:`CreatePassiveOffer` object, which represents a
CreatePassiveOffer operation on Stellar's network.
A passive offer is an offer that does not act on and take a reverse offer
of | |
import logging
import numpy as np
import pandas as pd
import scipy.stats as ss
from scipy.linalg import eig
from numba import jit
import sg_covid_impact
# from mi_scotland.utils.pandas import preview
logger = logging.getLogger(__name__)
np.seterr(all="raise") # Raise errors on floating point errors
def process_complexity(df, dataset, year, geo_type, cluster, PCI=False):
    """Calculate complexity variables aggregated over the columns.

    Calculates: size, complexity index, complexity outlook index

    Args:
        df (pandas.DataFrame): Long dataframe
            Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
        dataset (str): Name of dataset
        year (str): Year
        geo_type (str): Type of regional geography
        cluster (str): Name of cluster column to use to pivot on
        PCI (bool, optional): If True, calculate product complexity by
            transposing input
            # TODO refactor outside of function

    Returns:
        pandas.DataFrame
    """
    activity = df.pipe(pivot_area_cluster, cluster).fillna(0)
    if PCI:
        # Transpose to compute complexity over products rather than places.
        activity = activity.T
    activity.index.name = "cluster"
    size = activity.sum(1).to_frame("size")
    complexity = activity.pipe(create_lq, binary=True).pipe(
        calc_eci, sign_correction=activity.sum(1)
    )
    if PCI:
        complexity = complexity.rename(columns={"eci": "pci"})
    outlook = activity.pipe(complexity_outlook_index).to_frame(
        "poi" if PCI else "coi"
    )
    return (
        size.join(complexity)
        .join(outlook)
        .assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
    )
def _melt_keep_index(df, value_name="value"):
""" Fully melt a dataframe keeping index, setting new index as all but `value` """
id_vars = df.index.names
return (
df.reset_index()
.melt(id_vars=id_vars, value_name=value_name)
.set_index([*id_vars, df.columns.name])
)
def process_complexity_unit(df, dataset, year, geo_type, cluster):
    """Calculate unaggregated complexity analysis variables

    Calculates: raw value, location quotient, RCA?, distance, opportunity
    outlook gain

    Args:
        df (pandas.DataFrame): Long dataframe
            Expected columns: `{"geo_nm", "geo_cd", cluster, "value"}`
        dataset (str): Name of dataset
        year (str): Year
        geo_type (str): Type of regional geography
        cluster (str): Name of cluster column to use to pivot on

    Returns:
        pandas.DataFrame
    """
    X = df.pipe(pivot_area_cluster, cluster).fillna(0)
    X.columns.name = "cluster"
    # Index: year, location, cluster, geo_type
    # value, LQ, RCA?, distance, OOG
    value = X.pipe(_melt_keep_index, "value")
    lq = X.pipe(create_lq).pipe(_melt_keep_index, "lq")
    has_rca = (lq > 1).rename(columns={"lq": "has_rca"})
    d = X.pipe(distance).pipe(_melt_keep_index, "distance")
    omega = 1 - X.pipe(proximity_density).pipe(_melt_keep_index, "omega")
    oog = opportunity_outlook_gain(X).pipe(_melt_keep_index, "oog")
    # BUG FIX: the original ended the chain with `.pipe(preview)`, but the
    # `preview` import is commented out at the top of the module, so this
    # function raised NameError at runtime.
    return (
        pd.concat([value, lq, has_rca, d, omega, oog], axis=1)
        .assign(year=year, geo_type=geo_type, source=dataset, cluster_type=cluster)
    )
@jit(nopython=True)
def _proximity_matrix(M):
    """Numba kernel for `proximity_matrix`.

    M is a binary activity matrix [locations x activities]; returns the
    symmetric [activities x activities] proximity matrix.
    """
    n_c, n_p = M.shape
    phi = np.empty((n_p, n_p), dtype=np.float64)
    k = M.sum(0) # Ubiquity
    for i in range(n_p):
        Mci = M[:, i]
        for j in range(n_p):
            # phi is symmetric, so only the lower triangle is computed and
            # mirrored below.
            if j > i:
                continue
            Mcj = M[:, j]
            m = max([k[i], k[j]])
            if m == 0:
                # Neither activity occurs anywhere: proximity is undefined.
                v = np.nan
            else:
                # Co-occurrence count normalised by the larger ubiquity.
                v = (Mci * Mcj).sum() / m
            phi[i, j] = v
            phi[j, i] = v
    return phi
def proximity_matrix(X, threshold=1):
    """Calculates proximity matrix

    Proximity between entries calculates the probability that given a revealed
    comparative advantage (RCA) in entity `j`, a location also has a RCA in
    entity `i`. The same probability is calculated with `i` and `j` permuted,
    and the minimum of the two probabilities is then taken.

    .. math::
        \\large{ \\phi_{ij} = \\min\\left\\{\\mathbb{P}(\\text{RCA}_i \\geq 1 |
        \\text{RCA}_j \\geq 1), \\mathbb{P}(\\text{RCA}_j \\geq 1 |
        \\text{RCA}_i \\geq 1)\\right\\} } \\\\
        \\large{ \\phi_{ij} = \\frac{\\sum_c M_{ci} * M_{cj}}{\\max(k_i, k_j)} }
        k = \\sum_i M_{i, j}

    Args:
        X (pandas.DataFrame): Activity matrix [m x n]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.DataFrame [n x n]
    """
    binarised = create_lq(X, binary=True, threshold=threshold)
    phi = _proximity_matrix(binarised.values)
    return pd.DataFrame(phi, index=binarised.columns, columns=binarised.columns)
def proximity_density(X, threshold=1):
    """Calculate proximity density

    .. math:
        \\omega_{ik} = \\frac{ \\sum_j M_{ij} \\phi_{jk}}{\\sum_j \\phi_{jk}}

    Args:
        X (pandas.DataFrame): Activity matrix [m x n]
        threshold (float, optional): Binarisation threshold for location quotient.

    Returns:
        pandas.DataFrame [m x n]
    """
    capabilities = create_lq(X, binary=True, threshold=threshold)
    phi = proximity_matrix(X, threshold)
    weighted = capabilities @ phi
    return weighted / phi.sum(axis=0)
def distance(X, threshold=1):
    """Distance: 1 - proximity density, with existing capabilities as NaN

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location
            quotient.

    Returns:
        pandas.DataFrame [locations x activites]
    """
    capabilities = create_lq(X, threshold, binary=True)
    phi = proximity_matrix(X, threshold)
    # Mask out activities the location already holds (set them to NaN).
    missing_mask = capabilities.applymap(lambda v: np.nan if v == 1 else 1)
    closeness = ((1 - capabilities) @ phi) / phi.sum(axis=1)
    return closeness * missing_mask
def complexity_outlook_index(X, threshold=1):
    """Calculate economic complexity outlook index

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location
            quotient.

    Returns:
        pandas.Series [locations]
    """
    capabilities = create_lq(X, threshold, binary=True)
    dist = distance(X, threshold)
    pci = calc_eci(capabilities.T, sign_correction=X.sum(0))
    # Align columns when the ECI calculation dropped some activities.
    if pci.shape[0] != capabilities.shape[1]:
        capabilities = capabilities.loc[:, pci.index]
        dist = dist.loc[:, pci.index]
    return ((1 - dist) * (1 - capabilities) * pci.values.T).sum(axis=1)
def opportunity_outlook_gain(X, threshold=1):
    """Calculate opportunity outlook gain

    Value for existing capabilities is NaN.

    Args:
        X (pandas.DataFrame): [locations x activities]
        threshold (float, optional): Binarisation threshold for location
            quotient.

    Returns:
        pandas.DataFrame [locations x activites]
    """
    capabilities = create_lq(X, threshold, binary=True)
    phi = proximity_matrix(X, threshold)
    dist = distance(X, threshold)
    pci = calc_eci(capabilities.T, sign_correction=X.sum(0))
    # Align to the activities retained by the ECI calculation.
    if pci.shape[0] != capabilities.shape[1]:
        capabilities = capabilities.loc[:, pci.index]
        phi = phi.loc[pci.index, pci.index]
        dist = dist.loc[:, pci.index]
    # NaN out activities the location already has.
    missing_mask = capabilities.applymap(lambda v: np.nan if v == 1 else 1)
    gain = (1 - capabilities) * pci.values.T @ (phi / phi.sum(0)) - (
        (1 - dist) * pci.values.T
    )
    return gain * missing_mask
def pivot_area_cluster(df, cluster, aggfunc=sum):
    """Convert long data into a matrix, pivoting on `cluster`

    For example, take BRES/IDBR data at Local authority (LAD) geographic level
    and SIC4 sectoral level to create matrix with elements representing the
    activity level for a given LAD-SIC4 combination.

    Args:
        df (pandas.DataFrame): Long dataframe
            Expected Columns: `{"geo_nm", "geo_cd", cluster}`
        cluster (str): Column of the sector type to pivot on
        aggfunc (function, optional): Aggregation function passed to
            `pandas.DataFrame.pivot_table`.

    Returns:
        pandas.DataFrame: [number areas x number cluster]

    Note: Fills missing values with zero
    """
    filled = df.fillna(0)
    # Pivot long data to a wide [areas x sectors] activity matrix.
    return filled.pivot_table(
        index=["geo_cd", "geo_nm"],
        columns=cluster,
        values="value",
        fill_value=0,
        aggfunc=aggfunc,
    )
def create_lq(X, threshold=1, binary=False):
    """Calculate the location quotient.

    Divides the share of activity in a location by the share of activity in
    the UK total.

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.
        threshold (float, optional): Binarisation threshold.
        binary (bool, optional): If True, binarise matrix at `threshold`.

    Returns:
        pandas.DataFrame
    #UTILS
    """
    values = X.values
    with np.errstate(invalid="ignore"):  # 0/0 cells are zeroed via fillna
        denominator = values.sum(1)[:, np.newaxis] * values.sum(0)
        lq = pd.DataFrame(
            (values * values.sum()) / denominator,
            index=X.index,
            columns=X.columns,
        ).fillna(0)
    return (lq > threshold).astype(float) if binary else lq
def calc_fitness(X, n_iters):
    """Calculate the fitness metric of economic complexity

    Args:
        X (pandas.DataFrame): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.
        n_iters (int): Number of iterations to calculate fitness for

    Returns:
        pandas.DataFrame
    #UTILS
    """
    X = _drop_zero_rows_cols(X)
    fitness = np.ones(X.shape[0])
    for _ in range(1, n_iters):
        # One fixed-point iteration of the fitness map, renormalised.
        fitness = (X.values / (X.values / fitness[:, np.newaxis]).sum(0)).sum(1)
        fitness = fitness / fitness.mean()
    return pd.DataFrame(np.log(fitness), index=X.index, columns=["fitness"])
def calc_fit_plus(X, n_iters, correction=True):
    """Calculate the fitness+ (ECI+) metric of economic complexity

    Args:
        X (pandas.Dataframe): Rows are locations, columns are sectors,
            and values are activity in a given sector at a location.
        n_iters (int): Number of iterations to calculate fitness for
        correction (bool, optional): If true, apply logarithmic correction.

    Returns:
        pandas.Dataframe
    #UTILS
    """
    X = _drop_zero_rows_cols(X)
    # Binary matrices use the arithmetic mean; weighted ones the geometric.
    norm_mean = np.mean if X.dtypes[0] == bool else ss.gmean
    x = X.values.sum(axis=1)
    x = x / norm_mean(x)
    for _ in range(1, n_iters):
        x = (X.values / (X.values / x[:, np.newaxis]).sum(0)).sum(1)
        x = x / norm_mean(x)
    if correction:
        x = np.log(x) - np.log((X / X.sum(0)).sum(1))
    return pd.DataFrame(x, index=X.index, columns=["fit_p"])
def calc_eci(X, sign_correction=None):
"""Calculate the original economic complexity index (ECI).
Args:
X (pandas.DataFrame): Rows are locations, columns are sectors,
and values are activity in a given sector at a location.
sign_correction (pd.Series, optional): Array to correlate with ECI
to calculate sign correction. Typically, ubiquity. If None, uses
the sum over columns of the input data.
Returns:
pandas.DataFrame
#UTILS
"""
X = _drop_zero_rows_cols(X)
C = np.diag(1 / X.sum(1)) # Diagonal entries k_C
P = np.diag(1 / X.sum(0)) # Diagonal entries k_P
H = C @ X.values @ P @ X.T.values
w, v = eig(H, left=False, right=True)
eci = pd.DataFrame(v[:, 1].real, index=X.index, columns=["eci"])
# Positively correlate `sign_correction` (some proxy for diversity) w/ ECI
if sign_correction is None:
sign_correction = X.sum(1)
else:
sign_correction = sign_correction.loc[X.index]
sign = np.sign(np.corrcoef(sign_correction, eci.eci.values)[0, 1])
logger.info(f"CI sign: {sign}")
return (eci - | |
10:30:00,54.33,3581.0,52.99
2335,10,19698.0,390,Leisure,1970-01-01 10:30:00,26.71,1781.0,26.35
2336,3,7605.0,390,Housework,1970-01-01 10:30:00,10.31,753.0,11.14
2337,11,3718.0,390,Travel and Other,1970-01-01 10:30:00,5.04,379.0,5.61
2338,4,2216.0,390,Child Care,1970-01-01 10:30:00,3.0,150.0,2.22
2339,5,447.0,390,Adult Care,1970-01-01 10:30:00,0.61,114.0,1.69
2340,6,39399.0,391,Work and Education,1970-01-01 10:31:00,53.42,3513.0,51.98
2341,10,19702.0,391,Leisure,1970-01-01 10:31:00,26.71,1777.0,26.29
2342,3,7614.0,391,Housework,1970-01-01 10:31:00,10.32,752.0,11.13
2343,11,4405.0,391,Travel and Other,1970-01-01 10:31:00,5.97,449.0,6.64
2344,4,2189.0,391,Child Care,1970-01-01 10:31:00,2.97,144.0,2.13
2345,5,449.0,391,Adult Care,1970-01-01 10:31:00,0.61,123.0,1.82
2346,6,39406.0,392,Work and Education,1970-01-01 10:32:00,53.43,3514.0,52.0
2347,10,19716.0,392,Leisure,1970-01-01 10:32:00,26.73,1775.0,26.27
2348,3,7614.0,392,Housework,1970-01-01 10:32:00,10.32,754.0,11.16
2349,11,4385.0,392,Travel and Other,1970-01-01 10:32:00,5.95,446.0,6.6
2350,4,2190.0,392,Child Care,1970-01-01 10:32:00,2.97,145.0,2.15
2351,5,447.0,392,Adult Care,1970-01-01 10:32:00,0.61,124.0,1.83
2352,6,39408.0,393,Work and Education,1970-01-01 10:33:00,53.43,3515.0,52.01
2353,10,19730.0,393,Leisure,1970-01-01 10:33:00,26.75,1773.0,26.24
2354,3,7617.0,393,Housework,1970-01-01 10:33:00,10.33,753.0,11.14
2355,11,4371.0,393,Travel and Other,1970-01-01 10:33:00,5.93,451.0,6.67
2356,4,2187.0,393,Child Care,1970-01-01 10:33:00,2.97,144.0,2.13
2357,5,445.0,393,Adult Care,1970-01-01 10:33:00,0.6,122.0,1.81
2358,6,39414.0,394,Work and Education,1970-01-01 10:34:00,53.44,3516.0,52.03
2359,10,19735.0,394,Leisure,1970-01-01 10:34:00,26.76,1767.0,26.15
2360,3,7622.0,394,Housework,1970-01-01 10:34:00,10.33,753.0,11.14
2361,11,4351.0,394,Travel and Other,1970-01-01 10:34:00,5.9,457.0,6.76
2362,4,2190.0,394,Child Care,1970-01-01 10:34:00,2.97,144.0,2.13
2363,5,446.0,394,Adult Care,1970-01-01 10:34:00,0.6,121.0,1.79
2364,6,39416.0,395,Work and Education,1970-01-01 10:35:00,53.44,3515.0,52.01
2365,10,19742.0,395,Leisure,1970-01-01 10:35:00,26.77,1768.0,26.16
2366,3,7625.0,395,Housework,1970-01-01 10:35:00,10.34,754.0,11.16
2367,11,4337.0,395,Travel and Other,1970-01-01 10:35:00,5.88,456.0,6.75
2368,4,2191.0,395,Child Care,1970-01-01 10:35:00,2.97,144.0,2.13
2369,5,447.0,395,Adult Care,1970-01-01 10:35:00,0.61,121.0,1.79
2370,6,39524.0,396,Work and Education,1970-01-01 10:36:00,53.59,3519.0,52.07
2371,10,19770.0,396,Leisure,1970-01-01 10:36:00,26.8,1761.0,26.06
2372,3,7639.0,396,Housework,1970-01-01 10:36:00,10.36,750.0,11.1
2373,11,4194.0,396,Travel and Other,1970-01-01 10:36:00,5.69,451.0,6.67
2374,4,2191.0,396,Child Care,1970-01-01 10:36:00,2.97,147.0,2.18
2375,5,440.0,396,Adult Care,1970-01-01 10:36:00,0.6,130.0,1.92
2376,6,39535.0,397,Work and Education,1970-01-01 10:37:00,53.6,3519.0,52.07
2377,10,19771.0,397,Leisure,1970-01-01 10:37:00,26.81,1761.0,26.06
2378,3,7639.0,397,Housework,1970-01-01 10:37:00,10.36,745.0,11.02
2379,11,4189.0,397,Travel and Other,1970-01-01 10:37:00,5.68,456.0,6.75
2380,4,2188.0,397,Child Care,1970-01-01 10:37:00,2.97,147.0,2.18
2381,5,436.0,397,Adult Care,1970-01-01 10:37:00,0.59,130.0,1.92
2382,6,39537.0,398,Work and Education,1970-01-01 10:38:00,53.6,3522.0,52.12
2383,10,19778.0,398,Leisure,1970-01-01 10:38:00,26.81,1757.0,26.0
2384,3,7632.0,398,Housework,1970-01-01 10:38:00,10.35,745.0,11.02
2385,11,4175.0,398,Travel and Other,1970-01-01 10:38:00,5.66,456.0,6.75
2386,4,2196.0,398,Child Care,1970-01-01 10:38:00,2.98,146.0,2.16
2387,5,440.0,398,Adult Care,1970-01-01 10:38:00,0.6,132.0,1.95
2388,6,39532.0,399,Work and Education,1970-01-01 10:39:00,53.6,3521.0,52.1
2389,10,19798.0,399,Leisure,1970-01-01 10:39:00,26.84,1757.0,26.0
2390,3,7624.0,399,Housework,1970-01-01 10:39:00,10.34,746.0,11.04
2391,11,4165.0,399,Travel and Other,1970-01-01 10:39:00,5.65,453.0,6.7
2392,4,2198.0,399,Child Care,1970-01-01 10:39:00,2.98,147.0,2.18
2393,5,441.0,399,Adult Care,1970-01-01 10:39:00,0.6,134.0,1.98
2394,6,39532.0,400,Work and Education,1970-01-01 10:40:00,53.6,3522.0,52.12
2395,10,19804.0,400,Leisure,1970-01-01 10:40:00,26.85,1759.0,26.03
2396,3,7628.0,400,Housework,1970-01-01 10:40:00,10.34,746.0,11.04
2397,11,4156.0,400,Travel and Other,1970-01-01 10:40:00,5.63,449.0,6.64
2398,4,2198.0,400,Child Care,1970-01-01 10:40:00,2.98,147.0,2.18
2399,5,440.0,400,Adult Care,1970-01-01 10:40:00,0.6,135.0,2.0
2400,6,39713.0,401,Work and Education,1970-01-01 10:41:00,53.84,3530.0,52.23
2401,10,19767.0,401,Leisure,1970-01-01 10:41:00,26.8,1759.0,26.03
2402,3,7615.0,401,Housework,1970-01-01 10:41:00,10.32,741.0,10.96
2403,11,4002.0,401,Travel and Other,1970-01-01 10:41:00,5.43,450.0,6.66
2404,4,2218.0,401,Child Care,1970-01-01 10:41:00,3.01,145.0,2.15
2405,5,443.0,401,Adult Care,1970-01-01 10:41:00,0.6,133.0,1.97
2406,6,39724.0,402,Work and Education,1970-01-01 10:42:00,53.86,3532.0,52.26
2407,10,19786.0,402,Leisure,1970-01-01 10:42:00,26.83,1758.0,26.01
2408,3,7606.0,402,Housework,1970-01-01 10:42:00,10.31,741.0,10.96
2409,11,3987.0,402,Travel and Other,1970-01-01 10:42:00,5.41,447.0,6.61
2410,4,2215.0,402,Child Care,1970-01-01 10:42:00,3.0,146.0,2.16
2411,5,440.0,402,Adult Care,1970-01-01 10:42:00,0.6,134.0,1.98
2412,6,39734.0,403,Work and Education,1970-01-01 10:43:00,53.87,3532.0,52.26
2413,10,19793.0,403,Leisure,1970-01-01 10:43:00,26.84,1761.0,26.06
2414,3,7608.0,403,Housework,1970-01-01 10:43:00,10.31,743.0,10.99
2415,11,3975.0,403,Travel and Other,1970-01-01 10:43:00,5.39,442.0,6.54
2416,4,2208.0,403,Child Care,1970-01-01 10:43:00,2.99,145.0,2.15
2417,5,440.0,403,Adult Care,1970-01-01 10:43:00,0.6,135.0,2.0
2418,6,39734.0,404,Work and Education,1970-01-01 10:44:00,53.87,3532.0,52.26
2419,10,19801.0,404,Leisure,1970-01-01 10:44:00,26.85,1763.0,26.09
2420,3,7611.0,404,Housework,1970-01-01 10:44:00,10.32,744.0,11.01
2421,11,3965.0,404,Travel and Other,1970-01-01 10:44:00,5.38,439.0,6.5
2422,4,2205.0,404,Child Care,1970-01-01 10:44:00,2.99,145.0,2.15
2423,5,442.0,404,Adult Care,1970-01-01 10:44:00,0.6,135.0,2.0
2424,6,39743.0,405,Work and Education,1970-01-01 10:45:00,53.88,3532.0,52.26
2425,10,19796.0,405,Leisure,1970-01-01 10:45:00,26.84,1762.0,26.07
2426,3,7609.0,405,Housework,1970-01-01 10:45:00,10.32,743.0,10.99
2427,11,3961.0,405,Travel and Other,1970-01-01 10:45:00,5.37,439.0,6.5
2428,4,2206.0,405,Child Care,1970-01-01 10:45:00,2.99,148.0,2.19
2429,5,443.0,405,Adult Care,1970-01-01 10:45:00,0.6,134.0,1.98
2430,6,40163.0,406,Work and Education,1970-01-01 10:46:00,54.45,3561.0,52.69
2431,10,19346.0,406,Leisure,1970-01-01 10:46:00,26.23,1748.0,25.87
2432,3,7560.0,406,Housework,1970-01-01 10:46:00,10.25,734.0,10.86
2433,11,4057.0,406,Travel and Other,1970-01-01 10:46:00,5.5,431.0,6.38
2434,4,2179.0,406,Child Care,1970-01-01 10:46:00,2.95,151.0,2.23
2435,5,453.0,406,Adult Care,1970-01-01 10:46:00,0.61,133.0,1.97
2436,6,40171.0,407,Work and Education,1970-01-01 10:47:00,54.46,3565.0,52.75
2437,10,19357.0,407,Leisure,1970-01-01 10:47:00,26.24,1747.0,25.85
2438,3,7560.0,407,Housework,1970-01-01 10:47:00,10.25,741.0,10.96
2439,11,4042.0,407,Travel and Other,1970-01-01 10:47:00,5.48,425.0,6.29
2440,4,2177.0,407,Child Care,1970-01-01 10:47:00,2.95,150.0,2.22
2441,5,451.0,407,Adult Care,1970-01-01 10:47:00,0.61,130.0,1.92
2442,6,40199.0,408,Work and Education,1970-01-01 10:48:00,54.5,3568.0,52.8
2443,10,19399.0,408,Leisure,1970-01-01 10:48:00,26.3,1753.0,25.94
2444,3,7550.0,408,Housework,1970-01-01 10:48:00,10.24,738.0,10.92
2445,11,3982.0,408,Travel and Other,1970-01-01 10:48:00,5.4,420.0,6.21
2446,4,2181.0,408,Child Care,1970-01-01 10:48:00,2.96,150.0,2.22
2447,5,447.0,408,Adult Care,1970-01-01 10:48:00,0.61,129.0,1.91
2448,6,40211.0,409,Work and Education,1970-01-01 10:49:00,54.52,3568.0,52.8
2449,10,19399.0,409,Leisure,1970-01-01 10:49:00,26.3,1755.0,25.97
2450,3,7549.0,409,Housework,1970-01-01 10:49:00,10.23,740.0,10.95
2451,11,3969.0,409,Travel and Other,1970-01-01 10:49:00,5.38,417.0,6.17
2452,4,2186.0,409,Child Care,1970-01-01 10:49:00,2.96,150.0,2.22
2453,5,444.0,409,Adult Care,1970-01-01 10:49:00,0.6,128.0,1.89
2454,6,40211.0,410,Work and Education,1970-01-01 10:50:00,54.52,3568.0,52.8
2455,10,19407.0,410,Leisure,1970-01-01 10:50:00,26.31,1761.0,26.06
2456,3,7541.0,410,Housework,1970-01-01 10:50:00,10.22,736.0,10.89
2457,11,3970.0,410,Travel and Other,1970-01-01 10:50:00,5.38,415.0,6.14
2458,4,2186.0,410,Child Care,1970-01-01 10:50:00,2.96,149.0,2.2
2459,5,443.0,410,Adult Care,1970-01-01 10:50:00,0.6,129.0,1.91
2460,6,40289.0,411,Work and Education,1970-01-01 10:51:00,54.62,3582.0,53.0
2461,10,19407.0,411,Leisure,1970-01-01 10:51:00,26.31,1755.0,25.97
2462,3,7518.0,411,Housework,1970-01-01 10:51:00,10.19,727.0,10.76
2463,11,3873.0,411,Travel and Other,1970-01-01 10:51:00,5.25,410.0,6.07
2464,4,2220.0,411,Child Care,1970-01-01 10:51:00,3.01,154.0,2.28
2465,5,451.0,411,Adult Care,1970-01-01 10:51:00,0.61,130.0,1.92
2466,6,40290.0,412,Work and Education,1970-01-01 10:52:00,54.62,3581.0,52.99
2467,10,19419.0,412,Leisure,1970-01-01 10:52:00,26.33,1757.0,26.0
2468,3,7517.0,412,Housework,1970-01-01 10:52:00,10.19,728.0,10.77
2469,11,3866.0,412,Travel and Other,1970-01-01 10:52:00,5.24,409.0,6.05
2470,4,2217.0,412,Child Care,1970-01-01 10:52:00,3.01,153.0,2.26
2471,5,449.0,412,Adult Care,1970-01-01 10:52:00,0.61,130.0,1.92
2472,6,40308.0,413,Work and Education,1970-01-01 10:53:00,54.65,3579.0,52.96
2473,10,19433.0,413,Leisure,1970-01-01 10:53:00,26.35,1761.0,26.06
2474,3,7512.0,413,Housework,1970-01-01 10:53:00,10.18,728.0,10.77
2475,11,3848.0,413,Travel and Other,1970-01-01 10:53:00,5.22,407.0,6.02
2476,4,2210.0,413,Child Care,1970-01-01 10:53:00,3.0,154.0,2.28
2477,5,447.0,413,Adult Care,1970-01-01 10:53:00,0.61,129.0,1.91
2478,6,40307.0,414,Work and Education,1970-01-01 10:54:00,54.65,3579.0,52.96
2479,10,19463.0,414,Leisure,1970-01-01 10:54:00,26.39,1754.0,25.95
2480,3,7516.0,414,Housework,1970-01-01 10:54:00,10.19,730.0,10.8
2481,11,3821.0,414,Travel and Other,1970-01-01 10:54:00,5.18,411.0,6.08
2482,4,2205.0,414,Child Care,1970-01-01 10:54:00,2.99,154.0,2.28
2483,5,446.0,414,Adult Care,1970-01-01 10:54:00,0.6,130.0,1.92
2484,6,40315.0,415,Work and Education,1970-01-01 10:55:00,54.66,3579.0,52.96
2485,10,19472.0,415,Leisure,1970-01-01 10:55:00,26.4,1758.0,26.01
2486,3,7517.0,415,Housework,1970-01-01 10:55:00,10.19,733.0,10.85
2487,11,3804.0,415,Travel and Other,1970-01-01 10:55:00,5.16,406.0,6.01
2488,4,2203.0,415,Child Care,1970-01-01 10:55:00,2.99,153.0,2.26
2489,5,447.0,415,Adult Care,1970-01-01 10:55:00,0.61,129.0,1.91
2490,6,40419.0,416,Work and Education,1970-01-01 10:56:00,54.8,3580.0,52.97
2491,10,19478.0,416,Leisure,1970-01-01 10:56:00,26.41,1762.0,26.07
2492,3,7488.0,416,Housework,1970-01-01 10:56:00,10.15,727.0,10.76
2493,11,3700.0,416,Travel and Other,1970-01-01 10:56:00,5.02,409.0,6.05
2494,4,2217.0,416,Child Care,1970-01-01 10:56:00,3.01,152.0,2.25
2495,5,456.0,416,Adult Care,1970-01-01 10:56:00,0.62,128.0,1.89
2496,6,40419.0,417,Work and Education,1970-01-01 10:57:00,54.8,3581.0,52.99
2497,10,19489.0,417,Leisure,1970-01-01 10:57:00,26.42,1771.0,26.21
2498,3,7489.0,417,Housework,1970-01-01 10:57:00,10.15,724.0,10.71
2499,11,3697.0,417,Travel and Other,1970-01-01 10:57:00,5.01,402.0,5.95
2500,4,2208.0,417,Child Care,1970-01-01 10:57:00,2.99,152.0,2.25
2501,5,456.0,417,Adult Care,1970-01-01 10:57:00,0.62,128.0,1.89
2502,6,40419.0,418,Work and Education,1970-01-01 10:58:00,54.8,3580.0,52.97
2503,10,19498.0,418,Leisure,1970-01-01 10:58:00,26.44,1775.0,26.27
2504,3,7480.0,418,Housework,1970-01-01 10:58:00,10.14,720.0,10.65
2505,11,3700.0,418,Travel and Other,1970-01-01 10:58:00,5.02,402.0,5.95
2506,4,2209.0,418,Child Care,1970-01-01 10:58:00,2.99,151.0,2.23
2507,5,452.0,418,Adult Care,1970-01-01 10:58:00,0.61,130.0,1.92
2508,6,40426.0,419,Work and Education,1970-01-01 10:59:00,54.81,3580.0,52.97
2509,10,19493.0,419,Leisure,1970-01-01 10:59:00,26.43,1772.0,26.22
2510,3,7471.0,419,Housework,1970-01-01 10:59:00,10.13,719.0,10.64
2511,11,3709.0,419,Travel and Other,1970-01-01 10:59:00,5.03,407.0,6.02
2512,4,2205.0,419,Child Care,1970-01-01 10:59:00,2.99,151.0,2.23
2513,5,454.0,419,Adult Care,1970-01-01 10:59:00,0.62,129.0,1.91
2514,6,40427.0,420,Work and Education,1970-01-01 11:00:00,54.81,3581.0,52.99
2515,10,19494.0,420,Leisure,1970-01-01 11:00:00,26.43,1767.0,26.15
2516,3,7474.0,420,Housework,1970-01-01 11:00:00,10.13,719.0,10.64
2517,11,3702.0,420,Travel and Other,1970-01-01 11:00:00,5.02,412.0,6.1
2518,4,2206.0,420,Child Care,1970-01-01 11:00:00,2.99,150.0,2.22
2519,5,455.0,420,Adult Care,1970-01-01 11:00:00,0.62,129.0,1.91
2520,6,38186.0,421,Work and Education,1970-01-01 11:01:00,51.77,3434.0,50.81
2521,10,20245.0,421,Leisure,1970-01-01 11:01:00,27.45,1825.0,27.01
2522,3,7522.0,421,Housework,1970-01-01 11:01:00,10.2,700.0,10.36
2523,11,5278.0,421,Travel and Other,1970-01-01 11:01:00,7.16,510.0,7.55
2524,4,2081.0,421,Child Care,1970-01-01 11:01:00,2.82,145.0,2.15
2525,5,446.0,421,Adult Care,1970-01-01 11:01:00,0.6,144.0,2.13
2526,6,38198.0,422,Work and Education,1970-01-01 11:02:00,51.79,3436.0,50.84
2527,10,20271.0,422,Leisure,1970-01-01 11:02:00,27.48,1826.0,27.02
2528,3,7532.0,422,Housework,1970-01-01 11:02:00,10.21,699.0,10.34
2529,11,5236.0,422,Travel and Other,1970-01-01 11:02:00,7.1,504.0,7.46
2530,4,2076.0,422,Child Care,1970-01-01 11:02:00,2.81,148.0,2.19
2531,5,445.0,422,Adult Care,1970-01-01 11:02:00,0.6,145.0,2.15
2532,6,38207.0,423,Work and Education,1970-01-01 11:03:00,51.8,3435.0,50.83
2533,10,20329.0,423,Leisure,1970-01-01 11:03:00,27.56,1825.0,27.01
2534,3,7522.0,423,Housework,1970-01-01 11:03:00,10.2,699.0,10.34
2535,11,5179.0,423,Travel and Other,1970-01-01 11:03:00,7.02,508.0,7.52
2536,4,2076.0,423,Child Care,1970-01-01 11:03:00,2.81,148.0,2.19
2537,5,445.0,423,Adult Care,1970-01-01 11:03:00,0.6,143.0,2.12
2538,6,38204.0,424,Work and Education,1970-01-01 11:04:00,51.8,3434.0,50.81
2539,10,20372.0,424,Leisure,1970-01-01 11:04:00,27.62,1829.0,27.06
2540,3,7534.0,424,Housework,1970-01-01 11:04:00,10.21,698.0,10.33
2541,11,5130.0,424,Travel and Other,1970-01-01 11:04:00,6.96,510.0,7.55
2542,4,2074.0,424,Child Care,1970-01-01 11:04:00,2.81,147.0,2.18
2543,5,444.0,424,Adult Care,1970-01-01 11:04:00,0.6,140.0,2.07
2544,6,38208.0,425,Work and Education,1970-01-01 11:05:00,51.8,3436.0,50.84
2545,10,20404.0,425,Leisure,1970-01-01 11:05:00,27.66,1829.0,27.06
2546,3,7528.0,425,Housework,1970-01-01 11:05:00,10.21,699.0,10.34
2547,11,5097.0,425,Travel and Other,1970-01-01 11:05:00,6.91,509.0,7.53
2548,4,2077.0,425,Child Care,1970-01-01 11:05:00,2.82,145.0,2.15
2549,5,444.0,425,Adult Care,1970-01-01 11:05:00,0.6,140.0,2.07
2550,6,38298.0,426,Work and Education,1970-01-01 11:06:00,51.92,3443.0,50.95
2551,10,20632.0,426,Leisure,1970-01-01 11:06:00,27.97,1851.0,27.39
2552,3,7528.0,426,Housework,1970-01-01 11:06:00,10.21,692.0,10.24
2553,11,4756.0,426,Travel and Other,1970-01-01 11:06:00,6.45,491.0,7.27
2554,4,2094.0,426,Child Care,1970-01-01 11:06:00,2.84,142.0,2.1
2555,5,450.0,426,Adult Care,1970-01-01 11:06:00,0.61,139.0,2.06
2556,6,38291.0,427,Work and Education,1970-01-01 11:07:00,51.91,3443.0,50.95
2557,10,20657.0,427,Leisure,1970-01-01 11:07:00,28.01,1853.0,27.42
2558,3,7530.0,427,Housework,1970-01-01 11:07:00,10.21,691.0,10.22
2559,11,4731.0,427,Travel and Other,1970-01-01 11:07:00,6.41,490.0,7.25
2560,4,2100.0,427,Child Care,1970-01-01 11:07:00,2.85,142.0,2.1
2561,5,449.0,427,Adult Care,1970-01-01 11:07:00,0.61,139.0,2.06
2562,6,38306.0,428,Work and Education,1970-01-01 11:08:00,51.93,3444.0,50.96
2563,10,20675.0,428,Leisure,1970-01-01 11:08:00,28.03,1855.0,27.45
2564,3,7532.0,428,Housework,1970-01-01 11:08:00,10.21,694.0,10.27
2565,11,4691.0,428,Travel and Other,1970-01-01 11:08:00,6.36,482.0,7.13
2566,4,2104.0,428,Child Care,1970-01-01 11:08:00,2.85,144.0,2.13
2567,5,450.0,428,Adult Care,1970-01-01 11:08:00,0.61,139.0,2.06
2568,6,38316.0,429,Work and Education,1970-01-01 11:09:00,51.95,3445.0,50.98
2569,10,20675.0,429,Leisure,1970-01-01 11:09:00,28.03,1861.0,27.54
2570,3,7524.0,429,Housework,1970-01-01 11:09:00,10.2,693.0,10.25
2571,11,4686.0,429,Travel and Other,1970-01-01 11:09:00,6.35,477.0,7.06
2572,4,2107.0,429,Child Care,1970-01-01 11:09:00,2.86,143.0,2.12
2573,5,450.0,429,Adult Care,1970-01-01 11:09:00,0.61,139.0,2.06
2574,6,38315.0,430,Work and Education,1970-01-01 11:10:00,51.95,3447.0,51.01
2575,10,20686.0,430,Leisure,1970-01-01 11:10:00,28.05,1860.0,27.52
2576,3,7523.0,430,Housework,1970-01-01 11:10:00,10.2,694.0,10.27
2577,11,4679.0,430,Travel and Other,1970-01-01 11:10:00,6.34,475.0,7.03
2578,4,2106.0,430,Child Care,1970-01-01 11:10:00,2.86,143.0,2.12
2579,5,449.0,430,Adult Care,1970-01-01 11:10:00,0.61,139.0,2.06
2580,6,38455.0,431,Work and Education,1970-01-01 11:11:00,52.14,3453.0,51.09
2581,10,20878.0,431,Leisure,1970-01-01 11:11:00,28.31,1897.0,28.07
2582,3,7509.0,431,Housework,1970-01-01 11:11:00,10.18,687.0,10.17
2583,11,4336.0,431,Travel and Other,1970-01-01 11:11:00,5.88,442.0,6.54
2584,4,2122.0,431,Child Care,1970-01-01 11:11:00,2.88,144.0,2.13
2585,5,458.0,431,Adult Care,1970-01-01 11:11:00,0.62,135.0,2.0
2586,6,38458.0,432,Work and Education,1970-01-01 11:12:00,52.14,3455.0,51.12
2587,10,20888.0,432,Leisure,1970-01-01 11:12:00,28.32,1896.0,28.06
2588,3,7507.0,432,Housework,1970-01-01 11:12:00,10.18,686.0,10.15
2589,11,4322.0,432,Travel and Other,1970-01-01 11:12:00,5.86,442.0,6.54
2590,4,2121.0,432,Child Care,1970-01-01 11:12:00,2.88,144.0,2.13
2591,5,462.0,432,Adult Care,1970-01-01 11:12:00,0.63,135.0,2.0
2592,6,38465.0,433,Work and Education,1970-01-01 11:13:00,52.15,3458.0,51.17
2593,10,20915.0,433,Leisure,1970-01-01 11:13:00,28.36,1897.0,28.07
2594,3,7494.0,433,Housework,1970-01-01 11:13:00,10.16,686.0,10.15
2595,11,4306.0,433,Travel and Other,1970-01-01 11:13:00,5.84,439.0,6.5
2596,4,2118.0,433,Child Care,1970-01-01 11:13:00,2.87,143.0,2.12
2597,5,460.0,433,Adult Care,1970-01-01 11:13:00,0.62,135.0,2.0
2598,6,38465.0,434,Work and Education,1970-01-01 11:14:00,52.15,3459.0,51.18
2599,10,20921.0,434,Leisure,1970-01-01 11:14:00,28.36,1902.0,28.14
2600,3,7488.0,434,Housework,1970-01-01 11:14:00,10.15,687.0,10.17
2601,11,4314.0,434,Travel and Other,1970-01-01 11:14:00,5.85,431.0,6.38
2602,4,2113.0,434,Child Care,1970-01-01 11:14:00,2.86,145.0,2.15
2603,5,457.0,434,Adult Care,1970-01-01 11:14:00,0.62,134.0,1.98
2604,6,38470.0,435,Work and Education,1970-01-01 11:15:00,52.16,3460.0,51.2
2605,10,20929.0,435,Leisure,1970-01-01 11:15:00,28.38,1902.0,28.14
2606,3,7488.0,435,Housework,1970-01-01 11:15:00,10.15,690.0,10.21
2607,11,4306.0,435,Travel and Other,1970-01-01 11:15:00,5.84,425.0,6.29
2608,4,2109.0,435,Child Care,1970-01-01 11:15:00,2.86,146.0,2.16
2609,5,456.0,435,Adult Care,1970-01-01 11:15:00,0.62,135.0,2.0
2610,6,38639.0,436,Work and Education,1970-01-01 11:16:00,52.39,3489.0,51.63
2611,10,20777.0,436,Leisure,1970-01-01 11:16:00,28.17,1873.0,27.72
2612,3,7396.0,436,Housework,1970-01-01 11:16:00,10.03,673.0,9.96
2613,11,4382.0,436,Travel and Other,1970-01-01 11:16:00,5.94,433.0,6.41
2614,4,2102.0,436,Child Care,1970-01-01 11:16:00,2.85,153.0,2.26
2615,5,462.0,436,Adult Care,1970-01-01 11:16:00,0.63,137.0,2.03
2616,6,38647.0,437,Work and Education,1970-01-01 11:17:00,52.4,3487.0,51.6
2617,10,20791.0,437,Leisure,1970-01-01 11:17:00,28.19,1875.0,27.74
2618,3,7397.0,437,Housework,1970-01-01 11:17:00,10.03,673.0,9.96
2619,11,4362.0,437,Travel and Other,1970-01-01 11:17:00,5.91,437.0,6.47
2620,4,2102.0,437,Child Care,1970-01-01 11:17:00,2.85,152.0,2.25
2621,5,459.0,437,Adult Care,1970-01-01 11:17:00,0.62,134.0,1.98
2622,6,38663.0,438,Work and Education,1970-01-01 11:18:00,52.42,3490.0,51.64
2623,10,20805.0,438,Leisure,1970-01-01 11:18:00,28.21,1876.0,27.76
2624,3,7402.0,438,Housework,1970-01-01 11:18:00,10.04,672.0,9.94
2625,11,4330.0,438,Travel and Other,1970-01-01 11:18:00,5.87,435.0,6.44
2626,4,2100.0,438,Child Care,1970-01-01 11:18:00,2.85,152.0,2.25
2627,5,458.0,438,Adult Care,1970-01-01 11:18:00,0.62,133.0,1.97
2628,6,38661.0,439,Work and Education,1970-01-01 11:19:00,52.42,3490.0,51.64
2629,10,20816.0,439,Leisure,1970-01-01 11:19:00,28.22,1882.0,27.85
2630,3,7406.0,439,Housework,1970-01-01 11:19:00,10.04,670.0,9.91
2631,11,4309.0,439,Travel and Other,1970-01-01 11:19:00,5.84,430.0,6.36
2632,4,2110.0,439,Child Care,1970-01-01 11:19:00,2.86,152.0,2.25
2633,5,456.0,439,Adult Care,1970-01-01 11:19:00,0.62,134.0,1.98
2634,6,38665.0,440,Work and Education,1970-01-01 11:20:00,52.42,3489.0,51.63
2635,10,20820.0,440,Leisure,1970-01-01 11:20:00,28.23,1892.0,28.0
2636,3,7406.0,440,Housework,1970-01-01 11:20:00,10.04,668.0,9.88
2637,11,4301.0,440,Travel and Other,1970-01-01 11:20:00,5.83,423.0,6.26
2638,4,2111.0,440,Child Care,1970-01-01 11:20:00,2.86,151.0,2.23
2639,5,455.0,440,Adult Care,1970-01-01 11:20:00,0.62,135.0,2.0
2640,6,38771.0,441,Work and Education,1970-01-01 11:21:00,52.57,3496.0,51.73
2641,10,20921.0,441,Leisure,1970-01-01 11:21:00,28.36,1880.0,27.82
2642,3,7352.0,441,Housework,1970-01-01 11:21:00,9.97,669.0,9.9
2643,11,4137.0,441,Travel and Other,1970-01-01 11:21:00,5.61,428.0,6.33
2644,4,2111.0,441,Child Care,1970-01-01 11:21:00,2.86,147.0,2.18
2645,5,466.0,441,Adult Care,1970-01-01 11:21:00,0.63,138.0,2.04
2646,6,38779.0,442,Work and Education,1970-01-01 11:22:00,52.58,3496.0,51.73
2647,10,20940.0,442,Leisure,1970-01-01 11:22:00,28.39,1878.0,27.79
2648,3,7340.0,442,Housework,1970-01-01 11:22:00,9.95,671.0,9.93
2649,11,4119.0,442,Travel and Other,1970-01-01 11:22:00,5.58,431.0,6.38
2650,4,2116.0,442,Child Care,1970-01-01 11:22:00,2.87,147.0,2.18
2651,5,464.0,442,Adult Care,1970-01-01 11:22:00,0.63,135.0,2.0
2652,6,38775.0,443,Work and Education,1970-01-01 11:23:00,52.57,3495.0,51.72
2653,10,20965.0,443,Leisure,1970-01-01 11:23:00,28.42,1876.0,27.76
2654,3,7342.0,443,Housework,1970-01-01 11:23:00,9.95,672.0,9.94
2655,11,4098.0,443,Travel and Other,1970-01-01 11:23:00,5.56,432.0,6.39
2656,4,2113.0,443,Child Care,1970-01-01 11:23:00,2.86,147.0,2.18
2657,5,465.0,443,Adult Care,1970-01-01 11:23:00,0.63,136.0,2.01
2658,6,38774.0,444,Work and Education,1970-01-01 11:24:00,52.57,3494.0,51.7
2659,10,20978.0,444,Leisure,1970-01-01 11:24:00,28.44,1872.0,27.7
2660,3,7338.0,444,Housework,1970-01-01 11:24:00,9.95,672.0,9.94
2661,11,4082.0,444,Travel and Other,1970-01-01 11:24:00,5.53,437.0,6.47
2662,4,2118.0,444,Child Care,1970-01-01 11:24:00,2.87,145.0,2.15
2663,5,468.0,444,Adult Care,1970-01-01 11:24:00,0.63,138.0,2.04
2664,6,38773.0,445,Work and Education,1970-01-01 11:25:00,52.57,3494.0,51.7
2665,10,20990.0,445,Leisure,1970-01-01 11:25:00,28.46,1876.0,27.76
2666,3,7341.0,445,Housework,1970-01-01 11:25:00,9.95,670.0,9.91
2667,11,4067.0,445,Travel and Other,1970-01-01 11:25:00,5.51,435.0,6.44
2668,4,2118.0,445,Child Care,1970-01-01 11:25:00,2.87,145.0,2.15
2669,5,469.0,445,Adult Care,1970-01-01 11:25:00,0.64,138.0,2.04
2670,6,38815.0,446,Work and Education,1970-01-01 11:26:00,52.62,3500.0,51.79
2671,10,21041.0,446,Leisure,1970-01-01 11:26:00,28.53,1883.0,27.86
2672,3,7324.0,446,Housework,1970-01-01 11:26:00,9.93,684.0,10.12
2673,11,3974.0,446,Travel and Other,1970-01-01 11:26:00,5.39,414.0,6.13
2674,4,2130.0,446,Child Care,1970-01-01 11:26:00,2.89,143.0,2.12
2675,5,474.0,446,Adult Care,1970-01-01 11:26:00,0.64,134.0,1.98
2676,6,38819.0,447,Work and Education,1970-01-01 11:27:00,52.63,3502.0,51.82
2677,10,21038.0,447,Leisure,1970-01-01 11:27:00,28.52,1881.0,27.83
2678,3,7315.0,447,Housework,1970-01-01 11:27:00,9.92,684.0,10.12
2679,11,3991.0,447,Travel and Other,1970-01-01 11:27:00,5.41,416.0,6.16
2680,4,2124.0,447,Child Care,1970-01-01 11:27:00,2.88,142.0,2.1
2681,5,471.0,447,Adult Care,1970-01-01 11:27:00,0.64,133.0,1.97
2682,6,38820.0,448,Work and Education,1970-01-01 11:28:00,52.63,3502.0,51.82
2683,10,21025.0,448,Leisure,1970-01-01 11:28:00,28.51,1883.0,27.86
2684,3,7326.0,448,Housework,1970-01-01 11:28:00,9.93,680.0,10.06
2685,11,3992.0,448,Travel and Other,1970-01-01 11:28:00,5.41,419.0,6.2
2686,4,2125.0,448,Child Care,1970-01-01 11:28:00,2.88,142.0,2.1
2687,5,470.0,448,Adult Care,1970-01-01 11:28:00,0.64,132.0,1.95
2688,6,38823.0,449,Work and Education,1970-01-01 11:29:00,52.64,3502.0,51.82
2689,10,21029.0,449,Leisure,1970-01-01 11:29:00,28.51,1883.0,27.86
2690,3,7329.0,449,Housework,1970-01-01 11:29:00,9.94,679.0,10.05
2691,11,3982.0,449,Travel and Other,1970-01-01 11:29:00,5.4,420.0,6.21
2692,4,2124.0,449,Child Care,1970-01-01 11:29:00,2.88,142.0,2.1
2693,5,471.0,449,Adult Care,1970-01-01 11:29:00,0.64,132.0,1.95
2694,6,38821.0,450,Work and Education,1970-01-01 11:30:00,52.63,3504.0,51.85
2695,10,21037.0,450,Leisure,1970-01-01 11:30:00,28.52,1878.0,27.79
2696,3,7330.0,450,Housework,1970-01-01 11:30:00,9.94,680.0,10.06
2697,11,3975.0,450,Travel and Other,1970-01-01 11:30:00,5.39,422.0,6.24
2698,4,2126.0,450,Child Care,1970-01-01 11:30:00,2.88,140.0,2.07
2699,5,469.0,450,Adult Care,1970-01-01 11:30:00,0.64,134.0,1.98
2700,6,36295.0,451,Work and Education,1970-01-01 11:31:00,49.21,3224.0,47.71
2701,10,22164.0,451,Leisure,1970-01-01 11:31:00,30.05,2008.0,29.71
2702,3,7303.0,451,Housework,1970-01-01 11:31:00,9.9,689.0,10.2
2703,11,5466.0,451,Travel and Other,1970-01-01 11:31:00,7.41,570.0,8.43
2704,4,2051.0,451,Child Care,1970-01-01 11:31:00,2.78,132.0,1.95
2705,5,479.0,451,Adult Care,1970-01-01 11:31:00,0.65,135.0,2.0
2706,6,36297.0,452,Work and Education,1970-01-01 11:32:00,49.21,3226.0,47.74
2707,10,22194.0,452,Leisure,1970-01-01 11:32:00,30.09,2011.0,29.76
2708,3,7299.0,452,Housework,1970-01-01 11:32:00,9.9,685.0,10.14
2709,11,5437.0,452,Travel and Other,1970-01-01 11:32:00,7.37,567.0,8.39
2710,4,2054.0,452,Child Care,1970-01-01 11:32:00,2.78,133.0,1.97
2711,5,477.0,452,Adult Care,1970-01-01 11:32:00,0.65,136.0,2.01
2712,6,36309.0,453,Work and Education,1970-01-01 11:33:00,49.23,3228.0,47.77
2713,10,22242.0,453,Leisure,1970-01-01 11:33:00,30.16,2024.0,29.95
2714,3,7323.0,453,Housework,1970-01-01 11:33:00,9.93,683.0,10.11
2715,11,5360.0,453,Travel and Other,1970-01-01 11:33:00,7.27,554.0,8.2
2716,4,2042.0,453,Child Care,1970-01-01 11:33:00,2.77,133.0,1.97
2717,5,482.0,453,Adult Care,1970-01-01 11:33:00,0.65,136.0,2.01
2718,6,36318.0,454,Work and Education,1970-01-01 11:34:00,49.24,3232.0,47.82
2719,10,22290.0,454,Leisure,1970-01-01 11:34:00,30.22,2030.0,30.04
2720,3,7323.0,454,Housework,1970-01-01 11:34:00,9.93,681.0,10.08
2721,11,5314.0,454,Travel and Other,1970-01-01 11:34:00,7.2,546.0,8.08
2722,4,2037.0,454,Child Care,1970-01-01 11:34:00,2.76,134.0,1.98
2723,5,476.0,454,Adult Care,1970-01-01 11:34:00,0.65,135.0,2.0
2724,6,36328.0,455,Work and Education,1970-01-01 11:35:00,49.25,3233.0,47.84
2725,10,22318.0,455,Leisure,1970-01-01 11:35:00,30.26,2035.0,30.11
2726,3,7324.0,455,Housework,1970-01-01 11:35:00,9.93,681.0,10.08
2727,11,5273.0,455,Travel and Other,1970-01-01 11:35:00,7.15,543.0,8.03
2728,4,2038.0,455,Child Care,1970-01-01 11:35:00,2.76,132.0,1.95
2729,5,477.0,455,Adult Care,1970-01-01 11:35:00,0.65,134.0,1.98
2730,6,36371.0,456,Work and Education,1970-01-01 11:36:00,49.31,3247.0,48.05
2731,10,22725.0,456,Leisure,1970-01-01 11:36:00,30.81,2078.0,30.75
2732,3,7268.0,456,Housework,1970-01-01 11:36:00,9.85,677.0,10.02
2733,11,4858.0,456,Travel and Other,1970-01-01 11:36:00,6.59,488.0,7.22
2734,4,2043.0,456,Child Care,1970-01-01 11:36:00,2.77,134.0,1.98
2735,5,493.0,456,Adult Care,1970-01-01 11:36:00,0.67,134.0,1.98
2736,6,36371.0,457,Work and Education,1970-01-01 11:37:00,49.31,3247.0,48.05
2737,10,22746.0,457,Leisure,1970-01-01 11:37:00,30.84,2078.0,30.75
2738,3,7272.0,457,Housework,1970-01-01 11:37:00,9.86,679.0,10.05
2739,11,4835.0,457,Travel and Other,1970-01-01 11:37:00,6.56,486.0,7.19
2740,4,2043.0,457,Child Care,1970-01-01 11:37:00,2.77,134.0,1.98
2741,5,491.0,457,Adult Care,1970-01-01 11:37:00,0.67,134.0,1.98
2742,6,36380.0,458,Work and Education,1970-01-01 11:38:00,49.32,3240.0,47.94
2743,10,22769.0,458,Leisure,1970-01-01 11:38:00,30.87,2084.0,30.84
2744,3,7258.0,458,Housework,1970-01-01 11:38:00,9.84,679.0,10.05
2745,11,4828.0,458,Travel and Other,1970-01-01 11:38:00,6.55,485.0,7.18
2746,4,2041.0,458,Child Care,1970-01-01 11:38:00,2.77,136.0,2.01
2747,5,482.0,458,Adult Care,1970-01-01 11:38:00,0.65,134.0,1.98
2748,6,36390.0,459,Work and Education,1970-01-01 11:39:00,49.34,3242.0,47.97
2749,10,22784.0,459,Leisure,1970-01-01 11:39:00,30.89,2088.0,30.9
2750,3,7267.0,459,Housework,1970-01-01 11:39:00,9.85,679.0,10.05
2751,11,4802.0,459,Travel and Other,1970-01-01 11:39:00,6.51,479.0,7.09
2752,4,2035.0,459,Child Care,1970-01-01 11:39:00,2.76,136.0,2.01
2753,5,480.0,459,Adult Care,1970-01-01 11:39:00,0.65,134.0,1.98
2754,6,36397.0,460,Work and Education,1970-01-01 11:40:00,49.35,3242.0,47.97
2755,10,22794.0,460,Leisure,1970-01-01 11:40:00,30.9,2092.0,30.96
2756,3,7262.0,460,Housework,1970-01-01 11:40:00,9.85,678.0,10.03
2757,11,4791.0,460,Travel and Other,1970-01-01 11:40:00,6.5,477.0,7.06
2758,4,2035.0,460,Child Care,1970-01-01 11:40:00,2.76,136.0,2.01
2759,5,479.0,460,Adult Care,1970-01-01 11:40:00,0.65,133.0,1.97
2760,6,36478.0,461,Work and Education,1970-01-01 11:41:00,49.46,3245.0,48.02
2761,10,23079.0,461,Leisure,1970-01-01 | |
= self.refresh_token
return result
def from_map(self, map={}):
if map.get('headers') is not None:
self.headers = map.get('headers')
if map.get('addition_data') is not None:
self.addition_data = map.get('addition_data')
if map.get('app_id') is not None:
self.app_id = map.get('app_id')
if map.get('grant_type') is not None:
self.grant_type = map.get('grant_type')
if map.get('refresh_token') is not None:
self.refresh_token = map.get('refresh_token')
return self
class UpdateDriveResponse(TeaModel):
"""
Update drive response
"""
def __init__(self, creator=None, description=None, domain_id=None, drive_id=None, drive_name=None,
drive_type=None, encrypt_data_access=None, encrypt_mode=None, owner=None, relative_path=None, status=None,
store_id=None, total_size=None, used_size=None):
# Drive 创建者
self.creator = creator # type: str
# Drive 备注信息
self.description = description # type: str
# Domain ID
self.domain_id = domain_id # type: str
# Drive ID
self.drive_id = drive_id # type: str
# Drive 名称
self.drive_name = drive_name # type: str
# Drive 类型
self.drive_type = drive_type # type: str
self.encrypt_data_access = encrypt_data_access # type: bool
self.encrypt_mode = encrypt_mode # type: str
# Drive 所有者
self.owner = owner # type: str
# Drive存储基于store的相对路径,domain的PathType为OSSPath时返回
self.relative_path = relative_path # type: str
# Drive 状态
self.status = status # type: str
# 存储 ID, domain的PathType为OSSPath时返回
self.store_id = store_id # type: str
# Drive 空间总量
self.total_size = total_size # type: int
# Drive 空间已使用量
self.used_size = used_size # type: int
def validate(self):
pass
def to_map(self):
result = {}
if self.creator is not None:
result['creator'] = self.creator
if self.description is not None:
result['description'] = self.description
if self.domain_id is not None:
result['domain_id'] = self.domain_id
if self.drive_id is not None:
result['drive_id'] = self.drive_id
if self.drive_name is not None:
result['drive_name'] = self.drive_name
if self.drive_type is not None:
result['drive_type'] = self.drive_type
if self.encrypt_data_access is not None:
result['encrypt_data_access'] = self.encrypt_data_access
if self.encrypt_mode is not None:
result['encrypt_mode'] = self.encrypt_mode
if self.owner is not None:
result['owner'] = self.owner
if self.relative_path is not None:
result['relative_path'] = self.relative_path
if self.status is not None:
result['status'] = self.status
if self.store_id is not None:
result['store_id'] = self.store_id
if self.total_size is not None:
result['total_size'] = self.total_size
if self.used_size is not None:
result['used_size'] = self.used_size
return result
def from_map(self, map=None):
    """Populate this model from a dict, skipping absent or None values.

    The parameter keeps its historical name ``map`` (it shadows the
    builtin) so that existing keyword callers keep working.

    Returns ``self`` to allow chaining.
    """
    # Fix: the original used a mutable default argument (``map={}``);
    # a None sentinel avoids the shared-default pitfall while remaining
    # backward compatible.
    if map is None:
        map = {}
    field_names = (
        'creator', 'description', 'domain_id', 'drive_id', 'drive_name',
        'drive_type', 'encrypt_data_access', 'encrypt_mode', 'owner',
        'relative_path', 'status', 'store_id', 'total_size', 'used_size',
    )
    for field_name in field_names:
        value = map.get(field_name)
        if value is not None:
            setattr(self, field_name, value)
    return self
class UpdateFileMetaResponse(TeaModel):
"""
更新文件元数据 response
"""
def __init__(self, category=None, content_hash=None, content_hash_name=None, content_type=None, crc_64hash=None,
             created_at=None, description=None, domain_id=None, download_url=None, drive_id=None, encrypt_mode=None,
             file_extension=None, file_id=None, hidden=None, image_media_metadata=None, labels=None, meta=None, name=None,
             parent_file_id=None, punish_flag=None, size=None, starred=None, status=None, streams_info=None, thumbnail=None,
             trashed_at=None, type=None, updated_at=None, upload_id=None, url=None, user_meta=None,
             video_media_metadata=None, video_preview_metadata=None):
    """Response model for the update-file-metadata API.

    Every field defaults to ``None`` ("not returned"); nested media
    metadata fields hold sub-model instances.
    """
    # file category
    self.category = category  # type: str
    # content hash value
    self.content_hash = content_hash  # type: str
    # name of the hash algorithm used for content_hash
    self.content_hash_name = content_hash_name  # type: str
    # MIME content type
    self.content_type = content_type  # type: str
    # CRC64 hash; NOTE the wire key is 'crc64_hash' (see to_map/from_map)
    self.crc_64hash = crc_64hash  # type: str
    # creation timestamp
    self.created_at = created_at  # type: str
    # file description
    self.description = description  # type: str
    # Domain ID
    self.domain_id = domain_id  # type: str
    # download URL
    self.download_url = download_url  # type: str
    # Drive ID
    self.drive_id = drive_id  # type: str
    # encryption mode
    self.encrypt_mode = encrypt_mode  # type: str
    # file extension
    self.file_extension = file_extension  # type: str
    # File ID
    self.file_id = file_id  # type: str
    # whether the file is hidden
    self.hidden = hidden  # type: bool
    # image metadata sub-model
    self.image_media_metadata = image_media_metadata  # type: ImageMediaResponse
    # user-assigned labels
    self.labels = labels  # type: List[str]
    # opaque metadata string
    self.meta = meta  # type: str
    # file name
    self.name = name  # type: str
    # ID of the parent folder
    self.parent_file_id = parent_file_id  # type: str
    # punish (content-moderation) flag
    self.punish_flag = punish_flag  # type: int
    # file size in bytes
    self.size = size  # type: int
    # whether the file is starred
    self.starred = starred  # type: bool
    # file status
    self.status = status  # type: str
    # @Deprecated streams url info
    self.streams_info = streams_info  # type: dict
    # thumbnail URL
    self.thumbnail = thumbnail  # type: str
    # timestamp the file was trashed
    self.trashed_at = trashed_at  # type: str
    # entry type (shadows the builtin name 'type'; kept for API parity)
    self.type = type  # type: str
    # last-update timestamp
    self.updated_at = updated_at  # type: str
    # upload ID
    self.upload_id = upload_id  # type: str
    # file URL
    self.url = url  # type: str
    # user-defined metadata string
    self.user_meta = user_meta  # type: str
    # video metadata sub-model
    self.video_media_metadata = video_media_metadata  # type: VideoMediaResponse
    # video preview metadata sub-model
    self.video_preview_metadata = video_preview_metadata  # type: VideoPreviewResponse
def validate(self):
    """Validate field constraints from the API schema.

    Uses the TeaModel validate_* helpers, which raise on violation.
    ``name`` is required; id fields must match their character patterns;
    ``size`` must lie in [0, 53687091200] (50 GiB); nested media
    metadata models are validated recursively when present.
    """
    if self.domain_id is not None:
        self.validate_pattern(self.domain_id, 'domain_id', '[a-z0-9A-Z]+')
    if self.drive_id is not None:
        self.validate_pattern(self.drive_id, 'drive_id', '[0-9]+')
    if self.file_id is not None:
        self.validate_max_length(self.file_id, 'file_id', 50)
        self.validate_pattern(self.file_id, 'file_id', '[a-z0-9]{1,50}')
    if self.image_media_metadata:
        self.image_media_metadata.validate()
    # name is the only unconditionally required field
    self.validate_required(self.name, 'name')
    if self.name is not None:
        self.validate_pattern(self.name, 'name', '[a-zA-Z0-9.-]{1,1000}')
    if self.parent_file_id is not None:
        self.validate_max_length(self.parent_file_id, 'parent_file_id', 50)
        self.validate_pattern(self.parent_file_id, 'parent_file_id', '[a-z0-9]{1,50}')
    if self.size is not None:
        self.validate_maximum(self.size, 'size', 53687091200)
        self.validate_minimum(self.size, 'size', 0)
    if self.video_media_metadata:
        self.video_media_metadata.validate()
    if self.video_preview_metadata:
        self.video_preview_metadata.validate()
def to_map(self):
    """Serialize to a plain dict.

    None fields are omitted; nested models are converted via their own
    ``to_map``.  A spec table replaces the per-field if-blocks; entry
    order matches the original so the resulting dict key order is
    unchanged.  Note ``crc_64hash`` serializes under the wire key
    'crc64_hash'.
    """
    # (attribute name, wire key, is nested model)
    spec = (
        ('category', 'category', False),
        ('content_hash', 'content_hash', False),
        ('content_hash_name', 'content_hash_name', False),
        ('content_type', 'content_type', False),
        ('crc_64hash', 'crc64_hash', False),
        ('created_at', 'created_at', False),
        ('description', 'description', False),
        ('domain_id', 'domain_id', False),
        ('download_url', 'download_url', False),
        ('drive_id', 'drive_id', False),
        ('encrypt_mode', 'encrypt_mode', False),
        ('file_extension', 'file_extension', False),
        ('file_id', 'file_id', False),
        ('hidden', 'hidden', False),
        ('image_media_metadata', 'image_media_metadata', True),
        ('labels', 'labels', False),
        ('meta', 'meta', False),
        ('name', 'name', False),
        ('parent_file_id', 'parent_file_id', False),
        ('punish_flag', 'punish_flag', False),
        ('size', 'size', False),
        ('starred', 'starred', False),
        ('status', 'status', False),
        ('streams_info', 'streams_info', False),
        ('thumbnail', 'thumbnail', False),
        ('trashed_at', 'trashed_at', False),
        ('type', 'type', False),
        ('updated_at', 'updated_at', False),
        ('upload_id', 'upload_id', False),
        ('url', 'url', False),
        ('user_meta', 'user_meta', False),
        ('video_media_metadata', 'video_media_metadata', True),
        ('video_preview_metadata', 'video_preview_metadata', True),
    )
    result = {}
    for attr, key, nested in spec:
        value = getattr(self, attr)
        if value is not None:
            result[key] = value.to_map() if nested else value
    return result
def from_map(self, map={}):
if map.get('category') is not None:
self.category = map.get('category')
if map.get('content_hash') is not None:
self.content_hash = map.get('content_hash')
if map.get('content_hash_name') is not None:
self.content_hash_name = map.get('content_hash_name')
if map.get('content_type') is not None:
self.content_type = map.get('content_type')
if map.get('crc64_hash') is not None:
self.crc_64hash = map.get('crc64_hash')
if map.get('created_at') is not None:
self.created_at = map.get('created_at')
if map.get('description') is not None:
self.description = map.get('description')
if map.get('domain_id') is not None:
self.domain_id = map.get('domain_id')
if map.get('download_url') is not None:
self.download_url = map.get('download_url')
if map.get('drive_id') is not None:
self.drive_id = map.get('drive_id')
if map.get('encrypt_mode') is not None:
self.encrypt_mode = map.get('encrypt_mode')
if map.get('file_extension') is not None:
self.file_extension = map.get('file_extension')
if map.get('file_id') is not None:
self.file_id = map.get('file_id')
if map.get('hidden') is not None:
self.hidden = map.get('hidden')
if map.get('image_media_metadata') is not None:
temp_model = ImageMediaResponse()
self.image_media_metadata = temp_model.from_map(map['image_media_metadata'])
if map.get('labels') is not None:
self.labels = map.get('labels')
if map.get('meta') is not None:
self.meta = map.get('meta')
if map.get('name') is not None:
self.name = map.get('name')
if map.get('parent_file_id') is not None:
self.parent_file_id = map.get('parent_file_id')
if map.get('punish_flag') is not None:
self.punish_flag = map.get('punish_flag')
if map.get('size') is not None:
self.size = map.get('size')
if map.get('starred') is not None:
self.starred = map.get('starred')
if map.get('status') is not None:
self.status = map.get('status')
if map.get('streams_info') is not None:
self.streams_info = map.get('streams_info')
if map.get('thumbnail') is not None:
self.thumbnail = | |
4*' ', [
'return {crc_reflect_function}({cfg_xor_in} & {cfg_mask}, {cfg_width});'.format(**sym),
]),
'} else {',
CodeGen(opt, 4*' ', [
'return {cfg_xor_in} & {cfg_mask};'.format(**sym),
]),
'}',
], [
Conditional2(opt, '', opt.algorithm == opt.reflect_in, [
'return {crc_reflect_function}({cfg_xor_in} & {cfg_mask}, {cfg_width});'.format(**sym),
], [
'return {cfg_xor_in} & {cfg_mask};'.format(**sym),
]),
]),
]),
]),
'}',
]
return out
def _crc_update_function_gen(opt, sym):
    """
    Return the code for the update function.

    Emits one of three C bodies depending on ``opt.algorithm``:
    bit-by-bit, bit-by-bit-fast or table-driven.  Wherever a reflect
    option is None it is only known at the generated program's runtime,
    so a C ``if`` on the cfg value is emitted instead of choosing the
    branch at generation time.

    Fix: ``opt.reflect_in == None`` comparisons replaced with
    ``is None`` for correctness of intent and consistency with the
    bit-by-bit branch.
    """
    out = [
        '', '',
        _crc_update_function_def(opt, sym),
        '{',
        CodeGen(opt, 4*' ', ['const unsigned char *d = (const unsigned char *)data;']),
    ]
    if opt.algorithm == opt.algo_bit_by_bit:
        out += [
            CodeGen(opt, 4*' ', [
                'unsigned int i;',
                '{c_bool} bit;'.format(**sym),
                'unsigned char c;',
                '',
                'while (data_len--) {',
                # Reflect the input byte: runtime cfg check when unknown,
                # otherwise resolved at generation time.
                Conditional2(opt, 4*' ', opt.reflect_in is None, [
                    'if (' + sym['cfg_reflect_in'] + ') {',
                    CodeGen(opt, 4*' ', [
                        'c = {crc_reflect_function}(*d++, 8);'.format(**sym),
                    ]),
                    '} else {',
                    CodeGen(opt, 4*' ', [
                        'c = *d++;',
                    ]),
                    '}',
                ], [
                    Conditional2(opt, '', opt.reflect_in, [
                        'c = {crc_reflect_function}(*d++, 8);'.format(**sym),
                    ], [
                        'c = *d++;',
                    ]),
                ]),
                CodeGen(opt, 4*' ', [
                    'for (i = 0; i < 8; i++) {',
                    CodeGen(opt, 4*' ', [
                        # C89 has no bool type, so coerce to 0/1 with !!.
                        Conditional2(opt, '', opt.c_std == 'C89', [
                            'bit = !!(crc & {cfg_msb_mask});'.format(**sym),
                        ], [
                            'bit = crc & {cfg_msb_mask};'.format(**sym),
                        ]),
                        'crc = (crc << 1) | ((c >> (7 - i)) & 0x01);',
                        'if (bit) {',
                        CodeGen(opt, 4*' ', [
                            'crc ^= {cfg_poly};'.format(**sym),
                        ]),
                        '}',
                    ]),
                    '}',
                    'crc &= {cfg_mask};'.format(**sym),
                ]),
                '}',
                'return crc & {cfg_mask};'.format(**sym),
            ]),
        ]
    if opt.algorithm == opt.algo_bit_by_bit_fast:
        out += [
            CodeGen(opt, 4*' ', [
                'unsigned int i;',
                '{c_bool} bit;'.format(**sym),
                'unsigned char c;',
                '',
                'while (data_len--) {',
                CodeGen(opt, 4*' ', [
                    # was: opt.reflect_in == None
                    Conditional2(opt, '', opt.reflect_in is None, [
                        'if (' + sym['cfg_reflect_in'] + ') {',
                        CodeGen(opt, 4*' ', [
                            'c = {crc_reflect_function}(*d++, 8);'.format(**sym),
                        ]),
                        '} else {',
                        CodeGen(opt, 4*' ', [
                            'c = *d++;',
                        ]),
                        '}',
                    ], [
                        'c = *d++;',
                    ]),
                    # Iterate bits LSB-first when reflected, MSB-first otherwise.
                    Conditional2(opt, '', opt.reflect_in, [
                        'for (i = 0x01; i & 0xff; i <<= 1) {',
                    ], [
                        'for (i = 0x80; i > 0; i >>= 1) {',
                    ]),
                    CodeGen(opt, 4*' ', [
                        Conditional2(opt, '', opt.c_std == 'C89', [
                            'bit = !!({0});'.format(expr.And('crc', sym['cfg_msb_mask']).simplify()),
                        ], [
                            'bit = {0};'.format(expr.And('crc', sym['cfg_msb_mask']).simplify()),
                        ]),
                        'if (c & i) {',
                        CodeGen(opt, 4*' ', [
                            'bit = !bit;',
                        ]),
                        '}',
                        'crc <<= 1;',
                        'if (bit) {',
                        CodeGen(opt, 4*' ', [
                            'crc ^= {cfg_poly};'.format(**sym),
                        ]),
                        '}',
                    ]),
                    '}',
                    'crc &= {cfg_mask};'.format(**sym),
                ]),
                '}',
                'return {0};'.format(expr.And('crc', sym['cfg_mask']).simplify()),
            ]),
        ]
    if opt.algorithm == opt.algo_table_driven:
        out += [
            CodeGen(opt, 4*' ', [
                'unsigned int tbl_idx;',
                '',
                # was: opt.reflect_in == None
                Conditional2(opt, '', opt.reflect_in is None, [
                    'if (cfg->reflect_in) {',
                    CodeGen(opt, 4*' ', [
                        'while (data_len--) {',
                        CodeGen(opt, 4*' ', [
                            _crc_table_core_algorithm_reflected(opt, sym),
                            'd++;',
                        ]),
                        '}',
                    ]),
                    '} else {',
                    CodeGen(opt, 4*' ', [
                        'while (data_len--) {',
                        CodeGen(opt, 4*' ', [
                            _crc_table_core_algorithm_nonreflected(opt, sym),
                            'd++;',
                        ]),
                        '}',
                    ]),
                    '}',
                ], [
                    # Slice-by processing needs word-aligned input; handle
                    # the unaligned prefix with the per-byte algorithm first.
                    Conditional(opt, '', opt.slice_by > 1, [
                        '/* Align to a multiple of {crc_slice_by} bytes */'.format(**sym),
                        'while (data_len && (((uintptr_t)(const void *)d) % {crc_slice_by} != 0))'.format(**sym) + ' {',
                        CodeGen(opt, 4*' ', [
                            _crc_table_core_algorithm(opt, sym),
                            'data_len--;',
                        ]),
                        '}',
                        '',
                        _crc_table_slice_by_algorithm(opt, sym),
                        '/* Remaining bytes with the standard algorithm */',
                        'd = (const unsigned char *)d32;',
                    ]),
                    'while (data_len--) {',
                    CodeGen(opt, 4*' ', [
                        _crc_table_core_algorithm(opt, sym),
                    ]),
                    '}',
                ]),
                'return {0};'.format(expr.And('crc', sym['cfg_mask']).simplify()),
            ]),
        ]
    out += [
        '}',
    ]
    return out
def _crc_finalize_function_gen(opt, sym):
    """
    Return the code for the finalize function.

    Returns an empty list when finalization is inlined.  For the
    table-driven algorithm the finalize step reflects the CRC exactly
    when the input and output reflection settings differ; when one side
    is only known at the generated program's runtime, a C condition on
    the cfg struct is emitted.

    Fix: the runtime conditions were built as
    ``'!' if flag else '' + 'cfg->…'`` which, by Python's conditional
    expression precedence, evaluates to just ``'!'`` when ``flag`` is
    truthy — generating invalid C.  The prefix is now parenthesized.
    """
    if _use_inline_crc_finalize(opt):
        return []
    out = [
        '', '',
        _crc_finalize_function_def(opt, sym),
        '{',
    ]
    if opt.algorithm in (opt.algo_bit_by_bit, opt.algo_bit_by_bit_fast):
        out += [
            Conditional(opt, 4*' ', opt.algorithm == opt.algo_bit_by_bit, [
                'unsigned int i;',
                '{c_bool} bit;'.format(**sym),
                '',
                'for (i = 0; i < ' + sym['cfg_width'] + '; i++) {',
                CodeGen(opt, 4*' ', [
                    Conditional2(opt, '', opt.c_std == 'C89', [
                        'bit = !!(crc & {cfg_msb_mask});'.format(**sym),
                    ], [
                        'bit = crc & {cfg_msb_mask};'.format(**sym),
                    ]),
                    'crc <<= 1;',
                    'if (bit) {',
                    CodeGen(opt, 4*' ', [
                        'crc ^= {cfg_poly};'.format(**sym),
                    ]),
                    '}',
                ]),
                '}',
                Conditional(opt, '', opt.reflect_out is None, [
                    'if (' + sym['cfg_reflect_out'] + ') {',
                    CodeGen(opt, 4*' ', [
                        'crc = {crc_reflect_function}(crc, {cfg_width});'.format(**sym),
                    ]),
                    '}',
                ]),
                Conditional(opt, '', opt.reflect_out, [
                    'crc = {crc_reflect_function}(crc, {cfg_width});'.format(**sym),
                ]),
            ]),
            Conditional(opt, 4*' ', opt.algorithm == opt.algo_bit_by_bit_fast, [
                Conditional(opt, '', opt.reflect_out is None, [
                    'if (' + sym['cfg_reflect_out'] + ') {',
                    CodeGen(opt, 4*' ', [
                        'crc = {crc_reflect_function}(crc, {cfg_width});'.format(**sym),
                    ]),
                    '}',
                ]),
                Conditional(opt, '', opt.reflect_out, [
                    'crc = {crc_reflect_function}(crc, {cfg_width});'.format(**sym),
                ]),
            ]),
        ]
    if opt.algorithm == opt.algo_table_driven:
        if opt.reflect_in is None or opt.reflect_out is None:
            if opt.reflect_in is None and opt.reflect_out is None:
                cond = 'cfg->reflect_in != cfg->reflect_out'
            elif opt.reflect_out is None:
                # reflect when cfg->reflect_out differs from the fixed
                # reflect_in value (BUG FIX: parenthesized prefix).
                cond = ('!' if opt.reflect_in else '') + 'cfg->reflect_out'
            else:
                cond = ('!' if opt.reflect_out else '') + 'cfg->reflect_in'
            out += [
                CodeGen(opt, 4*' ', [
                    'if (' + cond + ') {',
                    CodeGen(opt, 4*' ', [
                        'crc = {crc_reflect_function}(crc, {cfg_width});'.format(**sym),
                    ]),
                    '}',
                ]),
            ]
        elif opt.reflect_in != opt.reflect_out:
            out += [
                'crc = {crc_reflect_function}(crc, {cfg_width});'.format(**sym),
            ]
    out += [
        CodeGen(opt, 4*' ', [
            'return {0};'.format(expr.And(expr.Parenthesis(expr.Xor('crc', sym['cfg_xor_out'])), sym['cfg_mask']).simplify()),
        ]),
        '}',
    ]
    return out
def _crc_table_core_algorithm(opt, sym):
    """
    Return the core of the table-driven algorithm: process one input
    byte (reflected or non-reflected variant) and advance the data
    pointer.
    """
    body = [
        Conditional2(opt, '', opt.reflect_in,
                     [_crc_table_core_algorithm_reflected(opt, sym)],
                     [_crc_table_core_algorithm_nonreflected(opt, sym)]),
        'd++;',
    ]
    return CodeGen(opt, '', body)
def _crc_table_core_algorithm_reflected(opt, sym):
    """
    Return the core loop of the table-driven algorithm, reflected variant.
    """
    out = []
    # When the CRC register is no wider than the table index, the shifted
    # remainder is always zero.
    if opt.width is not None and opt.tbl_idx_width is not None and opt.width <= opt.tbl_idx_width:
        crc_xor_expr = '0'
    else:
        crc_xor_expr = '(crc >> {cfg_table_idx_width})'.format(**sym)
    if opt.tbl_idx_width == 8:
        # One full input byte per table lookup; slice-by tables are 2-D.
        if opt.slice_by > 1:
            crc_lookup = 'crc_table[0][tbl_idx]'
        else:
            crc_lookup = 'crc_table[tbl_idx]'
        out += [
            # Masking is unnecessary when the CRC fits in 8 bits.
            Conditional2(opt, '', opt.width is None or opt.width > 8, [
                'tbl_idx = (crc ^ *d) & {crc_table_mask};'.format(**sym),
            ], [
                'tbl_idx = crc ^ *d;',
            ]),
            'crc = {0};'.format(expr.And(expr.Parenthesis(expr.Xor(crc_lookup, expr.Parenthesis(expr.Shr('crc', sym['cfg_table_idx_width'])))), sym['cfg_mask']).simplify()),
        ]
    else:
        # Narrow table index: consume the byte in 8 // tbl_idx_width steps.
        crc_lookup = 'crc_table[tbl_idx & {crc_table_mask}]'.format(**sym)
        for i in range(8 // opt.tbl_idx_width):
            out += [
                'tbl_idx = {0};'.format(expr.Xor('crc', expr.Parenthesis(expr.Shr('*d', expr.Parenthesis(expr.Mul(i, sym['cfg_table_idx_width']))))).simplify()),
                'crc = {0};'.format(expr.Xor(crc_lookup, crc_xor_expr).simplify())
            ]
    return CodeGen(opt, '', out)
def _crc_table_core_algorithm_nonreflected(opt, sym):
    """
    Return the core loop of the table-driven algorithm, non-reflected
    variant.

    Fixes: ``opt.width == None`` replaced with ``is None``; the
    ``width < 8`` and ``else`` branches are merged since they compute the
    same ``shift_val`` and only differ in the (impossible for width >= 8)
    negative-shift case.
    """
    out = []
    # Expression aligning the top table-index bits of crc with the data byte.
    if opt.width is None:
        # Width only known at runtime: shift by (cfg_width - idx_width).
        crc_shifted_right = expr.Parenthesis(expr.Shr('crc', expr.Parenthesis(expr.Sub(sym['cfg_width'], sym['cfg_table_idx_width'])))).simplify()
    else:
        shift_val = opt.width - opt.tbl_idx_width
        if shift_val < 0:
            # CRC narrower than the table index: shift left instead.
            crc_shifted_right = expr.Parenthesis(expr.Shl('crc', -shift_val)).simplify()
        else:
            crc_shifted_right = expr.Parenthesis(expr.Shr('crc', shift_val)).simplify()
    # When the CRC register is no wider than the table index, the shifted
    # remainder is always zero.
    if opt.width is not None and opt.tbl_idx_width is not None and opt.width <= opt.tbl_idx_width:
        crc_xor_expr = '0'
    else:
        crc_xor_expr = '(crc << {cfg_table_idx_width})'.format(**sym)
    if opt.tbl_idx_width == 8:
        # One full input byte per table lookup; slice-by tables are 2-D.
        if opt.slice_by > 1:
            crc_lookup = 'crc_table[0][tbl_idx]'
        else:
            crc_lookup = 'crc_table[tbl_idx]'
        out += [
            Conditional2(opt, '', opt.width is None or opt.width > 8, [
                'tbl_idx = {0};'.format(expr.And(expr.Parenthesis(expr.Xor(crc_shifted_right, '*d')), sym['crc_table_mask']).simplify())
            ], [
                'tbl_idx = {0};'.format(expr.Xor(crc_shifted_right, '*d').simplify())
            ]),
            'crc = {0};'.format(expr.And(expr.Parenthesis(expr.Xor(crc_lookup, crc_xor_expr)), sym['cfg_mask']).simplify())
        ]
    else:
        # Narrow table index: consume the byte MSB-first in several steps.
        crc_lookup = 'crc_table[tbl_idx & {crc_table_mask}]'.format(**sym)
        for i in range(8 // opt.tbl_idx_width):
            str_idx = '{0:d}'.format(8 - (i + 1) * opt.tbl_idx_width)
            out += [
                # NOTE(review): unlike the reflected variant this Xor is not
                # .simplify()-ed; deliberately kept as-is since simplifying
                # would change the textual form of the generated code —
                # confirm whether that asymmetry is intentional.
                'tbl_idx = {0};'.format(expr.Xor(crc_shifted_right, expr.Parenthesis(expr.Shr('*d', str_idx)))),
                'crc = {0};'.format(expr.Xor(crc_lookup, crc_xor_expr).simplify()),
            ]
    return CodeGen(opt, '', out)
def _crc_table_slice_by_algorithm(opt, sym):
update_be = []
for i in range(opt.slice_by // 4):
vard = 'd{0}'.format(opt.slice_by // 4 - i)
for j in range(4):
idx1 = i * 4 + j
idx2 = expr.And(expr.Parenthesis(expr.Shr(vard, j*8)), expr.Terminal(255, '0xffu')).simplify()
update_be.append('crc_table[{0}][{1}]{2}'.format(idx1, idx2, ' ^' if idx1 < opt.slice_by - 1 else ';'))
update_le = []
for i in range(opt.slice_by // 4):
vard = 'd{0}'.format(opt.slice_by // 4 - i)
for j in range(4):
idx1 = i * 4 + j
idx2 = expr.And(expr.Parenthesis(expr.Shr(vard, 24 - j*8)), expr.Terminal(255, '0xffu')).simplify()
update_le.append('crc_table[{0}][{1}]{2}'.format(idx1, idx2, ' ^' if idx1 < opt.slice_by - 1 else ';'))
out = [
'const uint32_t *d32 = (const uint32_t *)d;',
'while (data_len >= {crc_slice_by})'.format(**sym),
'{',
CodeGen(opt, 4*' ', [
CodeGen(opt, None, [
'#if __BYTE_ORDER == __BIG_ENDIAN',
]),
'{crc_t} d1 = *d32++ ^ le16toh(crc);'.format(**sym),
Conditional(opt, '', opt.slice_by >= 8, [
'{crc_t} d2 = *d32++;'.format(**sym),
]),
Conditional(opt, '', opt.slice_by >= 16, [
'{crc_t} d3 = *d32++;'.format(**sym),
'{crc_t} d4 = *d32++;'.format(**sym),
| |
'action': self.nodeurl
})
form['id'] = factory(
'field:label:text',
props={
'label': 'Id',
})
form['title'] = factory(
'field:label:text',
props={
'label': 'Title',
})
form['add'] = factory(
'submit',
props={
'action': 'add',
'expression': True,
'handler': self.add,
'next': self.next,
'label': 'Add',
})
self.form = form
def add(self, widget, data):
fetch = self.request.params.get
child = MyNode()
child.attrs.title = fetch('addform.title')
self.model.parent[fetch('addform.id')] = child
self.model = child
# Create dummy container
root = MyNode()
# Render without factory
with self.layer.authenticated('manager'):
request = self.layer.new_request()
self.assertEqual(
render_tile(root, request, 'add'),
u'unknown_factory'
)
# Render with valid factory
with self.layer.authenticated('manager'):
request.params['factory'] = 'mynode'
result = render_tile(root, request, 'add')
self.assertTrue(result.find(u'<form action="http://example.com"') != -1)
# Render with valid factory on adapter node
with self.layer.authenticated('manager'):
adapterroot = MyAdapterNode(None, None, None)
request.params['factory'] = 'myadapternode'
result = render_tile(adapterroot, request, 'add')
self.assertTrue(result.find(u'<form action="http://example.com"') != -1)
# Render with submitted data
with self.layer.authenticated('manager'):
request = self.layer.current_request
request.params['factory'] = 'mynode'
request.params['action.addform.add'] = '1'
request.params['addform.id'] = 'somechild'
request.params['addform.title'] = 'Some Child'
render_tile(root, request, 'add')
self.assertTrue(isinstance(request.environ['redirect'], HTTPFound))
self.checkOutput("""
<class '...MyNode'>: None
<class '...MyNode'>: somechild
""", root.treerepr())
self.assertEqual(
request.environ['redirect'].location,
'http://example.com/somechild'
)
del request.environ['redirect']
# Render with 'came_from' set
with self.layer.authenticated('manager'):
request.params['came_from'] = 'parent'
render_tile(root, request, 'add')
self.assertEqual(
request.environ['redirect'].location,
'http://example.com/'
)
del request.environ['redirect']
with self.layer.authenticated('manager'):
came_from = compat.quote('http://example.com/foo/bar?baz=1')
request.params['came_from'] = came_from
render_tile(root, request, 'add')
self.assertEqual(
request.environ['redirect'].location,
'http://example.com/foo/bar?baz=1'
)
# Render with ajax flag
with self.layer.authenticated('manager'):
request.params['ajax'] = '1'
render_tile(root, request, 'add')
self.assertTrue(isinstance(
request.environ['cone.app.continuation'][0],
AjaxEvent
))
# Check the modified model
self.assertEqual(root.keys(), ['somechild'])
self.assertEqual(root['somechild'].attrs.title, 'Some Child')
# Add view
with self.layer.authenticated('manager'):
request = self.layer.new_request()
request.params['factory'] = 'mynode'
request.params['action.addform.add'] = '1'
request.params['addform.id'] = 'somechild'
request.params['addform.title'] = 'Some Child'
res = add(root, request)
self.assertTrue(isinstance(res, HTTPFound))
with self.layer.authenticated('manager'):
request.params['ajax'] = '1'
res = str(add(root, request))
self.assertTrue(res.find('parent.bdajax.render_ajax_form') != -1)
@testing.reset_node_info_registry
def test_EditFormHeading(self):
    """Form heading falls back to plain 'edit' when the model has no node
    info, and renders 'Edit: <title>' when node info provides a title."""
    @plumbing(EditFormHeading)
    class EditForm(Form):
        pass
    # BaseNode has no node info registered -> generic heading
    self.assertEqual(BaseNode().node_info_name, '')
    self.assertEqual(get_node_info(''), None)
    edit_form = EditForm()
    edit_form.model = BaseNode()
    edit_form.request = self.layer.new_request()
    self.assertEqual(edit_form.form_heading, 'edit')

    # With registered node info the title is included in the heading
    @node_info(
        name='editnode',
        title='Edit Node')
    class EditNode(BaseNode):
        pass
    edit_form = EditForm()
    edit_form.model = EditNode()
    edit_form.request = self.layer.new_request()
    self.assertEqual(edit_form.form_heading, 'Edit: Edit Node')
@testing.reset_node_info_registry
def test_editing(self):
    """Exercise the 'edit' tile end to end: rendering with model values,
    form submission, next-URL computation (default, came_from=parent,
    came_from=<url>), ajax continuation, and the ``edit`` pyramid view."""
    @node_info(
        name='mynode',
        title='My Node')
    class MyNode(BaseNode):
        pass
    # Create and register an ``editform`` named form tile
    with self.layer.hook_tile_reg():
        @tile(name='editform', interface=MyNode)
        @plumbing(ContentEditForm)
        class MyEditForm(Form):
            def prepare(self):
                form = factory(
                    u'form',
                    name='editform',
                    props={
                        'action': self.nodeurl
                    })
                form['title'] = factory(
                    'field:label:text',
                    value=self.model.attrs.title,
                    props={
                        'label': 'Title',
                    })
                form['update'] = factory(
                    'submit',
                    props={
                        'action': 'update',
                        'expression': True,
                        'handler': self.update,
                        'next': self.next,
                        'label': 'Update',
                    })
                self.form = form

            def update(self, widget, data):
                fetch = self.request.params.get
                self.model.attrs.title = fetch('editform.title')
    # Dummy model
    root = MyNode()
    child = root['somechild'] = MyNode()
    child.attrs.title = 'My Node'
    # Render form with value from model
    with self.layer.authenticated('editor'):
        request = self.layer.new_request()
        res = render_tile(root['somechild'], request, 'edit')
        self.checkOutput("""
        ...<span class="label label-primary">Edit: My Node</span>...
        <form action="http://example.com/somechild"...
        """, res)
    # Render with submitted data. Default next URL of EditForm is the
    # edited node
    with self.layer.authenticated('editor'):
        request = self.layer.new_request()
        request.params['action.editform.update'] = '1'
        request.params['editform.title'] = 'Changed title'
        res = render_tile(root['somechild'], request, 'edit')
        self.assertEqual(
            request.environ['redirect'].location,
            'http://example.com/somechild'
        )
    # Check next URL with ``parent`` as ``came_from`` value
    with self.layer.authenticated('editor'):
        request = self.layer.new_request()
        request.params['action.editform.update'] = '1'
        request.params['editform.title'] = 'Changed title'
        request.params['came_from'] = 'parent'
        res = render_tile(root['somechild'], request, 'edit')
        self.assertEqual(
            request.environ['redirect'].location,
            'http://example.com/'
        )
    # Check next URL with URL as ``came_from`` value
    with self.layer.authenticated('editor'):
        request = self.layer.new_request()
        request.params['action.editform.update'] = '1'
        request.params['editform.title'] = 'Changed title'
        came_from = compat.quote('http://example.com/other/node/in/tree')
        request.params['came_from'] = came_from
        res = render_tile(root['somechild'], request, 'edit')
        self.assertEqual(
            request.environ['redirect'].location,
            'http://example.com/other/node/in/tree'
        )
    # Render with ajax flag
    with self.layer.authenticated('editor'):
        request = self.layer.new_request()
        request.params['action.editform.update'] = '1'
        request.params['editform.title'] = 'Changed title'
        request.params['ajax'] = '1'
        res = render_tile(root['somechild'], request, 'edit')
        self.assertTrue(isinstance(
            request.environ['cone.app.continuation'][0],
            AjaxEvent
        ))
        # URL computing is the same as if ``HTTPFound`` instance is returned.
        # In Ajax case, the URL is used as ajax target
        self.assertEqual(
            request.environ['cone.app.continuation'][0].target,
            'http://example.com/somechild'
        )
    with self.layer.authenticated('editor'):
        request = self.layer.new_request()
        request.params['action.editform.update'] = '1'
        request.params['editform.title'] = 'Changed title'
        came_from = compat.quote('http://example.com/other/node/in/tree')
        request.params['came_from'] = came_from
        request.params['ajax'] = '1'
        res = render_tile(root['somechild'], request, 'edit')
        self.assertEqual(
            request.environ['cone.app.continuation'][0].target,
            'http://example.com/other/node/in/tree'
        )
    # Check the updated node
    self.assertEqual(root['somechild'].attrs.title, 'Changed title')
    # Edit view
    with self.layer.authenticated('editor'):
        request = self.layer.new_request()
        request.params['action.editform.update'] = '1'
        request.params['editform.title'] = 'Changed title'
        root.attrs.title = 'Foo'
        res = edit(root, request)
        self.assertTrue(isinstance(res, HTTPFound))
    with self.layer.authenticated('editor'):
        request = self.layer.new_request()
        request.params['action.editform.update'] = '1'
        request.params['editform.title'] = 'Changed title'
        request.params['ajax'] = '1'
        res = str(edit(root, request))
        self.assertTrue(res.find('parent.bdajax.render_ajax_form') != -1)
def test_deleting(self):
    """The 'delete' tile removes the child only when the node opts in via
    ``properties.action_delete``; otherwise an error message is emitted."""
    class CallableNode(BaseNode):
        def __call__(self):
            # Called by the delete machinery to persist the change.
            pass
    node = CallableNode()
    node['child'] = CallableNode()
    self.checkOutput("""
    <class '...CallableNode'>: None
      <class '...CallableNode'>: child
    """, node.treerepr())
    del node['child']
    self.checkOutput("""
    <class '...CallableNode'>: None
    """, node.treerepr())
    node['child'] = CallableNode()
    # Without action_delete the tile refuses and queues an error message
    with self.layer.authenticated('manager'):
        request = self.layer.new_request()
        self.assertEqual(render_tile(node['child'], request, 'delete'), u'')
        self.assertEqual(
            request.environ['cone.app.continuation'][0].payload,
            u'Object "child" not deletable'
        )
    node['child'].properties.action_delete = True
    # With action_delete the child is removed and ajax continuations queued
    with self.layer.authenticated('manager'):
        request = self.layer.new_request()
        self.assertEqual(render_tile(node['child'], request, 'delete'), u'')
        self.assertTrue(isinstance(
            request.environ['cone.app.continuation'][0],
            AjaxEvent
        ))
        self.assertTrue(isinstance(
            request.environ['cone.app.continuation'][1],
            AjaxMessage
        ))
    self.checkOutput("""
    <class '...CallableNode'>: None
    """, node.treerepr())
@testing.reset_node_info_registry
def test_add_items_dropdown(self):
    """The 'add_dropdown' tile renders one add-form link per addable node
    type; unknown or missing addables yield an empty dropdown listing."""
    @node_info(
        name='mynode',
        addables=['mynode'])
    class MyNode(BaseNode):
        pass
    # Dummy model
    root = MyNode()
    root['somechild'] = MyNode()
    # child.attrs.title = 'My Node'
    # Dropdown menu containing links to the addforms of allowed child nodes
    with self.layer.authenticated('manager'):
        request = self.layer.new_request()
        rendered = render_tile(root['somechild'], request, 'add_dropdown')
        # Non JS link to add form
        expected = 'href="http://example.com/somechild/add?factory=mynode"'
        self.assertTrue(rendered.find(expected) != -1)
        # Ajax target for add form
        expected = 'ajax:target="http://example.com/somechild?factory=mynode"'
        self.assertTrue(rendered.find(expected) != -1)
        # Ajax action rule for add form
        expected = 'ajax:action="add:#content:inner"'
        self.assertTrue(rendered.find(expected) != -1)
    # Allow another node type as child
    nodeinfo = NodeInfo()
    register_node_info('anothernode', nodeinfo)
    get_node_info('mynode').addables = ['mynode', 'anothernode']
    with self.layer.authenticated('manager'):
        request = self.layer.new_request()
        rendered = render_tile(root['somechild'], request, 'add_dropdown')
        # Non JS links to add form
        expected = 'href="http://example.com/somechild/add?factory=mynode"'
        self.assertTrue(rendered.find(expected) != -1)
        expected = 'href="http://example.com/somechild/add?factory=anothernode"'
        self.assertTrue(rendered.find(expected) != -1)
        # Ajax targets for add form
        expected = 'ajax:target="http://example.com/somechild?factory=mynode"'
        self.assertTrue(rendered.find(expected) != -1)
        expected = 'ajax:target="http://example.com/somechild?factory=anothernode"'
        self.assertTrue(rendered.find(expected) != -1)
    # Test node without addables, results in empty listing.
    # XXX: hide entire widget if no items
    @node_info(name='nochildaddingnode')
    class NoChildAddingNode(BaseNode):
        pass
    with self.layer.authenticated('manager'):
        request = self.layer.new_request()
        rendered = render_tile(NoChildAddingNode(), request, 'add_dropdown')
        self.checkOutput("""
        ...<li class="dropdown">
          <a href="#"
             class="dropdown-toggle"
             data-toggle="dropdown">
            <span>Add</span>
            <span class="caret"></span>
          </a>
          <ul class="dropdown-menu" role="addmenu">
          </ul>
        </li>...
        """, rendered)
    # Test node with invalid addable, results in empty listing
    # XXX: hide entire widget if no items
    @node_info(
        name='invalidchildnodeinfo',
        addables=['invalid'])
    class InvalidChildNodeInfoNode(BaseNode):
        pass
    with self.layer.authenticated('manager'):
        request = self.layer.new_request()
        rendered = render_tile(
            InvalidChildNodeInfoNode(),
            request,
            'add_dropdown'
        )
        self.checkOutput("""
        ...<li class="dropdown">
          <a href="#"
             class="dropdown-toggle"
             data-toggle="dropdown">
            <span>Add</span>
            <span class="caret"></span>
          </a>
          <ul class="dropdown-menu" role="addmenu">
          </ul>
        </li>...
        """, rendered)
    def test_overlay_form(self):
        """Exercise overlay forms end to end.

        Registers a transient ``overlayform`` tile, renders it via the
        ``overlayformtile`` entry tile, then submits it through the
        ``overlayform`` pyramid view, checking both the error and the
        success response payloads.
        """
        with self.layer.hook_tile_reg():
            # Transient tile registration; undone when the context exits.
            @tile(name='overlayform', interface=BaseNode)
            @plumbing(OverlayForm)
            class MyOverlayForm(Form):
                def prepare(self):
                    # Form posts back to <node URL>/<action resource>.
                    form = factory(
                        u'form',
                        name='overlayform',
                        props={
                            'action': self.nodeurl + '/' + self.action_resource
                        })
                    form['title'] = factory(
                        'field:label:error:text',
                        value=self.model.attrs.title,
                        props={
                            'label': 'Title',
                            'required': 'Title is required'
                        })
                    form['update'] = factory(
                        'submit',
                        props={
                            'action': 'update',
                            'expression': True,
                            'handler': self.update,
                            'next': self.next,
                            'label': 'Update',
                        })
                    self.form = form
                def update(self, widget, data):
                    # NOTE(review): reads 'editform.title', not
                    # 'overlayform.title' -- looks inconsistent with the form
                    # name above; confirm intended.
                    fetch = self.request.params.get
                    self.model.attrs.title = fetch('editform.title')
        model = BaseNode(name='root')
        model.attrs.title = u'Title'
        # Overlay form invocation happens via overlay form entry tile
        request = self.layer.new_request()
        request.params['ajax'] = '1'
        with self.layer.authenticated('max'):
            res = render_tile(model, request, 'overlayformtile')
        expected = '<form action="http://example.com/root/overlayform"'
        self.assertTrue(res.startswith(expected))
        expected = 'class="ajax"'
        self.assertTrue(res.find(expected) > -1)
        # Rendering must record the overlay selector/mode in the environ.
        self.assertEqual(
            request.environ['cone.app.form.selector'],
            '#ajax-overlay .overlay_content'
        )
        self.assertEqual(request.environ['cone.app.form.mode'], 'inner')
        # Overlay form submission happens via related pyramid view
        # Case form error
        request = self.layer.new_request()
        request.params['ajax'] = '1'
        request.params['overlayform.title'] = ''
        request.params['action.overlayform.update'] = '1'
        with self.layer.authenticated('max'):
            res = overlayform(model, request)
        expected = '<div id="ajaxform">'
        self.assertTrue(res.text.startswith(expected))
        # On error the form is re-rendered with the validation message.
        expected = '<form action="http://example.com/root/overlayform"'
        self.assertTrue(res.text.find(expected) > -1)
        expected = '<div class="errormessage">Title is required</div>'
        self.assertTrue(res.text.find(expected) > -1)
        expected = '<script'
        self.assertTrue(res.text.find(expected) > -1)
        expected = (
            "parent.bdajax.render_ajax_form("
            "child, '#ajax-overlay .overlay_content', 'inner', false);"
        )
        self.assertTrue(res.text.find(expected) > -1)
        # Case form success
        request = self.layer.new_request()
        request.params['ajax'] = '1'
        request.params['overlayform.title'] = 'New Title'
        request.params['action.overlayform.update'] = '1'
        with self.layer.authenticated('max'):
            res = overlayform(model, request)
        expected = '<div id="ajaxform">'
        self.assertTrue(res.text.startswith(expected))
        # On success no form markup is returned, only the continuation script.
        self.assertFalse(res.text.find('<form') > -1)
        expected = '<script'
        self.assertTrue(res.text.find(expected) > -1)
        expected = (
            "parent.bdajax.render_ajax_form("
            "child, '#ajax-overlay .overlay_content', 'inner', ["
        )
        self.assertTrue(res.text.find(expected) > -1)
        expected = '"close": true'
        self.assertTrue(res.text.find(expected) > -1)
@testing.reset_node_info_registry
def test_overlay_add(self):
@node_info(
name='mynode',
addables=['mynode'])
class MyNode(BaseNode):
pass
with self.layer.hook_tile_reg():
@tile(name='overlayaddform', interface=MyNode)
@plumbing(OverlayAddForm)
class MyOverlayAddForm(Form):
def prepare(self):
form = factory(
u'form',
name='overlayaddform',
props={
'action': self.nodeurl + '/' + self.action_resource
})
form['title'] = factory(
'field:label:error:text',
props={
'label': 'Title',
'required': 'Title is required'
})
| |
from collections import defaultdict
import unittest
import math
import sys
from validateModeller import *
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
if sys.version_info >= (3, 0):
from io import StringIO
else:
from cStringIO import StringIO
class TestModeller(unittest.TestCase):
""" Test the Modeller class. """
    def setUp(self):
        """Load the fixture PDB systems and force field shared by the tests."""
        # load the alanine dipeptide pdb file
        self.pdb = PDBFile('systems/alanine-dipeptide-explicit.pdb')
        self.topology_start = self.pdb.topology
        self.positions = self.pdb.positions
        self.forcefield = ForceField('amber10.xml', 'tip3p.xml')
        # load the T4-lysozyme-L99A receptor pdb file
        self.pdb2 = PDBFile('systems/lysozyme-implicit.pdb')
        self.topology_start2 = self.pdb2.topology
        self.positions2 = self.pdb2.positions
        # load the metallothionein pdb file
        self.pdb3 = PDBFile('systems/1T2Y.pdb')
        self.topology_start3 = self.pdb3.topology
        self.positions3 = self.pdb3.positions
def test_deleteWater(self):
""" Test the deleteWater() method. """
# build the chain dictionary
chain_dict = {0:0}
# 749 water chains are deleted
chain_delta = -749
# Build the residue and atom dictionaries for validate_preserved.
# Also, count the number of deleted residues and atoms.
residues_preserved = 0
residue_delta = 0
residue_dict = {}
atoms_preserved = 0
atom_delta = 0
atom_dict = {}
for residue in self.topology_start.residues():
if residue.name!='HOH' and residue.name!='WAT':
residue_dict[residue.index] = residues_preserved
residues_preserved += 1
for atom in residue.atoms():
atom_dict[atom.index] = atoms_preserved
atoms_preserved += 1
else:
residue_delta -= 1
for atom in residue.atoms():
atom_delta -= 1
modeller = Modeller(self.topology_start, self.positions)
modeller.deleteWater()
topology_after = modeller.getTopology()
validate_preserved(self, self.topology_start, topology_after,
chain_dict, residue_dict, atom_dict)
validate_deltas(self, self.topology_start, topology_after,
chain_delta, residue_delta, atom_delta)
def test_delete(self):
""" Test the delete() method. """
modeller = Modeller(self.topology_start, self.positions)
topology_before = modeller.getTopology()
# Create the list of items to be deleted.
# Start with the first 50 water chains
chains = [chain for chain in topology_before.chains()]
toDelete = chains[1:51]
# Next add water residues 103->152 to the list of items to be deleted
residues = [residue for residue in topology_before.residues()]
toDelete.extend(residues[103:153])
# Finally add water atoms 622->771 to the list of items to be deleted
atoms = [atom for atom in topology_before.atoms()]
toDelete.extend(atoms[622:772])
modeller.delete(toDelete)
topology_after = modeller.getTopology()
# build the chain dictionary
chain_dict = {0:0}
for i in range(1,51):
chain_dict[i+50] = i
for i in range(51,101):
chain_dict[i+100] = i
for i in range(101, 600):
chain_dict[i+150] = i
# build the residue dictionary
residue_dict = {}
for i in range(3):
residue_dict[i] = i
for i in range(3,53):
residue_dict[i+50] = i
for i in range(53, 103):
residue_dict[i+100] = i
for i in range(103, 602):
residue_dict[i+150] = i
# build the atom dictionary
atom_dict = {}
for i in range(22):
atom_dict[i] = i
for i in range(22,172):
atom_dict[i+150] = i
for i in range(172,322):
atom_dict[i+300] = i
for i in range(322,1819):
atom_dict[i+450] = i
validate_preserved(self, topology_before, topology_after, chain_dict, residue_dict, atom_dict)
chain_delta = -150
residue_delta = -150
atom_delta = -450
validate_deltas(self, topology_before, topology_after, chain_delta, residue_delta, atom_delta)
def test_add(self):
""" Test the add() method. """
# load the methanol-box pdb file
pdb2 = PDBFile('systems/methanol-box.pdb')
topology_toAdd = pdb2.topology
positions_toAdd = pdb2.positions
modeller = Modeller(self.topology_start, self.positions)
modeller.deleteWater()
topology_before = modeller.getTopology()
modeller.add(topology_toAdd, positions_toAdd)
topology_after = modeller.getTopology()
# build the first chain dictionary for the first call of validate_preserved()
chain_counter = 0
chain_dict = {}
for chain in topology_before.chains():
chain_dict[chain.index] = chain_counter
chain_counter += 1
# build the residue and atom dictionaries for the first call of validate_preserved()
residue_counter = 0
residue_dict = {}
atom_counter = 0
atom_dict = {}
for residue in topology_before.residues():
residue_dict[residue.index] = residue_counter
residue_counter += 1
for atom in residue.atoms():
atom_dict[atom.index] = atom_counter
atom_counter += 1
# Validate that the items from the before topology are preserved after addition of items.
validate_preserved(self, topology_before, topology_after, chain_dict, residue_dict, atom_dict)
# Next, we build another set of dictionaries to validate that the items added are
# preserved. Also, we calculate the number of chains, residues, and atoms added.
# build the chain dictionary
chain_delta = 0
chain_dict = {}
for chain in topology_toAdd.chains():
chain_dict[chain.index] = chain_counter
chain_counter += 1
chain_delta += 1
# build the residue and atom dictionaries for the second call of validate_preserved
residue_delta = 0
residue_dict = {}
atom_delta = 0
atom_dict = {}
for residue in topology_toAdd.residues():
residue_dict[residue.index] = residue_counter
residue_counter += 1
residue_delta += 1
for atom in residue.atoms():
atom_dict[atom.index] = atom_counter
atom_counter += 1
atom_delta += 1
# validate that the items in the added topology are preserved
validate_preserved(self, topology_toAdd, topology_after, chain_dict, residue_dict, atom_dict)
# validate that the final topology has the correct number of items
validate_deltas(self, topology_before, topology_after, chain_delta, residue_delta, atom_delta)
    def test_convertWater(self):
        """Test the convertWater() method for every supported water model.

        For each target model the water is first converted to a *different*
        model so the conversion to the target is a genuine change, then the
        per-residue atom makeup and the preserved indices are checked.
        """
        for model in ['tip3p', 'spce', 'tip4pew', 'tip5p']:
            # Pick a starting model different from the target.
            if model == 'tip5p':
                firstmodel = 'tip4pew'
            else:
                firstmodel = 'tip5p'
            modeller = Modeller(self.topology_start, self.positions)
            modeller.convertWater(model=firstmodel)
            modeller.convertWater(model=model)
            topology_after = modeller.getTopology()
            # Each water must have 1 O, 2 H, and the virtual sites of the
            # target model: tip4pew adds 'M'; tip5p adds 'M1' and 'M2'.
            for residue in topology_after.residues():
                if residue.name == "HOH":
                    oatom = [atom for atom in residue.atoms() if atom.element == element.oxygen]
                    hatoms = [atom for atom in residue.atoms() if atom.element == element.hydrogen]
                    matoms = [atom for atom in residue.atoms() if atom.name == 'M']
                    m1atoms = [atom for atom in residue.atoms() if atom.name == 'M1']
                    m2atoms = [atom for atom in residue.atoms() if atom.name == 'M2']
                    self.assertTrue(len(oatom)==1 and len(hatoms)==2)
                    if model=='tip3p' or model=='spce':
                        self.assertTrue(len(matoms)==0 and len(m1atoms)==0 and len(m2atoms)==0)
                    elif model=='tip4pew':
                        self.assertTrue(len(matoms)==1 and len(m1atoms)==0 and len(m2atoms)==0)
                    elif model=='tip5p':
                        self.assertTrue(len(matoms)==0 and len(m1atoms)==1 and len(m2atoms)==1)
            # build the chain dictionary for validate_preserved
            chain_counter = 0
            chain_dict = {}
            chain_delta = 0
            for chain in self.topology_start.chains():
                chain_dict[chain.index] = chain_counter
                chain_counter += 1
            # build the residue and atom dictionaries for validate_preserved;
            # virtual sites inserted per water shift subsequent atom indices.
            residue_counter = 0
            residue_dict = {}
            residue_delta = 0
            atom_counter = 0
            atom_dict = {}
            atom_delta = 0
            for residue in self.topology_start.residues():
                residue_dict[residue.index] = residue_counter
                residue_counter += 1
                for atom in residue.atoms():
                    atom_dict[atom.index] = atom_counter
                    atom_counter += 1
                if residue.name == 'HOH' and model == 'tip4pew':
                    atom_counter += 1
                    atom_delta += 1
                if residue.name == 'HOH' and model == 'tip5p':
                    atom_counter += 2
                    atom_delta += 2
            validate_preserved(self, self.topology_start, topology_after,
                               chain_dict, residue_dict, atom_dict)
            validate_deltas(self, self.topology_start, topology_after,
                            chain_delta, residue_delta, atom_delta)
    def test_addSolventWaterModels(self):
        """Test the addSolvent() method with all supported water models."""
        topology_start = self.pdb.topology
        topology_start.setUnitCellDimensions(Vec3(3.5, 3.5, 3.5)*nanometers)
        for model in ['tip3p', 'spce', 'tip4pew', 'tip5p']:
            forcefield = ForceField('amber10.xml', model + '.xml')
            modeller = Modeller(topology_start, self.positions)
            # delete water to get the "before" topology
            modeller.deleteWater()
            topology_before = modeller.getTopology()
            # add the solvent to get the "after" topology
            modeller.addSolvent(forcefield, model=model)
            topology_after = modeller.getTopology()
            # First, check that everything that was there before has been preserved.
            # build the chain dictionary for validate_preserved
            # (the {0:0} seed is immediately overwritten by the loop below)
            chain_counter = 0
            chain_dict = {0:0}
            for chain in topology_before.chains():
                chain_dict[chain.index] = chain_counter
                chain_counter += 1
            # build the residue and atom dictionaries for validate_preserved
            residue_counter = 0
            residue_dict = {}
            atom_counter = 0
            atom_dict = {}
            for residue in topology_before.residues():
                residue_dict[residue.index] = residue_counter
                residue_counter += 1
                for atom in residue.atoms():
                    atom_dict[atom.index] = atom_counter
                    atom_counter += 1
            # validate that the items in the before topology remain after solvent is added
            validate_preserved(self, topology_before, topology_after, chain_dict, residue_dict, atom_dict)
            # Make sure water that was added was the correct model:
            # 1 O + 2 H always; tip4pew adds 'M'; tip5p adds 'M1' and 'M2'.
            for residue in topology_after.residues():
                if residue.name == 'HOH':
                    oatom = [atom for atom in residue.atoms() if atom.element == element.oxygen]
                    hatoms = [atom for atom in residue.atoms() if atom.element == element.hydrogen]
                    matoms = [atom for atom in residue.atoms() if atom.name == 'M']
                    m1atoms = [atom for atom in residue.atoms() if atom.name == 'M1']
                    m2atoms = [atom for atom in residue.atoms() if atom.name == 'M2']
                    self.assertTrue(len(oatom)==1 and len(hatoms)==2)
                    if model=='tip3p' or model=='spce':
                        self.assertTrue(len(matoms)==0 and len(m1atoms)==0 and len(m2atoms)==0)
                    elif model=='tip4pew':
                        self.assertTrue(len(matoms)==1 and len(m1atoms)==0 and len(m2atoms)==0)
                    elif model=='tip5p':
                        self.assertTrue(len(matoms)==0 and len(m1atoms)==1 and len(m2atoms)==1)
def test_addSolventPeriodicBox(self):
""" Test the addSolvent() method; test that the five ways of passing in the periodic box all work. """
# First way of passing in periodic box vectors: set it in the original topology.
topology_start = self.pdb.topology
topology_start.setUnitCellDimensions(Vec3(3.5, 4.5, 5.5)*nanometers)
modeller = Modeller(topology_start, self.positions)
modeller.deleteWater()
modeller.addSolvent(self.forcefield)
topology_after = modeller.getTopology()
dim3 = topology_after.getPeriodicBoxVectors()
self.assertVecAlmostEqual(dim3[0]/nanometers, Vec3(3.5, 0, 0))
self.assertVecAlmostEqual(dim3[1]/nanometers, Vec3(0, 4.5, 0))
self.assertVecAlmostEqual(dim3[2]/nanometers, Vec3(0, 0, 5.5))
# Second way of passing in the periodic box vectors: with the boxSize parameter to addSolvent()
topology_start = self.pdb.topology
modeller = Modeller(topology_start, self.positions)
modeller.deleteWater()
modeller.addSolvent(self.forcefield, boxSize = Vec3(3.6, | |
raise CredentialNotVerifiable("Malformed XML: No credential tag found")
# Just take the first cred if there are more than one
cred = creds[0]
self.set_refid(cred.getAttribute("xml:id"))
self.set_expiration(utcparse(getTextNode(cred, "expires")))
# import traceback
# stack = traceback.extract_stack()
og = getTextNode(cred, "owner_gid")
# ABAC creds will have this be None and use this method
# if og is None:
# found = False
# for frame in stack:
# if 'super(ABACCredential, self).decode()' in frame:
# found = True
# break
# if not found:
# raise CredentialNotVerifiable("Malformed XML: No owner_gid found")
self.gidCaller = GID(string=og)
tg = getTextNode(cred, "target_gid")
# if tg is None:
# found = False
# for frame in stack:
# if 'super(ABACCredential, self).decode()' in frame:
# found = True
# break
# if not found:
# raise CredentialNotVerifiable("Malformed XML: No target_gid found")
self.gidObject = GID(string=tg)
# Process privileges
rlist = Rights()
priv_nodes = cred.getElementsByTagName("privileges")
if len(priv_nodes) > 0:
privs = priv_nodes[0]
for priv in privs.getElementsByTagName("privilege"):
kind = getTextNode(priv, "name")
deleg = str2bool(getTextNode(priv, "can_delegate"))
if kind == '*':
# Convert * into the default privileges for the credential's type
# Each inherits the delegatability from the * above
_ , type = urn_to_hrn(self.gidObject.get_urn())
rl = determine_rights(type, self.gidObject.get_urn())
for r in rl.rights:
r.delegate = deleg
rlist.add(r)
else:
rlist.add(Right(kind.strip(), deleg))
self.set_privileges(rlist)
# Is there a parent?
parent = cred.getElementsByTagName("parent")
if len(parent) > 0:
parent_doc = parent[0].getElementsByTagName("credential")[0]
parent_xml = parent_doc.toxml("utf-8")
if parent_xml is None or parent_xml.strip() == "":
raise CredentialNotVerifiable("Malformed XML: Had parent tag but it is empty")
self.parent = Credential(string=parent_xml)
self.updateRefID()
# Assign the signatures to the credentials
for sig in sigs:
Sig = Signature(string=sig.toxml("utf-8"))
for cur_cred in self.get_credential_list():
if cur_cred.get_refid() == Sig.get_refid():
cur_cred.set_signature(Sig)
##
# Verify
# trusted_certs: A list of trusted GID filenames (not GID objects!)
# Chaining is not supported within the GIDs by xmlsec1.
#
# trusted_certs_required: Should usually be true. Set False means an
# empty list of trusted_certs would still let this method pass.
# It just skips xmlsec1 verification et al. Only used by some utils
#
# Verify that:
# . All of the signatures are valid and that the issuers trace back
# to trusted roots (performed by xmlsec1)
# . The XML matches the credential schema
# . That the issuer of the credential is the authority in the target's urn
# . In the case of a delegated credential, this must be true of the root
# . That all of the gids presented in the credential are valid
    #      . Including verifying GID chains, and including the issuer
# . The credential is not expired
#
# -- For Delegates (credentials with parents)
# . The privileges must be a subset of the parent credentials
# . The privileges must have "can_delegate" set for each delegated privilege
# . The target gid must be the same between child and parents
# . The expiry time on the child must be no later than the parent
# . The signer of the child must be the owner of the parent
#
# -- Verify does *NOT*
# . ensure that an xmlrpc client's gid matches a credential gid, that
# must be done elsewhere
#
# @param trusted_certs: The certificates of trusted CA certificates
    def verify(self, trusted_certs=None, schema=None, trusted_certs_required=True):
        """Verify this credential and its delegation chain.

        See the comment block preceding this method for the full contract.
        Returns True on success; raises CredentialNotVerifiable on failure.

        trusted_certs -- list of trusted CA certificate *filenames* (not GID
            objects). Explicitly passing None skips cert-chain and xmlsec1
            validation (unusual; see inline comments).
        schema -- optional path to an XML schema to validate against.
        trusted_certs_required -- when True (default), a None trusted_certs
            is normalized to an empty list instead of skipping validation.
        """
        if not self.xml:
            self.decode()
        # validate against RelaxNG schema
        # NOTE(review): silently skipped when lxml is unavailable, when this
        # is a legacy credential, or when the schema file does not exist.
        if HAVELXML and not self.legacy:
            if schema and os.path.exists(schema):
                tree = etree.parse(StringIO(self.xml))
                schema_doc = etree.parse(schema)
                xmlschema = etree.XMLSchema(schema_doc)
                if not xmlschema.validate(tree):
                    error = xmlschema.error_log.last_error
                    message = "%s: %s (line %s)" % (self.get_summary_tostring(), error.message, error.line)
                    raise CredentialNotVerifiable(message)
        if trusted_certs_required and trusted_certs is None:
            trusted_certs = []
        # trusted_cert_objects = [GID(filename=f) for f in trusted_certs]
        trusted_cert_objects = []
        ok_trusted_certs = []
        # If caller explicitly passed in None that means skip cert chain validation.
        # Strange and not typical
        if trusted_certs is not None:
            # Load each trusted cert file; unreadable or non-PEM files are
            # logged and dropped rather than failing the whole verification.
            for f in trusted_certs:
                try:
                    # Failures here include unreadable files
                    # or non PEM files
                    trusted_cert_objects.append(GID(filename=f))
                    ok_trusted_certs.append(f)
                except Exception, exc:
                    logger.error("Failed to load trusted cert from %s: %r", f, exc)
            trusted_certs = ok_trusted_certs
        # Use legacy verification if this is a legacy credential
        if self.legacy:
            self.legacy.verify_chain(trusted_cert_objects)
            if self.legacy.client_gid:
                self.legacy.client_gid.verify_chain(trusted_cert_objects)
            if self.legacy.object_gid:
                self.legacy.object_gid.verify_chain(trusted_cert_objects)
            return True
        # make sure it is not expired
        if self.get_expiration() < datetime.datetime.utcnow():
            raise CredentialNotVerifiable("Credential %s expired at %s" % (self.get_summary_tostring(), self.expiration.isoformat()))
        # Verify the signatures
        filename = self.save_to_random_tmp_file()
        # cert_args is only needed (and only defined) when trusted_certs is
        # set; the xmlsec1 loop below breaks out before using it otherwise.
        if trusted_certs is not None:
            cert_args = " ".join(['--trusted-pem %s' % x for x in trusted_certs])
        # If caller explicitly passed in None that means skip cert chain validation.
        # - Strange and not typical
        if trusted_certs is not None:
            # Verify the gids of this cred and of its parents
            for cur_cred in self.get_credential_list():
                cur_cred.get_gid_object().verify_chain(trusted_cert_objects)
                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)
        # Collect the signature node ids of this credential and all parents.
        refs = []
        refs.append("Sig_%s" % self.get_refid())
        parentRefs = self.updateRefID()
        for ref in parentRefs:
            refs.append("Sig_%s" % ref)
        for ref in refs:
            # If caller explicitly passed in None that means skip xmlsec1 validation.
            # Strange and not typical
            if trusted_certs is None:
                break
            # print "Doing %s --verify --node-id '%s' %s %s 2>&1" % \
            # (self.xmlsec_path, ref, cert_args, filename)
            verified = os.popen('%s --verify --node-id "%s" %s %s 2>&1' \
                % (self.xmlsec_path, ref, cert_args, filename)).read()
            if not verified.strip().startswith("OK"):
                # xmlsec errors have a msg= which is the interesting bit.
                mstart = verified.find("msg=")
                msg = ""
                if mstart > -1 and len(verified) > 4:
                    mstart = mstart + 4
                    mend = verified.find('\\', mstart)
                    msg = verified[mstart:mend]
                raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s %s" % (self.get_summary_tostring(), ref, msg, verified.strip()))
        os.remove(filename)
        # Verify the parents (delegation)
        if self.parent:
            self.verify_parent(self.parent)
        # Make sure the issuer is the target's authority, and is
        # itself a valid GID
        self.verify_issuer(trusted_cert_objects)
        return True
##
# Creates a list of the credential and its parents, with the root
# (original delegated credential) as the last item in the list
def get_credential_list(self):
cur_cred = self
list = []
while cur_cred:
list.append(cur_cred)
if cur_cred.parent:
cur_cred = cur_cred.parent
else:
cur_cred = None
return list
##
# Make sure the credential's target gid (a) was signed by or (b)
# is the same as the entity that signed the original credential,
# or (c) is an authority over the target's namespace.
# Also ensure that the credential issuer / signer itself has a valid
# GID signature chain (signed by an authority with namespace rights).
def verify_issuer(self, trusted_gids):
root_cred = self.get_credential_list()[-1]
root_target_gid = root_cred.get_gid_object()
if root_cred.get_signature() is None:
# malformed
raise CredentialNotVerifiable("Could not verify credential owned by %s for object %s. Cred has no signature" % (self.gidCaller.get_urn(), self.gidObject.get_urn()))
root_cred_signer = root_cred.get_signature().get_issuer_gid()
# Case 1:
# Allow non authority to sign target and cred about target.
#
# Why do we need to allow non authorities to sign?
# If in the target gid validation step we correctly
# checked that the target is only signed by an authority,
# then this is just a special case of case 3.
# This short-circuit is the common case currently -
# and cause GID validation doesn't check 'authority',
# this allows users to generate valid slice credentials.
if root_target_gid.is_signed_by_cert(root_cred_signer):
# cred signer matches target signer, return success
return
# Case 2:
        # Allow someone to sign credential about themselves. Used?
# If not, remove this.
#root_target_gid_str = root_target_gid.save_to_string()
#root_cred_signer_str = root_cred_signer.save_to_string()
#if root_target_gid_str == root_cred_signer_str:
# # cred signer is target, return success
# return
# Case 3:
# root_cred_signer is not the target_gid
# So this is a different gid that we have not verified.
# xmlsec1 verified the cert chain on this already, but
# it hasn't verified that the gid meets the HRN namespace
# requirements.
# Below we'll ensure that it is an authority.
# But we haven't verified that it is _signed by_ an authority
# We also don't know if xmlsec1 requires that cert signers
# are marked as CAs.
# Note that if verify() gave us no trusted_gids then this
# call will | |
longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = data[var + '_next'][:, lats, :][:, :, longs]
X2 = X2.reshape(-1, X2.shape[1] * X2.shape[2])
level = var + '_next_' + area_name
self.logger.info('Begin PCA training for %s', level)
X2_compressed = self.PCA_transform(X2, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'flux_' + area_name if var=='Flux' else 'wind_' + area_name
var_sort = 'fl_' + area_name if var=='Flux' else 'ws_' + area_name
col = ['p_' + var_name + '.' + str(i) for i in range(3)]
col += ['n_' + var_name + '.' + str(i) for i in range(3)]
col += [var_name + '.' + str(i) for i in range(9)]
X = np.hstack((X0_compressed, X2_compressed, X1_compressed))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 9, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'cloud_' + area_name if var=='Cloud' else 'direction_' + area_name
var_sort = 'cl_' + area_name if var=='Cloud' else 'wd_' + area_name
col = [var_name + '.' + str(i) for i in range(9)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type']=='pv')):
X1 = data[var][:, lats, :][:, :, longs]
X1 = X1.reshape(-1, X1.shape[1] * X1.shape[2])
level = var + area_name
self.logger.info('Begin PCA training for %s', level)
X1_compressed = self.PCA_transform(X1, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
var_sort = 'tp_' + area_name if var == 'Temperature' else 'ws_' + area_name
col = [var_name + '.' + str(i) for i in range(3)]
X = X1_compressed
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
else:
continue
for var in self.variables:
if ((var == 'WS') and (self.static_data['type'] == 'wind')) or (
(var == 'Flux') and (self.static_data['type'] == 'pv')):
col = []
col_p = []
col_n = []
for area_name, area in areas.items():
var_name = 'flux_' + area_name if var == 'Flux' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
col_p += ['p_' + var_name + '.' + str(i) for i in range(3)]
col_n += ['n_' + var_name + '.' + str(i) for i in range(3)]
var_name = 'flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
var_name = 'p_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_p].mean(axis=1)
var_name = 'n_flux' if var == 'Flux' else 'wind'
dataset_X[var_name] = dataset_X[col_n].mean(axis=1)
elif var in {'WD', 'Cloud'}:
col = []
for area_name, area in areas.items():
var_name = 'cloud_' + area_name if var == 'Cloud' else 'direction_' + area_name
col += [var_name + '.' + str(i) for i in range(9)]
var_name = 'cloud' if var == 'Cloud' else 'direction'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
elif (var in {'Temperature'}) or ((var == 'WS') and (self.static_data['type'] == 'pv')):
col = []
for area_name, area in areas.items():
var_name = 'Temp_' + area_name if var == 'Temperature' else 'wind_' + area_name
col += [var_name + '.' + str(i) for i in range(3)]
var_name = 'Temp' if var == 'Temperature' else 'wind'
dataset_X[var_name] = dataset_X[col].mean(axis=1)
return dataset_X
def make_dataset_res_online(self, utc=False):
def datetime_exists_in_tz(dt, tz):
try:
dt.tz_localize(tz)
return True
except:
return False
data, X_3d = self.get_3d_dataset_online(utc)
if not isinstance(self.areas, dict):
X = self.dataset_for_single_farm_online(data)
else:
dates_stack = []
for t in self.dates:
pdates = pd.date_range(t + pd.DateOffset(hours=24), t + pd.DateOffset(hours=47), freq='H')
dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
dates_stack.append(dates)
flag = False
for i, pdates in enumerate(dates_stack):
t= self.dates[i]
fname = os.path.join(self.path_nwp_project, self.nwp_model + '_' + t.strftime('%d%m%y') + '.pickle')
if os.path.exists(fname):
nwps = joblib.load(fname)
for date in pdates:
try:
nwp = nwps[date]
if len(nwp['lat'].shape) == 1:
nwp['lat'] = nwp['lat'][:, np.newaxis]
if len(nwp['long'].shape) == 1:
nwp['long'] = nwp['long'][np.newaxis, :]
lats = (np.where((nwp['lat'][:, 0] >= self.area_group[0][0]) & (nwp['lat'][:, 0] <= self.area_group[1][0])))[0]
longs = (np.where((nwp['long'][0, :] >= self.area_group[0][1]) & (nwp['long'][0, :] <= self.area_group[1][1])))[0]
lats_group = nwp['lat'][lats]
longs_group = nwp['long'][:, longs]
flag = True
break
except:
continue
if flag:
break
X = self.dataset_for_multiple_farms_online(data, self.areas, lats_group, longs_group)
return X, X_3d
    def get_3d_dataset_online(self, utc):
        """Load and stack next-day NWP data for online prediction.

        Parameters:
            utc: when truthy, forecast hours are used as-is (UTC); otherwise
                they are localized to Europe/Athens and converted to UTC.

        Returns:
            (data_var, X_3d): dict of stacked per-variable arrays (plus a
            'dates' entry) and the stacked 3-D input array.

        NOTE(review): unlike make_dataset_res_online, ``self.dates`` is used
        here as a single timestamp, not iterated -- confirm callers set it so.
        """
        def datetime_exists_in_tz(dt, tz):
            # True when the timestamp exists in tz (filters out times that
            # fall into a DST gap, where tz_localize raises).
            try:
                dt.tz_localize(tz)
                return True
            except:
                return False
        dates_stack = []
        if utc:
            # Hours +25..+48 relative to self.dates, restricted to known data.
            pdates = pd.date_range(self.dates + pd.DateOffset(hours=25), self.dates + pd.DateOffset(hours=48), freq='H')
            dates = [dt.strftime('%d%m%y%H%M') for dt in pdates if dt in self.data.index]
            dates_stack.append(dates)
        else:
            # Same window, but localized to Athens time then converted to UTC;
            # non-existent (DST-gap) timestamps are dropped first.
            pdates = pd.date_range(self.dates + pd.DateOffset(hours=25), self.dates + pd.DateOffset(hours=48), freq='H')
            indices = [i for i, t in enumerate(pdates) if datetime_exists_in_tz(t, tz=timezone('Europe/Athens'))]
            pdates = pdates[indices]
            pdates = pdates.tz_localize(timezone('Europe/Athens'))
            pdates = pdates.tz_convert(timezone('UTC'))
            dates = [dt.strftime('%d%m%y%H%M') for dt in pdates]
            dates_stack.append(dates)
        # Single farm uses self.areas directly; grouped farms use area_group.
        if not isinstance(self.areas, dict):
            arrays = stack_daily_nwps(self.dates, dates_stack[0], self.path_nwp_project, self.nwp_model, self.areas, self.variables, self.compress, self.static_data['type'])
        else:
            arrays = stack_daily_nwps(self.dates, dates_stack[0], self.path_nwp_project, self.nwp_model, self.area_group,
                                      self.variables, self.compress, self.static_data['type'])
        # Seed every expected key with an empty array; the primary variable
        # (WS for wind, Flux for pv) also gets _prev/_next entries.
        X = np.array([])
        data_var = dict()
        for var in self.variables:
            if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
                data_var[var+'_prev'] = X
                data_var[var] = X
                data_var[var+'_next'] = X
            else:
                data_var[var] = X
        data_var['dates'] = X
        X_3d = np.array([])
        nwp = arrays[0]
        x_2d = arrays[1]
        # Only stack when the daily extraction produced any rows.
        if x_2d.shape[0]!=0:
            for var in nwp.keys():
                if var != 'dates':
                    data_var[var] = stack_3d(data_var[var], nwp[var])
                else:
                    data_var[var] = np.hstack((data_var[var], nwp[var]))
            X_3d = stack_3d(X_3d, x_2d)
            self.logger.info('NWP data stacked for date %s', arrays[2])
        return data_var, X_3d
def dataset_for_single_farm_online(self, data):
dataset_X = pd.DataFrame()
if self.static_data['type'] == 'pv':
hours = [dt.hour for dt in data['dates']]
months = [dt.month for dt in data['dates']]
dataset_X = pd.concat(
[dataset_X, pd.DataFrame(np.stack([hours, months]).T, index=data['dates'], columns=['hour', 'month'])])
for var in self.variables:
if ((var == 'WS') and (self.static_data['type']=='wind')) or ((var == 'Flux') and (self.static_data['type']=='pv')):
X0 = np.transpose(data[var + '_prev'],[0, 2, 1])
X0_level0 = X0[:, 2, 2]
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_down'
self.logger.info('Begin PCA training for %s', level)
X1_level3d = self.PCA_transform(X1_level3d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[2, 3], [3, 2], [3, 3]]
ind = np.array(ind)
X1_level3u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_mid_up'
self.logger.info('Begin PCA training for %s', level)
X1_level3u = self.PCA_transform(X1_level3u, 2, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[0, j] for j in range(5)] + [[i, 0] for i in range(1, 5)]
ind = np.array(ind)
X1_level4d = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_down'
self.logger.info('Begin PCA training for %s', level)
X1_level4d = self.PCA_transform(X1_level4d, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
ind = [[4, j] for j in range(1, 5)] + [[i, 4] for i in range(1, 4)]
ind = np.array(ind)
X1_level4u = np.hstack([X1[:, indices[0], indices[1]].reshape(-1, 1) for indices in ind])
level = var + '_curr_out_up'
self.logger.info('Begin PCA training for %s', level)
X1_level4u = self.PCA_transform(X1_level4u, 3, level)
self.logger.info('Successfully PCA transform for %s', level)
X2 = np.transpose(data[var + '_next'],[0, 2, 1])
X2_level0 = X2[:, 2, 2]
var_name = 'flux' if var=='Flux' else 'wind'
var_sort = 'fl' if var=='Flux' else 'ws'
col = ['p_' + var_name] + ['n_' + var_name] + [var_name]
col = col + [var_sort + '_l1.' + str(i) for i in range(3)] + [var_sort + '_l2.' + str(i) for i in
range(2)]
col = col + [var_sort + '_l3d.' + str(i) for i in range(3)] + [var_sort + '_l3u.' + str(i) for i in
range(3)]
X = np.hstack((X0_level0.reshape(-1, 1), X2_level0.reshape(-1, 1), X1_level1.reshape(-1, 1), X1_level3d, X1_level3u, X1_level4d
, X1_level4u))
dataset_X = pd.concat([dataset_X, pd.DataFrame(X, index=data['dates'], columns=col)], axis=1)
elif var in {'WD', 'Cloud'}:
X1 = np.transpose(data[var],[0, 2, 1])
X1_level1 = X1[:, 2, 2]
ind = [[1, j] for j in range(1, 4)] + [[i, 1] for i in range(2, 4)]
ind = np.array(ind)
X1_level3d = | |
import math
from typing import Union
import numpy as np
def rotation_matrix(axis, theta):
    """
    Return the 3x3 matrix for a counterclockwise rotation of ``theta``
    radians about ``axis`` (Euler-Rodrigues formula).
    """
    unit_axis = np.asarray(axis)
    unit_axis = unit_axis / math.sqrt(np.dot(unit_axis, unit_axis))
    half_angle = theta / 2.0
    a = math.cos(half_angle)
    b, c, d = -unit_axis * math.sin(half_angle)
    # Quaternion products expanded into the rotation matrix entries.
    row0 = [a * a + b * b - c * c - d * d, 2 * (b * c + a * d), 2 * (b * d - a * c)]
    row1 = [2 * (b * c - a * d), a * a + c * c - b * b - d * d, 2 * (c * d + a * b)]
    row2 = [2 * (b * d + a * c), 2 * (c * d - a * b), a * a + d * d - b * b - c * c]
    return np.array([row0, row1, row2])
class Rectangle:
    """
    A rectangle defined by two opposite corner vertices in a local frame,
    plus a local-to-global transform (rotation about an axis by an angle,
    then a translation) and rendering attributes (name, type, temperature,
    emissivity, reverse flag).

    ``get_tra_command`` serialises the rectangle into a
    ``<Type=..., Geometry=..., ...>`` command string.
    """

    def __init__(self):
        self._name = None
        self._local_vertex_1 = None  # one corner, local frame
        self._local_vertex_2 = None  # opposite corner, local frame
        self._temperature = None
        self._emissivity = 1.0  # default emissivity reported in the command
        self._type = None  # e.g. 'Emitter'
        self._is_reverse = False
        self._local2global_xyz = None  # translation applied after rotation
        self._local2global_rotation_angle = None  # radians
        self._local2global_rotation_axis = None
        self._global_vertex_1 = None
        self._global_vertex_2 = None

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name: str):
        self._name = name

    @property
    def local_vertex_1(self) -> np.ndarray:
        return self._local_vertex_1

    @local_vertex_1.setter
    def local_vertex_1(self, vertex: Union[tuple, list, np.ndarray]):
        self._local_vertex_1 = vertex

    @property
    def local_vertex_2(self) -> np.ndarray:
        return self._local_vertex_2

    @local_vertex_2.setter
    def local_vertex_2(self, vertex: Union[tuple, list, np.ndarray]):
        self._local_vertex_2 = vertex

    @property
    def temperature(self):
        return self._temperature

    @temperature.setter
    def temperature(self, temperature: float):
        self._temperature = temperature

    @property
    def local2global_rotation_angle(self):
        return self._local2global_rotation_angle

    @local2global_rotation_angle.setter
    def local2global_rotation_angle(self, angle):
        self._local2global_rotation_angle = angle

    @property
    def local2global_rotation_axis(self):
        return self._local2global_rotation_axis

    @local2global_rotation_axis.setter
    def local2global_rotation_axis(self, axis):
        self._local2global_rotation_axis = axis

    @property
    def local2global_xyz(self):
        return self._local2global_xyz

    @local2global_xyz.setter
    def local2global_xyz(self, xyz: Union[list, tuple, np.ndarray]):
        self._local2global_xyz = xyz

    @property
    def global_vertex_1(self):
        return self._global_vertex_1

    @global_vertex_1.setter
    def global_vertex_1(self, vertex):
        self._global_vertex_1 = vertex

    @property
    def global_vertex_2(self):
        return self._global_vertex_2

    @global_vertex_2.setter
    def global_vertex_2(self, vertex):
        self._global_vertex_2 = vertex

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, x: str):
        self._type = x

    @property
    def is_reverse(self):
        return self._is_reverse

    @is_reverse.setter
    def is_reverse(self, x: bool):
        self._is_reverse = x

    def local2global_vertices(
            self,
            v1=None,
            v2=None,
            xyz: Union[list, tuple, np.ndarray] = None,
            axis: Union[list, tuple, np.ndarray] = None,
            angle: float = None
    ):
        """
        Transform the two local corner vertices to global coordinates.

        Any parameter supplied here overrides the corresponding attribute.
        Stores the results on ``global_vertex_1``/``global_vertex_2`` and
        returns them.

        :raises ValueError: if vertex or transform information is missing.
        """
        # Assign supplied parameters.  Bug fix: use explicit ``is not None``
        # tests instead of truthiness, which silently ignored legitimate
        # falsy values (e.g. angle=0) and raised "truth value is ambiguous"
        # for numpy array arguments.
        if v1 is not None:
            self.local_vertex_1 = v1
        if v2 is not None:
            self.local_vertex_2 = v2
        if xyz is not None:
            self.local2global_xyz = xyz
        if axis is not None:
            self.local2global_rotation_axis = axis
        if angle is not None:
            self.local2global_rotation_angle = angle
        # Bug fix: the original guard ``not (v1 or v2)`` only fired when
        # *both* vertices were missing; either one missing makes the
        # transform impossible.
        if self.local_vertex_1 is None or self.local_vertex_2 is None:
            raise ValueError('Missing local vertex information.')
        if (self.local2global_rotation_axis is None
                or self.local2global_rotation_angle is None
                or self.local2global_xyz is None):
            raise ValueError('Missing local-to-global transform information.')
        # local2global rotation
        rot_mat = rotation_matrix(self.local2global_rotation_axis,
                                  self.local2global_rotation_angle)
        vertex_1 = np.dot(rot_mat, self.local_vertex_1)
        vertex_2 = np.dot(rot_mat, self.local_vertex_2)
        # local2global shift
        vertex_1 += self.local2global_xyz
        vertex_2 += self.local2global_xyz
        # assign global vertices to object
        self.global_vertex_1 = vertex_1
        self.global_vertex_2 = vertex_2
        return vertex_1, vertex_2

    @staticmethod
    def get_global_vertices(v1, v2):
        """
        Expand two opposite corners into the rectangle's four corner
        vertices, ordered vv1..vv4.

        NOTE(review): ``max_a_min_b`` is named as if it ordered its inputs,
        but the swap was deliberately commented out in the original, so
        values pass through unchanged (which preserves the vertex
        winding/orientation).  Confirm before re-enabling any sorting.
        """
        def max_a_min_b(a, b):
            # Swap intentionally disabled -- see NOTE in the docstring.
            return a, b

        xmax, xmin = max_a_min_b(v1[0], v2[0])
        ymax, ymin = max_a_min_b(v1[1], v2[1])
        zmax, zmin = max_a_min_b(v1[2], v2[2])
        vv1 = [xmin, ymin, zmin]
        vv2 = [xmax, ymax, zmin]
        vv3 = [xmax, ymax, zmax]
        vv4 = [xmin, ymin, zmax]
        return vv1, vv2, vv3, vv4

    def get_tra_command(self):
        """
        Build the command string describing this rectangle, e.g.
        ``<Type=Emitter, Geometry=x:y:z*..., Name=..., Temperature=...,
        Reverse=..., Emissivity=...>``.
        """
        # Avoid shadowing the ``type`` builtin (was ``type = self.type``).
        shape_type = self.type
        geometry = self.get_global_vertices(self.global_vertex_1, self.global_vertex_2)
        geometry = [':'.join(['{:.3f}'.format(c) for c in v]) for v in geometry]
        geometry = '*'.join(geometry)
        name = self.name
        temperature = self.temperature
        reverse = self.is_reverse
        emissivity = self._emissivity
        return f'<Type={shape_type}, Geometry={geometry}, Name={name}, Temperature={temperature}, Reverse={reverse}, Emissivity={emissivity}>'
def array_windows(
        x: list,
        z: list,
        h: list,
        w: list,
        temperature: list,
        angle: float,
        local2global_xyz: np.ndarray = None
) -> list:
    """
    Create a list of rectangular 'Emitter' window panels.

    Panel ``i`` is centred at ``(x[i], 0, z[i])`` in the local frame, with
    width ``w[i]`` and height ``h[i]``, then transformed to global
    coordinates by rotating about the z axis by ``angle`` and translating
    by ``local2global_xyz``.

    :param x: panel centre x-coordinates (local frame).
    :param z: panel centre z-coordinates (local frame), same length as x.
    :param h: panel heights, same length as x.
    :param w: panel widths, same length as x.
    :param temperature: emitter temperature per panel, same length as x.
    :param angle: local-to-global rotation about the z axis, radians.
    :param local2global_xyz: local-to-global translation; defaults to the
        origin.
    :returns: list of configured Rectangle objects.
    """
    # Bug fix: the default was the mutable literal [0, 0, 0], which is
    # created once and shared across calls; use a None sentinel instead.
    if local2global_xyz is None:
        local2global_xyz = [0, 0, 0]
    panels = list()
    for i, cx in enumerate(x):
        cz = z[i]
        height = h[i]
        width = w[i]
        p = Rectangle()
        # NOTE(review): the 'w3_1_' name prefix is hard coded even though
        # callers use this helper for several facade levels -- confirm the
        # downstream consumer does not rely on unique names.
        p.name = f'w3_1_{i}'
        p.local_vertex_1 = [cx - width / 2, 0, cz - height / 2]
        p.local_vertex_2 = [cx + width / 2, 0, cz + height / 2]
        p.local2global_rotation_axis = [0, 0, 1]
        p.local2global_rotation_angle = angle
        p.local2global_xyz = local2global_xyz
        p.local2global_vertices()
        p.type = 'Emitter'
        p.is_reverse = True
        p.temperature = temperature[i]
        panels.append(p)
    return panels
def w3_emitter():
    """
    Print TRA emitter command strings for the "w3" facade windows,
    soffits and timber panels, one command per line.

    Example of the command format produced:

    Type=Emitter
    Geometry=5:0:0 * 10.2:0:0 * 10.2:-1.47:5.39 * 5:-1.47:5.39
    Name=Glazing
    Temperature=1105
    Reverse=FALSE
    Emissivity=1
    <Type=Emitter,Geometry=5:0:0 * 10.2:0:0 * 10.2:-1.47:5.39 * 5:-1.47:5.39,Name=Glazing,Temperature=1105,Reverse=FALSE,Emissivity=1>
    """
    # Facade orientation: rotation of the local frame about z, in radians.
    angle = (180 + 90 + 9.5) / 180 * np.pi
    # w3 - level 0
    # x = [1.5, 1 * 6 + 1.5, 2 * 6 + 1.5, 4 * 6, 6 * 6 - 1.5, 7 * 6 - 1.5]
    # x = [1.5, 1 * 6 + 1.5, 2 * 6 + 1.5, 4 * 6, 6 * 6 - 1.5]
    # z = [1.75, 1.75, 1.75, 1.75, 1.75, 1.75]
    # w = [3, 3, 3, 6, 3, 3]
    # h = [3.5, 3.5, 3.5, 3.5, 3.5, 3.5]
    # t = np.full_like(x, 1105)
    # p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    # [print(p.get_tra_command()) for p in p_]
    # w3 - level 1 timber facade
    # x: centre x-coords; z: centre heights; w/h: sizes; t: temperatures.
    x = [0.75, 3.75, 6.75, 9.75, 12.75, 15.75, 32.25, 35.25, 38.25, 41.25, 44.25]
    z = np.full_like(x, 4.25+3.55/2)
    w = np.full_like(x, 1.5)
    h = np.full_like(x, 3.55)
    t = np.full_like(x, 931)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 1 window
    # The second assignment immediately overrides the first (drops the
    # 46.5 m entry for the fire-rated variant).
    x = [2.25, 5.25, 8.25, 11.25, 14.25, 17.25, 30.75, 33.75, 36.75, 39.75, 42.75, 46.5]
    x = [2.25, 5.25, 8.25, 11.25, 14.25, 17.25, 30.75, 33.75, 36.75, 39.75, 42.75,] # fire rate 1 windows
    z = np.full_like(x, 4.25+3.55/2)
    w = np.full_like(x, 1.5)
    # NOTE(review): this 3 m width originally belonged to the 46.5 m window,
    # but that entry was dropped from x above, so the 3 m width now lands on
    # the 42.75 m window instead -- confirm this is intended.
    w[-1] = 3
    h = np.full_like(x, 3.55)
    t = np.full_like(x, 1105)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 1 soffit timber
    x = [9, 5*6+5*3/2]
    z = np.full_like(x, 4.25+3.55+1.45/2)
    w = [3*6, 5*3]
    h = np.full_like(x, 1.45)
    t = np.full_like(x, 931)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 2 timber facade
    x = [0.75, 3.75, 6.75, 9.75, 12.75, 15.75, 32.25, 35.25, 38.25, 41.25, 44.25, 47.25]
    z = np.full_like(x, 8.5+3.55/2)
    w = np.full_like(x, 1.5)
    h = np.full_like(x, 3.55)
    t = np.full_like(x, 931)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 2 window
    # As above, the second assignment overrides the first (drops 45.75).
    x = [2.25, 5.25, 8.25, 11.25, 14.25, 17.25, 24, 30.75, 33.75, 36.75, 39.75, 42.75, 45.75]
    x = [2.25, 5.25, 8.25, 11.25, 14.25, 17.25, 24, 30.75, 33.75, 36.75, 39.75, 42.75,] # fire rate the end three
    z = np.full_like(x, 8.5+3.55/2)
    w = np.full_like(x, 1.5)
    w[6] = 12 # to add the central windows
    h = np.full_like(x, 3.55)
    t = np.full_like(x, 1105)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w3 - level 2 soffit timber
    x = [9, 5*6+3*6/2]
    z = np.full_like(x, 8.5+3.55+1.45/2)
    w = [3*6, 3*6]
    h = np.full_like(x, 1.45)
    t = np.full_like(x, 931)
    p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    [print(p.get_tra_command()) for p in p_]
    # w2 - recessed windows
    # x = [24]
    # z = np.full_like(x, 4.25+3.55/2)
    # w = np.full_like(x, 6)
    # h = np.full_like(x, 3.55)
    # t = np.full_like(x, 1105)
    # local2global_xyz = np.array([0, -45, 0])
    # p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle)
    # [print(p.get_tra_command()) for p in p_]
    # w3 - far end bit
    # angle = (180 + 90 + 75) / 180 * np.pi
    # x = [5.75/2, ]
    # z = [3.5/2, ]
    # w = [5.75, ]
    # h = [3.5, ]
    # t = np.full_like(x, 1105)
    # local2global_xyz = np.array([7.8, -45, 0])
    # p_ = array_windows(x=x, z=z, w=w, h=h, temperature=t, angle=angle, local2global_xyz=local2global_xyz)
    # [print(p.get_tra_command()) for p in p_]
def w3_receiver():
angle = (180 + 90 + 9.5) / 180 * np.pi
# w3 - receiver
cx__ = [13.5 / 2]
cz__ = [15 / 2]
width__ = np.full_like(cx__, 55)
height__ = np.full_like(cx__, 13.5)
temperature__ = np.full_like(cx__, 293.15)
p_w3_lm = list()
for i, cx in enumerate(cx__):
cz = cz__[i]
h = height__[i]
w = width__[i]
p = Rectangle()
p.name = f'w3_m_{i}'
p.local_vertex_1 = [cx | |
zinc = 4.125,
annotstart = (1234, 5678),
annotinc = (5, 2),
corners = ((1000, 1000),
(3775, 1000),
(1000, 2890),
(3775, 2890)),
**kwargs
) as writer:
expect_datarange_1 = datarange
if datatype == SampleDataType.float and zgyWriterFactory != oldzgy.ZgyWriter:
# The value is unspecified. It could be NaN if the file was never
# flushed, or (0,0) if it was flushed before writing anything.
# Or it could be the (likely not calculated yet) statistical
# range if the code in api.ZgyMeta.datarange chooses to return
# the statistical range instead.
expect_datarange_1 = (0, 0)
#dump(filename, writer)
checkmeta(writer, datatype, expect_datarange_1)
if single_write:
# Read/modify/write is not allowed whan writing compressed data,
# or at least not recommended since noise will accumulate.
writer.write((0, 0, 0), createFancyBuffer(0, 0))
else:
writer.write((16,16,16), np.full((40,41,42), 31, dtype=np.float32))
writer.write((48,20,24), np.full((72,10,16), 97, dtype=np.float32))
writer.write((0,0,64), np.full((112,64,64), 0, dtype=np.float32))
# Statistics haven't been computed yet, so datarange for float cubes
# should still be returned as empty.
checkmeta(writer, datatype, expect_datarange_1)
with newzgy.ZgyReader(filename, iocontext = SDCredentials()) as reader:
expect_datarange_2 = datarange
if datatype == SampleDataType.float:
if True or zgyWriterFactory != oldzgy.ZgyWriter:
# The value has been explicitly set to the statistical range
# if written by the new writer. If api.ZgyMeta.datarange chooses
# to return the statistical range instead this this happens
# also for files written by the old accessor. The second
# conditinal should be disabled in that case.
expect_datarange_2 = (reader.statistics.min, reader.statistics.max)
checkmeta(reader, datatype, expect_datarange_2)
def checkmeta(meta, datatype = None, datarange = None):
    """
    Verify round trip of metadata. This can be used both by a writer
    (ensure the data we set is still available as properties) and a
    reader (ensure the roundtrip to a stored file and back worked).
    """
    assert meta.size == (112, 64, 176)
    assert datatype is None or meta.datatype == datatype
    assert datarange is None or meta.datarange == datarange
    assert meta.raw_datarange == meta.datarange
    assert meta.zunitdim == UnitDimension.time
    assert meta.zunitname == "ms"
    assert abs(meta.zunitfactor - 0.001) < 1.0e-5
    assert meta.hunitdim == UnitDimension.length
    assert meta.hunitname == "ft"
    assert abs(meta.hunitfactor - 0.3048) < 0.0001
    assert meta.zstart == 2500
    assert abs(meta.zinc - 4.125) < 0.0001
    assert meta.annotstart == (1234, 5678)
    assert meta.annotinc == (5, 2)
    # Corner coordinates are compared via the summed absolute error.
    expected_corners = np.array(((1000, 1000),
                                 (3775, 1000),
                                 (1000, 2890),
                                 (3775, 2890)))
    corner_error = np.abs(np.array(meta.corners) - expected_corners)
    assert np.sum(corner_error) < 0.0001
def explaincontents(expect, actual, delta):
    """
    Detailed checking of a small part of the standard test cube.
    A single trace that covers many special cases. Show an explanation
    of what is being tested as well as expected vs. actual results.
    See doc/testdata.png. This method is meant to be used to understand
    why a particular test has failed.
    """
    # (begin, end, description) sub-ranges of the trace at [50,22,:].
    # NOTE(review): sample index 63 falls between the (58, 63) and
    # (64, 128) rows and is never displayed -- confirm whether the first
    # of those rows should end at 64.
    table = [(  0,  16, "default(r/m/w)"),
             ( 16,  24, "written once  "),
             ( 24,  40, "written twice "),
             ( 40,  58, "written once  "),
             ( 58,  63, "default(r/m/w)"),
             ( 64, 128, "constant-zero "),
             (128, 176, "default(empty)")]
    print("Displaying the trace at [50,22,:]")
    for beg, end, text in table:
        ex = expect[50,22,beg:end]
        ac = actual[50,22,beg:end]
        if np.amin(ex) == np.amax(ex) and np.amin(ac) == np.amax(ac):
            # Both segments are constant: show one representative sample
            # each.  Bug fix: the original printed ac[1] here, which is
            # inconsistent with ex[0] (harmless only because the segment is
            # constant, and it would fail on a length-1 segment).
            print("  ", text, "expect", ex[0], "actual", ac[0])
        else:
            print("  ", text, "expect", ex, "actual", ac)
    print("  largest error in entire cube:", delta)
def checkContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, *, maxdelta = 0.001):
    """
    Read back the entire survey from one of the files created by
    createFancyFile() and compare with the expected results.
    Also check the metadata.
    """
    # Old-reader tests are skipped entirely when that library is absent.
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy():
        return
    expect = createFancyBuffer(defaultvalue, unwrittenvalue)
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader, io.StringIO() as bitbucket:
        # Exercise the debug log statements for coverage, sending the
        # verbose output to an in-memory sink.
        def verbose(*args, **kwargs):
            print(*args, file=bitbucket, **kwargs)
        checkmeta(reader)
        actual = np.zeros((112, 64, 176), dtype=np.float32)
        reader.read((0, 0, 0), actual, verbose = verbose)
        delta = np.amax(np.abs(expect - actual))
        # "not delta <= maxdelta" (rather than "delta > maxdelta") also
        # catches NaN deltas.
        if not delta <= maxdelta:
            explaincontents(expect, actual, delta)
        assert delta <= maxdelta
def compareArrays(expect, actual, value_epsilon = 0.02, count_epsilon = 0.01, *, verbose = False):
    """
    Return True when "actual" matches "expect" to within the tolerances.

    Each sample's error is measured relative to the full expected value
    range; a sample is "bad" when that relative error exceeds
    value_epsilon.  The arrays are accepted when the fraction of bad
    samples is at most count_epsilon (this tolerates e.g. edge effects
    in lowres data).
    """
    span = np.amax(expect) - np.amin(expect)
    total = expect.size
    # Per-sample error relative to the expected range; guard against a
    # constant "expect" array (span == 0).  Can exceed 1 if "actual" has
    # wild values.
    relative_error = np.abs(expect - actual) / (span if span else 1)
    bad = np.count_nonzero(relative_error > value_epsilon)
    ok = bad / total <= count_epsilon
    if verbose:
        print("{5}: {0:6d} of {1:7d} samples ({2:.2f}%) differ > {3:.2f}%. Allowed {4:.2f}%.".format(
            bad, total, 100.0 * bad / total,
            100.0 * value_epsilon, 100.0 * count_epsilon,
            "pass" if ok else "FAIL"))
    return ok
def showdecimation(lod0, lod1):
    """
    Input 4 hires traces (2,2,n) and a corresponding decimated
    trace (n//2) and display those to manually inspect the result.
    """
    print("  decimated from these input samples")
    for pos in range(0, lod0.shape[2], 2):
        # Each decimated sample comes from a 2x2x2 block of input samples.
        source_samples = list(lod0[:, :, pos:pos + 2].flat)
        print("{0:10.5g} {1}".format(lod1[pos // 2], source_samples))
def checkLodContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue):
    """
    As checkContents, but caller specifies which LOD to read and we
    allow some slop in the result since the "expect" array uses trivial
    decimation while the zgy writer uses something fancier.
    NOTE: Due to bugs in the old writer, no checks are done for samples
    where the fullres data has never been written. I have given up on
    figuring out the current behavior; I just know that it is wrong.
    """
    # Old-reader tests are skipped entirely when that library is absent.
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        # Compute the expected number of LOD levels: keep halving (with
        # round-up) until the survey fits in one brick, then confirm the
        # file agrees.
        nlods = 1
        size = np.array(reader.size, dtype=np.int64)
        while np.any(size > reader.bricksize):
            nlods += 1
            size = (size + 1) // 2
        assert nlods == reader.nlods
        for lod in range(0, nlods):
            step = 1<<lod
            # Reference: full-res cube, clipped to the written region,
            # then trivially decimated by striding.
            expect = createFancyBuffer(defaultvalue, unwrittenvalue)
            expect = expect[:,:,:128] # Hard coded edge of written data.
            expect = expect[::step,::step,::step]
            # Size of this LOD (rounded up); the last dimension is clipped
            # to the written region (128 samples at lod 0).
            size = (np.array(reader.size, dtype=np.int64) + (step-1)) // step
            size[2] = 128//step
            actual = np.zeros(size, dtype=np.float32)
            reader.read((0,0,0), actual, lod = lod)
            # Higher LODs get looser tolerances because the writer's
            # decimation algorithm differs from our trivial striding.
            ok = compareArrays(expect, actual,
                               value_epsilon = 0.02 if lod < 2 else 0.04,
                               count_epsilon = 0.01 if lod < 2 else 0.03)
            if not ok:
                # Diagnostics only: dump a section, a trace, and PNG images
                # around the "interesting" part of the survey.
                deltas = np.abs(expect - actual).astype(np.float64)
                # A single 2d section in the "interesting" part of the survey.
                actual_2d = actual[:,22//step,:]
                expect_2d = expect[:,22//step,:]
                deltas_2d = deltas[:,22//step,:]
                # A single trace in the "interesting" part of the survey.
                expect_1d = expect_2d[50//step,:]
                actual_1d = actual_2d[50//step,:]
                deltas_1d = deltas_2d[50//step,:]
                # Now visualize these for debugging
                savePNG(actual[:,22//step,:], "actual-" + str(lod) + ".png")
                savePNG(expect[:,22//step,:], "expect-" + str(lod) + ".png")
                savePNG(deltas[:,22//step,:], "deltas-" + str(lod) + ".png")
                print("\n{0} LOD {1} check: {2}".format(
                    filename, lod, ("pass" if ok else "FAIL")))
                print("Default", defaultvalue, "unwritten", unwrittenvalue)
                print("first sample expect {0} actual {1}".format(
                    expect[0,0,0], actual[0,0,0]))
                print("last sample expect {0} actual {1}".format(
                    expect[-1,-1,-1], actual[-1,-1,-1]))
                print("interesting trace expect", expect_1d,
                      "interesting trace actual", actual_1d,
                      "delta", deltas_1d,
                      sep="\n")
            assert ok
def checkRawContents(filename, zgyReaderFactory, defaultvalue, unwrittenvalue, *, maxdelta = 0.001):
    """
    As checkContents, but do the value conversion ourselves.
    There may be issues with never written bricks.
    """
    # Old-reader tests are skipped entirely when that library is absent.
    if zgyReaderFactory == oldzgy.ZgyReader and not HasOldZgy(): return
    expect = createFancyBuffer(defaultvalue, unwrittenvalue)
    with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
        # Map the file's sample data type to the matching numpy dtype so
        # we read the raw (unconverted) samples.
        dtype = {SampleDataType.int8: np.int8,
                 SampleDataType.int16: np.int16,
                 SampleDataType.float: np.float32 }[reader.datatype]
        checkmeta(reader)
        actual = np.zeros((112, 64, 176), dtype=dtype)
        reader.read((0,0,0), actual)
        #print("raw...", actual[50,22,:])
        if np.issubdtype(dtype, np.integer):
            # Undo the integer quantization ourselves: a linear map from
            # the storage type's full range onto the file's data range.
            iinfo = np.iinfo(dtype)
            actual = actual.astype(np.float32)
            a = (reader.datarange[1]-reader.datarange[0])/(iinfo.max-iinfo.min)
            b = reader.datarange[0] - a * iinfo.min
            actual *= a
            actual += b
        delta = np.amax(np.abs(expect - actual))
        # "not delta <= maxdelta" also catches NaN deltas.
        if not delta <= maxdelta:
            # A single trace in the "interesting" part of the survey.
            print("expect", expect[50,22,:])
            print("actual", actual[50,22,:])
            print("delta", delta)
        assert delta <= maxdelta
def computeStatisticsByRead(filename, zgyReaderFactory):
"""
Read back the entire survey from one of the files created by
createFancyFile() and compute statistics from the bulk data.
Concentrate on sum of samples and count of samples.
Also check the metadata.
"""
with zgyReaderFactory(filename, iocontext = SDCredentials()) as reader:
checkmeta(reader)
data = np.zeros((112, 64, 176), dtype=np.float32)
reader.read((0,0,0), data)
theSum = np.sum(data.flat, dtype=np.float64)
theCount = len(data.flat)
#print("Read sum {0}, sample count {1}".format(theSum, theCount))
#cnt = 0
#for x in (0, 1, 31, 97):
# c | |
# <gh_stars>0
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import os
import six
from bson.objectid import ObjectId
from .model_base import AccessControlledModel
from girder import events
from girder.constants import AccessType
from girder.exceptions import ValidationException, GirderException
from girder.utility.model_importer import ModelImporter
from girder.utility.progress import noProgress, setResponseTimeLimit
class Folder(AccessControlledModel):
"""
Folders are used to store items and can also store other folders in
a hierarchical way, like a directory on a filesystem. Every folder has
its own set of access control policies, but by default the access
control list is inherited from the folder's parent folder, if it has one.
Top-level folders are ones whose parent is a user or a collection.
"""
def initialize(self):
self.name = 'folder'
self.ensureIndices(('parentId', 'name', 'lowerName',
([('parentId', 1), ('name', 1)], {})))
self.ensureTextIndex({
'name': 10,
'description': 1
})
self.exposeFields(level=AccessType.READ, fields=(
'_id', 'name', 'public', 'publicFlags', 'description', 'created', 'updated',
'size', 'meta', 'parentId', 'parentCollection', 'creatorId',
'baseParentType', 'baseParentId'))
    def validate(self, doc, allowRename=False):
        """
        Validate the name and description of the folder, ensure that it is
        associated with a valid parent and that it has a unique name.

        :param doc: the folder document to validate.
        :param allowRename: if True and a folder or item exists with the same
            name, rename the folder so that it is unique.
        :returns: the validated folder document
        """
        from .item import Item
        # Normalize name/description; lowerName backs case-insensitive sorts
        # and lookups.
        # NOTE(review): if the rename loop below appends " (n)" to the name,
        # lowerName computed here is not refreshed -- confirm callers
        # re-derive it.
        doc['name'] = doc['name'].strip()
        doc['lowerName'] = doc['name'].lower()
        doc['description'] = doc['description'].strip()
        if not doc['name']:
            raise ValidationException('Folder name must not be empty.', 'name')
        if not doc['parentCollection'] in ('folder', 'user', 'collection'):
            # Internal error; this shouldn't happen
            raise GirderException('Invalid folder parent type: %s.' %
                                  doc['parentCollection'],
                                  'girder.models.folder.invalid-parent-type')
        name = doc['name']
        # If the folder already exists with the current name, don't check.
        # Although we don't want duplicate names, they can occur when there are
        # simultaneous uploads, and also because Mongo has no guaranteed
        # multi-collection uniqueness constraints. If this occurs, and we are
        # changing a non-name property, don't validate the name (since that may
        # fail). If the name is being changed, validate that it is probably
        # unique.
        checkName = '_id' not in doc or not self.findOne({'_id': doc['_id'], 'name': name})
        n = 0
        itemModel = Item()
        while checkName:
            # Look for a sibling folder with the candidate name, excluding
            # this document itself when it already has an _id.
            q = {
                'parentId': doc['parentId'],
                'name': name,
                'parentCollection': doc['parentCollection']
            }
            if '_id' in doc:
                q['_id'] = {'$ne': doc['_id']}
            dupFolder = self.findOne(q, fields=['_id'])
            # Items can only collide when the parent is itself a folder.
            if doc['parentCollection'] == 'folder':
                q = {
                    'folderId': doc['parentId'],
                    'name': name
                }
                dupItem = itemModel.findOne(q, fields=['_id'])
            else:
                dupItem = None
            if dupItem is None and dupFolder is None:
                # The candidate name is free; accept it.
                doc['name'] = name
                break
            if not allowRename:
                if dupFolder:
                    raise ValidationException('A folder with that name '
                                              'already exists here.', 'name')
                raise ValidationException('An item with that name already '
                                          'exists here.', 'name')
            # Try the next "<name> (n)" variant.
            n += 1
            name = '%s (%d)' % (doc['name'], n)
        return doc
    def load(self, id, level=AccessType.ADMIN, user=None, objectId=True,
             force=False, fields=None, exc=False):
        """
        We override load in order to ensure the folder has certain fields
        within it, and if not, we add them lazily at read time.

        :param id: The id of the resource.
        :type id: string or ObjectId
        :param user: The user to check access against.
        :type user: dict or None
        :param level: The required access type for the object.
        :type level: AccessType
        :param force: If you explicitly want to circumvent access
                      checking on this resource, set this to True.
        :type force: bool
        :returns: the folder document, or None.
        """
        # Ensure we include extra fields to do the migration below
        extraFields = {'baseParentId', 'baseParentType', 'parentId', 'parentCollection',
                       'name', 'lowerName'}
        loadFields = self._supplementFields(fields, extraFields)
        doc = super(Folder, self).load(
            id=id, level=level, user=user, objectId=objectId, force=force, fields=loadFields,
            exc=exc)
        if doc is not None:
            # Lazy migrations: older documents may predate these fields, so
            # derive them on first read and persist them back.
            if 'baseParentType' not in doc:
                pathFromRoot = self.parentsToRoot(doc, user=user, force=True)
                baseParent = pathFromRoot[0]
                doc['baseParentId'] = baseParent['object']['_id']
                doc['baseParentType'] = baseParent['type']
                self.update({'_id': doc['_id']}, {'$set': {
                    'baseParentId': doc['baseParentId'],
                    'baseParentType': doc['baseParentType']
                }})
            if 'lowerName' not in doc:
                doc['lowerName'] = doc['name'].lower()
                self.update({'_id': doc['_id']}, {'$set': {
                    'lowerName': doc['lowerName']
                }})
            if 'meta' not in doc:
                doc['meta'] = {}
                self.update({'_id': doc['_id']}, {'$set': {
                    'meta': {}
                }})
            # Strip any fields that were only loaded to run the migration.
            self._removeSupplementalFields(doc, fields)
        return doc
def getSizeRecursive(self, folder):
"""
Calculate the total size of the folder by recursing into all of its
descendant folders.
"""
size = folder['size']
q = {
'parentId': folder['_id'],
'parentCollection': 'folder'
}
for child in self.find(q):
size += self.getSizeRecursive(child)
return size
def setMetadata(self, folder, metadata, allowNull=False):
"""
Set metadata on a folder. A `ValidationException` is thrown in the
cases where the metadata JSON object is badly formed, or if any of the
metadata keys contains a period ('.').
:param folder: The folder to set the metadata on.
:type folder: dict
:param metadata: A dictionary containing key-value pairs to add to
the folder's meta field
:type metadata: dict
:param allowNull: Whether to allow `null` values to be set in the item's
metadata. If set to `False` or omitted, a `null` value will cause that
metadata field to be deleted.
:returns: the folder document
"""
if 'meta' not in folder:
folder['meta'] = {}
# Add new metadata to existing metadata
folder['meta'].update(six.viewitems(metadata))
# Remove metadata fields that were set to null (use items in py3)
if not allowNull:
toDelete = [k for k, v in six.viewitems(metadata) if v is None]
for key in toDelete:
del folder['meta'][key]
folder['updated'] = datetime.datetime.utcnow()
self.validateKeys(folder['meta'])
# Validate and save the item
return self.save(folder)
def deleteMetadata(self, folder, fields):
"""
Delete metadata on a folder. A `ValidationException` is thrown if the
metadata field names contain a period ('.') or begin with a dollar sign
('$').
:param folder: The folder to delete metadata from.
:type folder: dict
:param fields: An array containing the field names to delete from the
folder's meta field
:type field: list
:returns: the folder document
"""
self.validateKeys(fields)
if 'meta' not in folder:
folder['meta'] = {}
for field in fields:
folder['meta'].pop(field, None)
folder['updated'] = datetime.datetime.utcnow()
return self.save(folder)
def _updateDescendants(self, folderId, updateQuery):
"""
This helper is used to update all items and folders underneath a
folder. This is expensive, so think carefully before using it.
:param folderId: The _id of the folder at the root of the subtree.
:param updateQuery: The mongo query to apply to all of the children of
the folder.
:type updateQuery: dict
"""
from .item import Item
self.update(query={
'parentId': folderId,
'parentCollection': 'folder'
}, update=updateQuery, multi=True)
Item().update(query={
'folderId': folderId,
}, update=updateQuery, multi=True)
q = {
'parentId': folderId,
'parentCollection': 'folder'
}
for child in self.find(q):
self._updateDescendants(child['_id'], updateQuery)
def _isAncestor(self, ancestor, descendant):
"""
Returns whether folder "ancestor" is an ancestor of folder "descendant",
or if they are the same folder.
:param ancestor: The folder to test as an ancestor.
:type ancestor: folder
:param descendant: The folder to test as a descendant.
:type descendant: folder
"""
if ancestor['_id'] == descendant['_id']:
return True
if descendant['parentCollection'] != 'folder':
return False
descendant = self.load(descendant['parentId'], force=True)
if descendant is None:
return False
return self._isAncestor(ancestor, descendant)
    def move(self, folder, parent, parentType):
        """
        Move the given folder from its current parent to another parent object.
        Raises an exception if folder is an ancestor of parent.

        :param folder: The folder to move.
        :type folder: dict
        :param parent: The new parent object.
        :param parentType: The type of the new parent object (user, collection,
            or folder).
        :type parentType: str
        :returns: the saved folder document.
        """
        # Refuse to create a cycle: a folder may not be moved into itself or
        # into any of its own descendants.
        if (parentType == 'folder' and (
                self._isAncestor(folder, parent) or folder['_id'] == parent['_id'])):
            raise ValidationException(
                'You may not move a folder underneath itself.')
        folder['parentId'] = parent['_id']
        folder['parentCollection'] = parentType
        # Determine the new base (root) parent of the moved subtree.
        if parentType == 'folder':
            rootType, rootId = parent['baseParentType'], parent['baseParentId']
        else:
            rootType, rootId = parentType, parent['_id']
        if (folder['baseParentType'], folder['baseParentId']) !=\
                (rootType, rootId):
            # The subtree is changing roots: transfer its total size from the
            # old base parent to the new one, then rewrite the baseParent*
            # fields on every descendant folder and item.
            def propagateSizeChange(folder, inc):
                # Adjust the 'size' field on the folder's base parent document.
                ModelImporter.model(folder['baseParentType']).increment(query={
                    '_id': folder['baseParentId']
                }, field='size', amount=inc, multi=False)
            totalSize = self.getSizeRecursive(folder)
            # Subtract from the old root (before baseParent* is rewritten)...
            propagateSizeChange(folder, -totalSize)
            folder['baseParentType'] = rootType
            folder['baseParentId'] = rootId
            # ...and add to the new root.
            propagateSizeChange(folder, totalSize)
            self._updateDescendants(folder['_id'], {
                '$set': {
                    'baseParentType': rootType,
                    'baseParentId': rootId
                }
            })
        return self.save(folder)
def clean(self, folder, progress=None, **kwargs):
"""
Delete all contents underneath a folder recursively, but leave the
folder itself.
:param folder: The folder document to delete.
:type folder: dict
:param progress: A progress context to record progress on.
:type progress: girder.utility.progress.ProgressContext or None.
"""
from .item import Item
setResponseTimeLimit()
# Delete all child items
itemModel = Item()
items = itemModel.find({
'folderId': folder['_id']
| |
import copy
import glob
import json
import os
import re
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Union
import numpy as np
import pandas as pd
import pyarrow
import pyarrow.parquet as pq
import scipy.cluster.hierarchy
# Name of the column that holds resource capacity (megawatts).
CAPACITY = "mw"
# Aggregation rules applied when merging clustered resource rows:
# "sums" columns are summed, "means" columns are averaged weighted by the
# "weight" column (capacity), and "uniques" columns are carried through as
# identifying attributes.
MERGE = {
    "sums": [CAPACITY, "area"],
    "means": [
        "lcoe",
        "interconnect_annuity",
        "offshore_spur_miles",
        "spur_miles",
        "tx_miles",
        "site_substation_spur_miles",
        "substation_metro_tx_miles",
        "site_metro_spur_miles",
        "m_popden",
    ],
    "weight": CAPACITY,
    "uniques": ["ipm_region", "metro_id"],
}
# Maps normalized NREL ATB (technology, detail) keys to resource-group
# attributes.  A key element of None acts as a wildcard (see
# map_nrel_atb_technology).  Offshore wind OTRG/class 1-7 map to
# fixed-bottom turbines and 8-15 to floating turbines.
NREL_ATB_TECHNOLOGY_MAP = {
    ("utilitypv", None): {"technology": "utilitypv"},
    ("landbasedwind", None): {"technology": "landbasedwind"},
    ("offshorewind", None): {"technology": "offshorewind"},
    ("hydropower", None): {"technology": "hydro"},
    **{
        ("offshorewind", f"otrg{x}"): {
            "technology": "offshorewind",
            "turbine_type": "fixed",
        }
        for x in range(1, 8)
    },
    **{
        ("offshorewind", f"class{x}"): {
            "technology": "offshorewind",
            "turbine_type": "fixed",
        }
        for x in range(1, 8)
    },
    **{
        ("offshorewind", f"otrg{x}"): {
            "technology": "offshorewind",
            "turbine_type": "floating",
        }
        for x in range(8, 16)
    },
    **{
        ("offshorewind", f"class{x}"): {
            "technology": "offshorewind",
            "turbine_type": "floating",
        }
        for x in range(8, 16)
    },
}
# Maps normalized EIA technology names to resource-group attributes.
EIA_TECHNOLOGY_MAP = {
    "conventionalhydroelectric": {"technology": "hydro", "small": False},
    "smallhydroelectric": {"technology": "hydro", "small": True},
    "onshorewindturbine": {"technology": "landbasedwind"},
    "offshorewindturbine": {"technology": "offshorewind"},
    "solarphotovoltaic": {"technology": "utilitypv"},
}
def _normalize(x: Optional[str]) -> Optional[str]:
"""
Normalize string to lowercase, no whitespace, and no underscores.
Examples
--------
>>> _normalize('Offshore Wind')
'offshorewind'
>>> _normalize('OffshoreWind')
'offshorewind'
>>> _normalize('Offshore_Wind')
'offshorewind'
>>> _normalize(None) is None
True
"""
if not x:
return x
return re.sub(r"\s+|_", "", x.lower())
def map_nrel_atb_technology(tech: str, detail: str = None) -> Dict[str, Any]:
    """
    Map NREL ATB technology to resource groups.

    Parameters
    ----------
    tech
        Technology.
    detail
        Technology detail.

    Returns
    -------
    dict
        Key, value pairs identifying one or more resource groups.

    NOTE(review): for offshore wind, NREL_ATB_TECHNOLOGY_MAP treats
    OTRG/class 1-7 as fixed and 8-15 as floating; confirm that boundary
    against the ATB definitions.

    Examples
    --------
    >>> map_nrel_atb_technology('UtilityPV', 'LosAngeles')
    {'technology': 'utilitypv'}
    >>> map_nrel_atb_technology('LandbasedWind', 'LTRG1')
    {'technology': 'landbasedwind'}
    >>> map_nrel_atb_technology('OffShoreWind')
    {'technology': 'offshorewind'}
    >>> map_nrel_atb_technology('OffShoreWind', 'OTRG3')
    {'technology': 'offshorewind', 'turbine_type': 'fixed'}
    >>> map_nrel_atb_technology('OffShoreWind', 'OTRG9')
    {'technology': 'offshorewind', 'turbine_type': 'floating'}
    >>> map_nrel_atb_technology('Hydropower')
    {'technology': 'hydro'}
    >>> map_nrel_atb_technology('Hydropower', 'NSD4')
    {'technology': 'hydro'}
    >>> map_nrel_atb_technology('Unknown')
    {}
    """
    tech = _normalize(tech)
    detail = _normalize(detail)
    group: Dict[str, Any] = {}
    for (key_tech, key_detail), attributes in NREL_ATB_TECHNOLOGY_MAP.items():
        # A falsy (None) key element acts as a wildcard for that slot, so
        # generic entries match first and specific ones override them.
        tech_matches = not key_tech or tech == key_tech
        detail_matches = not key_detail or detail == key_detail
        if tech_matches and detail_matches:
            group.update(attributes)
    return group
def map_eia_technology(tech: str) -> Dict[str, Any]:
    """
    Map EIA technology to resource groups.

    Parameters
    ----------
    tech
        Technology.

    Returns
    -------
    dict
        Key, value pairs identifying one or more resource groups.

    Examples
    --------
    >>> map_eia_technology('Solar Photovoltaic')
    {'technology': 'utilitypv'}
    >>> map_eia_technology('solar_photovoltaic')
    {'technology': 'utilitypv'}
    >>> map_eia_technology('Onshore Wind Turbine')
    {'technology': 'landbasedwind'}
    >>> map_eia_technology('Offshore Wind Turbine')
    {'technology': 'offshorewind'}
    >>> map_eia_technology('Conventional Hydroelectric')
    {'technology': 'hydro', 'small': False}
    >>> map_eia_technology('Small Hydroelectric')
    {'technology': 'hydro', 'small': True}
    >>> map_eia_technology('Unknown')
    {}
    """
    normalized = _normalize(tech)
    attributes: Dict[str, Any] = {}
    for key, value in EIA_TECHNOLOGY_MAP.items():
        # An empty key acts as a wildcard matching any technology.
        if not key or normalized == key:
            attributes.update(value)
    return attributes
class Table:
    """
    Cached interface for tabular data.

    Supports parquet and csv formats.

    Parameters
    ----------
    path
        Path to dataset.
    df
        In-memory dataframe.

    Attributes
    ----------
    path : Optional[Union[str, os.PathLike]]
        Path to the dataset.
    df : Optional[pd.DataFrame]
        Cached dataframe.
    format : Optional[str]
        Dataset format ('parquet' or 'csv'), or `None` if in-memory only.
    columns : list
        Dataset column names.

    Raises
    ------
    ValueError
        Missing either path or dataframe.
    ValueError
        Dataframe columns are not all strings.

    Examples
    --------
    In-memory dataframe:

    >>> df = pd.DataFrame({'id': [1, 2], 'x': [10, 20]})
    >>> table = Table(df = df)
    >>> table.format is None
    True
    >>> table.columns
    ['id', 'x']
    >>> table.read()
       id   x
    0   1  10
    1   2  20
    >>> table.read(columns=['id'])
       id
    0   1
    1   2
    >>> table.clear()
    >>> table.df is not None
    True

    File dataset (csv):

    >>> import tempfile
    >>> fp = tempfile.NamedTemporaryFile()
    >>> df.to_csv(fp.name, index=False)
    >>> table = Table(path = fp.name)
    >>> table.format
    'csv'
    >>> table.columns
    ['id', 'x']
    >>> table.read(cache=False)
       id   x
    0   1  10
    1   2  20
    >>> table.df is None
    True
    >>> table.read(columns=['id'], cache=True)
       id
    0   1
    1   2
    >>> table.df is not None
    True
    >>> table.clear()
    >>> table.df is None
    True
    >>> fp.close()
    """

    def __init__(
        self,
        path: Optional[Union[str, os.PathLike]] = None,
        df: Optional[pd.DataFrame] = None,
    ) -> None:
        # Fail fast on a bad call, before attempting any file access.
        # (Previously this check ran last, after trying to open `path`,
        # and the message had a typo: 'Mising'.)
        if path is None and df is None:
            raise ValueError("Missing either path to tabular data or a pandas DataFrame")
        self.path = path
        self.df = df
        if df is not None and any(not isinstance(x, str) for x in df.columns):
            raise ValueError("Dataframe columns are not all strings")
        self.format = None
        self._dataset = None
        self._columns = None
        if path is not None:
            try:
                self._dataset = pq.ParquetDataset(path)
                self._columns = self._dataset.schema.names
                self.format = "parquet"
            except pyarrow.lib.ArrowInvalid:
                # Not readable as parquet: assume CSV file.
                self.format = "csv"

    @property
    def columns(self) -> list:
        """Dataset column names (read lazily for CSV files)."""
        if self.df is not None:
            return list(self.df.columns)
        if self._columns is None:
            if self.format == "csv":
                # Read only the header row to avoid loading the data.
                self._columns = pd.read_csv(self.path, nrows=0).columns
        return list(self._columns)

    def read(
        self, columns: Optional[Iterable] = None, cache: Optional[bool] = None
    ) -> pd.DataFrame:
        """
        Read data from memory or from disk.

        Parameters
        ----------
        columns
            Names of column to read. If `None`, all columns are read.
        cache
            Whether to cache the full dataset in memory. If `None`,
            the dataset is cached if `columns` is `None`, and not otherwise.

        Returns
        -------
        pd.DataFrame
            Data as a dataframe.
        """
        if self.df is not None:
            return self.df[columns] if columns is not None else self.df
        if cache is None:
            cache = columns is None
        # When caching, read every column so later calls are served from memory.
        read_columns = None if cache else columns
        if self.format == "csv":
            df = pd.read_csv(self.path, usecols=read_columns)
        elif self.format == "parquet":
            df = self._dataset.read(columns=read_columns).to_pandas()
        if cache:
            self.df = df
        return df[columns] if columns is not None else df

    def clear(self) -> None:
        """
        Clear the dataset cache.

        Only applies if :attr:`path` is set so that the dataset can be reread from file.
        """
        if self.path is not None:
            self.df = None
class ResourceGroup:
"""
Group of resources sharing common attributes.
Parameters
----------
group
Group metadata.
- `technology` : str
Resource type ('utilitypv', 'landbasedwind', or 'offshorewind').
- `existing` : bool
Whether resources are new (`False`, default) or existing (`True`).
- `tree` : str, optional
The name of the resource metadata attribute by
which to differentiate between multiple precomputed hierarchical trees.
Defaults to `None` (resource group does not represent hierarchical trees).
- `metadata` : str, optional
Relative path to resource metadata dataset (optional if `metadata` is `None`).
- `profiles` : str, optional
Relative path to resource profiles dataset.
- ... and any additional (optional) keys.
metadata
Resource metadata, with one resource per row.
- `id`: int
Resource identifier, unique within the group.
- `ipm_region` : str
IPM region to which the resource delivers power.
- `mw` : float
Maximum resource capacity in MW.
- `lcoe` : float, optional
Levelized cost of energy, used to guide the selection
(from lowest to highest) and clustering (by nearest) of resources.
If missing, selection and clustering is by largest and nearest `mw`.
Resources representing hierarchical trees (see `group.tree`)
require additional attributes.
- `parent_id` : int
Identifier of the resource formed by clustering this resource with the one
other resource with the same `parent_id`.
Only resources with `level` of 1 have no `parent_id`.
- `level` : int
Level of tree where the resource first appears, from `m`
(the number of resources at the base of the tree), to 1.
- `[group.tree]` : Any
Each unique value of this grouping attribute represents a precomputed
hierarchical tree. When clustering resources, every tree is traversed to its
crown before the singleton resources from the trees are clustered together.
The following resource attributes (all float) are propagaged as:
- weighted means (weighted by `mw`):
- `lcoe`
- `interconnect_annuity`
- `tx_miles`
- `spur_miles`
- `offshore_spur_miles`
- `site_substation_spur_miles`
- `substation_metro_tx_miles`
- `site_metro_spur_miles`
- sums:
- `mw`
- `area`
- uniques:
- `ipm_region`
- `metro_id`
profiles
Variable resource capacity profiles with normalized capacity factors
(from 0 to 1) for every hour of the year (either 8760 or 8784 for a leap year).
Each profile must be a column whose name matches the resource `metadata.id`.
path
Directory relative to which the file paths `group.metadata` and `group.profiles`
should be read.
Attributes
----------
group : Dict[str, Any]
metadata : Table
Cached interface to resource metadata.
profiles : Optional[Table]
Cached interface to resource profiles.
Examples
--------
>>> group = {'technology': 'utilitypv'}
>>> metadata = pd.DataFrame({'id': [0, 1], 'ipm_region': ['A', 'A'], 'mw': [1, 2]})
>>> profiles = pd.DataFrame({'0': np.full(8784, 0.1), '1': np.full(8784, 0.4)})
>>> rg = ResourceGroup(group, | |
<filename>py_extrema/utils.py<gh_stars>0
from numba import njit, jit, guvectorize
from collections import OrderedDict
import numpy as np
import numexpr as ne
import attr
import pandas as pd
from itertools import product
from scipy.interpolate import RegularGridInterpolator, interpn
class FiniteDictionary(OrderedDict):
    """An :class:`OrderedDict` holding at most ``maxlen`` items.

    When a new key is inserted while the dictionary is full, the oldest
    (first-inserted) entry is evicted (FIFO).
    """

    def __init__(self, maxlen, *args, **kwa):
        # Note: the size limit is only enforced by __setitem__; items passed
        # directly to the constructor are stored by OrderedDict.__init__.
        self._maxlen = maxlen
        super().__init__(*args, **kwa)

    @property
    def maxlen(self):
        """Maximum number of items the dictionary may hold."""
        return self._maxlen

    @maxlen.setter
    def maxlen(self, v):
        if v < 0:
            # Bug fix: previously `Exception('Invalid maxlen %s', v)` passed
            # the value as a second argument, so it was never interpolated
            # into the message. ValueError is also the appropriate type.
            raise ValueError('Invalid maxlen %s' % v)
        self._maxlen = v

    def __setitem__(self, k, v, *args, **kwa):
        if len(self) == self.maxlen and k not in self:
            # Full and inserting a new key: evict the oldest entry.
            self.popitem(last=False)
        super().__setitem__(k, v, *args, **kwa)

    def last(self):
        """Return the value of the most recently inserted key."""
        return self[next(reversed(self))]
def get_xyz_keys(Ndim):
    """Return coordinate-axis names for ``Ndim`` dimensions.

    Up to three dimensions, the conventional names 'x', 'y', 'z' are used;
    beyond that, axes are named 'x1', 'x2', ..., 'xNdim'.
    """
    if Ndim > 3:
        return [f'x{axis}' for axis in range(1, Ndim + 1)]
    return ['x', 'y', 'z'][:Ndim]
@attr.s(frozen=True)
class CriticalPoints:
    """Immutable container for the critical points of a field."""
    # Converters coerce inputs to numpy arrays of a minimum dimensionality.
    pos = attr.ib(converter=np.atleast_2d)      # (npt, Ndim) positions
    eigvals = attr.ib(converter=np.atleast_2d)  # (npt, Ndim) Hessian eigenvalues
    kind = attr.ib(converter=np.atleast_1d)     # per-point critical-point kind
    hessian = attr.ib(converter=np.atleast_3d)  # (npt, Ndim, Ndim) Hessian matrices
    npt = attr.ib(converter=int)                # number of points
    dens = attr.ib(converter=np.atleast_1d)     # per-point field value ("dens")
    sigma = attr.ib(converter=np.atleast_1d)    # per-point scale -- presumably a smoothing scale; TODO confirm
    def as_dataframe(self):
        """Flatten the points into a pandas DataFrame.

        Columns: one per coordinate (named via ``get_xyz_keys``), eigenvalues
        ``l1..lN``, upper-triangular Hessian entries ``hij`` (i <= j), plus
        ``kind`` and ``dens``. ``sigma`` and ``npt`` are not included.
        """
        Ndim = self.pos.shape[1]
        keys = get_xyz_keys(Ndim)
        data = {}
        for i in range(Ndim):
            data[keys[i]] = self.pos[..., i]
            data[f'l{i+1}'] = self.eigvals[..., i]
            for j in range(i, Ndim):
                data[f'h{i+1}{j+1}'] = self.hessian[..., i, j]
        data['kind'] = self.kind
        data['dens'] = self.dens
        return pd.DataFrame(data)
@njit
def unravel_index(index, shape):
    """Convert a flat (C-order) index into per-axis indices.

    Equivalent to ``np.unravel_index`` for a single index, but returns an
    int32 array instead of a tuple (numba-friendly).
    """
    ndim = len(shape)
    coords = np.zeros(ndim, dtype=np.int32)
    # Peel off the fastest-varying (last) axis first.
    for axis in range(ndim - 1, -1, -1):
        extent = shape[axis]
        coords[axis] = index % extent
        index //= extent
    return coords
def solve(A, B):
    '''Solve the equation A*X = B.

    For 2x2 and 3x3 systems, uses explicit cofactor (Cramer) formulas
    evaluated with numexpr over the leading batch axes. Only the upper
    triangle of A is read, so these fast paths are valid only for symmetric
    A (e.g. a Hessian). Any other size falls back to ``np.linalg.solve``.
    '''
    N = A.shape[-1]
    if N == 2:
        # Symmetric 2x2 system: A = [[a, c], [c, b]].
        a = A[..., 0, 0]
        b = A[..., 1, 1]
        c = A[..., 0, 1]
        b1 = B[..., 0]
        b2 = B[..., 1]
        # det = a*b - c^2 (symmetric determinant).
        det = ne.evaluate('a*b - c**2')
        X = np.zeros(B.shape, order='F')
        X[..., 0] = ne.evaluate('(b*b1 - b2*c) / det')
        X[..., 1] = ne.evaluate('(a*b2 - b1*c) / det')
        return X
    elif N == 3:
        # Symmetric 3x3 system: A = [[a, d, e], [d, b, f], [e, f, c]].
        a = A[..., 0, 0]
        b = A[..., 1, 1]
        c = A[..., 2, 2]
        d = A[..., 0, 1]
        e = A[..., 0, 2]
        f = A[..., 1, 2]
        b1 = B[..., 0]
        b2 = B[..., 1]
        b3 = B[..., 2]
        # Precompute squares used in both the determinant and cofactors.
        d2 = d**2
        f2 = f**2
        e2 = e**2
        det = ne.evaluate('a*b*c - a*f2 - b*e2 - c*d2 + 2*d*e*f')
        X = np.zeros(B.shape, order='F')
        X[..., 0] = ne.evaluate('(b*b1*c - b2*c*d - b*b3*e + b3*d*f + b2*e*f - b1*f2) / det')
        X[..., 1] = ne.evaluate('(a*b2*c - b1*c*d + b3*d*e - b2*e2 - a*b3*f + b1*e*f) / det')
        X[..., 2] = ne.evaluate('(a*b*b3 - b3*d2 - b*b1*e + b2*d*e - a*b2*f + b1*d*f) / det')
        return X
    else:
        # General case: defer to LAPACK.
        return np.linalg.solve(A, B)
@guvectorize(['void(float64[:], float64[:,:,:,:], float64[:])'],
             '(N),(M,i,i,i)->(M)')
def trilinear_interpolation(pos, v, ret):
    '''Compute the trilinear interpolation of data at given position.

    Arguments
    ---------
    pos : (Ndim, ) float array
        The position w.r.t. the lower left edge of the cube
        (each component expected in [0, 1]).
    v : (M, 2, 2, 2) float array
        The values at the 8 cube corners, for M independent channels;
        all channels share the same interpolation weights.

    Returns
    -------
    interp : (M, ) float array
        The interpolated value for each channel (written into ``ret``).

    Notes
    -----
    The original code is from
    http://paulbourke.net/miscellaneous/interpolation/
    '''
    xl, yl, zl = pos
    xr, yr, zr = 1-pos
    # Note the (inverse) order here!
    x = (xr, xl)
    y = (yr, yl)
    z = (zr, zl)
    ret[:] = 0
    # Each corner contributes its value weighted by the volume of the
    # sub-box diagonally opposite to it.
    for i in range(2):
        for j in range(2):
            for k in range(2):
                vol = x[i] * y[j] * z[k]
                ret[:] += v[:, i, j, k] * vol
@njit
def gradient(A, axis, dx=1):
    """Finite-difference gradient of a 3D array along ``axis``.

    Mirrors ``np.gradient``: central differences in the interior and
    one-sided differences at the two boundary slices, with uniform grid
    spacing ``dx``. ``axis`` must be 0, 1 or 2.
    """
    out = np.zeros_like(A)
    # Reusable index triplets: ijk = current point, ijkl/ijkr = its left and
    # right neighbours along ``axis``.
    ijk = np.array([0, 0, 0], dtype=np.int32)
    ijkl = np.array([0, 0, 0], dtype=np.int32)
    ijkr = np.array([0, 0, 0], dtype=np.int32)
    i0 = j0 = k0 = 0
    iN, jN, kN = A.shape
    # Shrink the loop range along ``axis`` so central differences only touch
    # interior points; edges are handled separately below.
    if axis == 0:
        i0 += 1
        iN -= 1
    elif axis == 1:
        j0 += 1
        jN -= 1
    elif axis == 2:
        k0 += 1
        kN -= 1
    # Interior: central difference (A[right] - A[left]) / (2*dx).
    for i in range(i0, iN):
        ijk[0] = ijkl[0] = ijkr[0] = i
        if axis == 0:
            ijkl[0] -= 1
            ijkr[0] += 1
        for j in range(j0, jN):
            ijk[1] = ijkl[1] = ijkr[1] = j
            if axis == 1:
                ijkl[1] -= 1
                ijkr[1] += 1
            for k in range(k0, kN):
                ijk[2] = ijkl[2] = ijkr[2] = k
                if axis == 2:
                    ijkl[2] -= 1
                    ijkr[2] += 1
                out[i, j, k] = (A[ijkr[0], ijkr[1], ijkr[2]] - A[ijkl[0], ijkl[1], ijkl[2]]) / 2 / dx
    # Left edge: one-sided forward difference (A[right] - A[here]) / dx.
    if axis == 0:
        i0 = 0
        iN = 1
    elif axis == 1:
        j0 = 0
        jN = 1
    elif axis == 2:
        k0 = 0
        kN = 1
    for i in range(i0, iN):
        ijk[0] = ijkr[0] = i
        if axis == 0:
            ijkr[0] += 1
        for j in range(j0, jN):
            ijk[1] = ijkr[1] = j
            if axis == 1:
                ijkr[1] += 1
            for k in range(k0, kN):
                ijk[2] = ijkr[2] = k
                if axis == 2:
                    ijkr[2] += 1
                out[i, j, k] = (A[ijkr[0], ijkr[1], ijkr[2]] - A[ijk[0], ijk[1], ijk[2]]) / dx
    # Right edge: one-sided backward difference (A[here] - A[left]) / dx.
    if axis == 0:
        i0 = A.shape[0]-1
        iN = A.shape[0]
    elif axis == 1:
        j0 = A.shape[1]-1
        jN = A.shape[1]
    elif axis == 2:
        k0 = A.shape[2]-1
        kN = A.shape[2]
    for i in range(i0, iN):
        ijk[0] = ijkl[0] = i
        if axis == 0:
            ijkl[0] -= 1
        for j in range(j0, jN):
            ijk[1] = ijkl[1] = j
            if axis == 1:
                ijkl[1] -= 1
            for k in range(k0, kN):
                ijk[2] = ijkl[2] = k
                if axis == 2:
                    ijkl[2] -= 1
                out[i, j, k] = (A[ijk[0], ijk[1], ijk[2]] - A[ijkl[0], ijkl[1], ijkl[2]]) / dx
    return out
@jit(looplift=True)
def measure_hessian_3d(position, data, LE=np.array([0, 0, 0])):
    '''Compute the value of the hessian of the field at the given position.

    Arguments
    ---------
    position : ndarray (Npt, 3)
        The position of the points in space in pixel units.
    data : ndarray (N, N, N)
        The field itself. Assumed cubic; accessed with periodic boundary
        conditions (indices taken modulo N).
    LE : ndarray (3, )
        Left edge of the box, subtracted from each position.

    Returns
    -------
    ret : ndarray (Npt, 3, 3)
        The symmetric Hessian, trilinearly interpolated at each position.
    '''
    LE = np.asarray(LE)
    Npt = len(position)
    N = data.shape[0]
    Ndim = data.ndim
    # 6x6x6 neighborhood per point: wide enough for two nested
    # finite-difference gradients plus the surrounding 2x2x2 cube.
    buff = np.empty((6, 6, 6))
    # Contains the value of h_ij at the corner
    hij_buff = np.empty((6, 2, 2, 2))
    tmp_buff = np.empty((6, 6, 6))
    ret = np.empty((Npt, 3, 3))
    ipos = np.empty(Ndim, dtype=np.int32)
    jpos = np.empty(Ndim, dtype=np.int32)
    dpos = np.empty(Ndim, dtype=np.float64)
    for ipt in range(Npt):
        pos = position[ipt] - LE
        # Integer corner of the 6-cell window. NOTE(review): the float ->
        # int32 assignment truncates toward zero, which equals floor only
        # for non-negative values -- presumably positions satisfy pos >= 2
        # after subtracting LE; TODO confirm.
        ipos[:] = pos-2
        jpos[:] = ipos+6
        # Fractional offset of the point within its central cell.
        dpos[:] = pos - ipos - 2
        # Copy data with periodic boundaries
        for i0, i in enumerate(range(ipos[0], jpos[0])):
            for j0, j in enumerate(range(ipos[1], jpos[1])):
                for k0, k in enumerate(range(ipos[2], jpos[2])):
                    buff[i0, j0, k0] = data[i % N, j % N, k % N]
        # Compute hessian using finite difference
        # Only the 6 independent components (lower triangle) are stored,
        # flattened by ii in the order (0,0), (1,0), (1,1), (2,0), (2,1), (2,2).
        ii = 0
        for i in range(3):
            for jdim in range(i+1):
                tmp_buff[:] = gradient(gradient(buff, axis=i), axis=jdim)
                # Keep the 2x2x2 cube of values surrounding the point.
                hij_buff[ii, :, :, :] = tmp_buff[2:4, 2:4, 2:4]
                ii += 1
        # Perform trilinear interpolation of the hessian
        tmp = trilinear_interpolation(dpos, hij_buff)
        ii = 0
        for i in range(3):
            for jdim in range(i+1):
                # Fill both (i, j) and (j, i): the Hessian is symmetric.
                ret[ipt, i, jdim] = \
                    ret[ipt, jdim, i] = tmp[ii]
                ii += 1
    return ret
def measure_hessian(position, data, LE=np.array([0, 0, 0])):
    """Interpolate the Hessian of ``data`` at each position (pixel units).

    In 3D, dispatches to the jitted :func:`measure_hessian_3d`. Otherwise,
    the Hessian is built from nested ``np.gradient`` calls on a periodically
    padded grid and sampled with a scipy ``RegularGridInterpolator``.

    Returns an ``(Npt, Ndim, Ndim)`` array of symmetric Hessians.
    """
    ndim = data.ndim
    if ndim == 3:
        return measure_hessian_3d(position, data, LE)
    npt = len(position)
    n = data.shape[0]
    # Pad one cell on each side (periodic wrap) so interpolation close to
    # the domain boundary sees valid neighbours.
    padded = np.pad(data, [(1, 1)] * ndim, 'wrap')
    axes = [np.arange(-1, n + 1)] * ndim
    first_derivs = [np.gradient(padded, axis=axis) for axis in range(ndim)]
    # Upper-triangular second derivatives, flattened in (i, j >= i) order.
    flat_hessian = np.stack(
        [
            np.gradient(first_derivs[i], axis=j)
            for i in range(ndim)
            for j in range(i, ndim)
        ],
        axis=-1,
    )
    sampled = RegularGridInterpolator(axes, flat_hessian)(position)
    # Unpack the flattened components into symmetric Ndim x Ndim matrices.
    hessians = np.empty((npt, ndim, ndim))
    component = 0
    for i in range(ndim):
        for j in range(i, ndim):
            hessians[:, i, j] = hessians[:, j, i] = sampled[:, component]
            component += 1
    return hessians
@jit(looplift=True)
def measure_gradient(position, data, LE=np.array([0, 0, 0])):
'''Compute the value of the gradient of the field at the given position.
Arguments
---------
position : ndarray (Npt, Ndim)
The position of the points in space in pixel units.
data : ndarray (Npt, Npt, Npt)
The field itself
'''
LE = np.asarray(LE)
Npt = len(position)
N = data.shape[0]
buff = np.empty((4, 4, 4))
# Contains the value of h_ij at the corner
grad_buff = np.empty((3, 2, 2, 2))
tmp_buff = np.empty((4, 4, 4))
ret = np.empty((Npt, 3))
ipos = np.empty(3, dtype=np.int32)
jpos | |
'zhū',
0x7027: 'lóng,shuāng',
0x7028: 'lài',
0x7029: 'duì',
0x702A: 'fàn',
0x702B: 'hú',
0x702C: 'lài',
0x702D: 'shū',
0x702E: 'lián',
0x702F: 'yíng',
0x7030: 'mí',
0x7031: 'jì',
0x7032: 'liàn',
0x7033: 'jiàn,zùn',
0x7034: 'yīng,yǐng,yìng',
0x7035: 'fèn',
0x7036: 'lín',
0x7037: 'yì',
0x7038: 'jiān',
0x7039: 'yuè',
0x703A: 'chán',
0x703B: 'dài',
0x703C: 'ráng,nǎng',
0x703D: 'jiǎn',
0x703E: 'lán',
0x703F: 'fán',
0x7040: 'shuàng',
0x7041: 'yuān',
0x7042: 'zhuó,jiào,zé',
0x7043: 'fēng',
0x7044: 'shè',
0x7045: 'lěi',
0x7046: 'lán',
0x7047: 'cóng',
0x7048: 'qú',
0x7049: 'yōng',
0x704A: 'qián',
0x704B: 'fǎ',
0x704C: 'guàn',
0x704D: 'jué',
0x704E: 'yàn',
0x704F: 'hào',
0x7050: 'yíng',
0x7051: 'sǎ',
0x7052: 'zàn,cuán',
0x7053: 'luán,luàn',
0x7054: 'yàn',
0x7055: 'lí',
0x7056: 'mǐ',
0x7057: 'shàn',
0x7058: 'tān',
0x7059: 'dǎng,tǎng',
0x705A: 'jiǎo',
0x705B: 'chǎn',
0x705C: 'yíng',
0x705D: 'hào',
0x705E: 'bà',
0x705F: 'zhú',
0x7060: 'lǎn',
0x7061: 'lán',
0x7062: 'nǎng',
0x7063: 'wān',
0x7064: 'luán',
0x7065: 'xún,quán,quàn',
0x7066: 'xiǎn',
0x7067: 'yàn',
0x7068: 'gàn',
0x7069: 'yàn',
0x706A: 'yù',
0x706B: 'huǒ',
0x706C: 'huǒ,biāo',
0x706D: 'miè',
0x706E: 'guāng',
0x706F: 'dēng',
0x7070: 'huī',
0x7071: 'xiāo',
0x7072: 'xiāo',
0x7073: 'huī',
0x7074: 'hōng',
0x7075: 'líng',
0x7076: 'zào',
0x7077: 'zhuàn',
0x7078: 'jiǔ',
0x7079: 'zhà,yù',
0x707A: 'xiè',
0x707B: 'chì',
0x707C: 'zhuó',
0x707D: 'zāi',
0x707E: 'zāi',
0x707F: 'càn',
0x7080: 'yáng',
0x7081: 'qì',
0x7082: 'zhōng',
0x7083: 'fén,bèn',
0x7084: 'niǔ',
0x7085: 'jiǒng,guì',
0x7086: 'wén',
0x7087: 'pū',
0x7088: 'yì',
0x7089: 'lú',
0x708A: 'chuī',
0x708B: 'pī',
0x708C: 'kài',
0x708D: 'pàn',
0x708E: 'yán',
0x708F: 'yán',
0x7090: 'pàng,fēng',
0x7091: 'mù',
0x7092: 'chǎo',
0x7093: 'liào',
0x7094: 'quē,guì',
0x7095: 'kàng',
0x7096: 'dùn',
0x7097: 'guāng',
0x7098: 'xìn',
0x7099: 'zhì',
0x709A: 'guāng',
0x709B: 'guāng',
0x709C: 'wěi',
0x709D: 'qiàng',
0x709E: 'biān',
0x709F: 'dá',
0x70A0: 'xiá',
0x70A1: 'zhēng',
0x70A2: 'zhú',
0x70A3: 'kě',
0x70A4: 'zhào,zhāo',
0x70A5: 'fú',
0x70A6: 'bá',
0x70A7: 'xiè',
0x70A8: 'xiè',
0x70A9: 'lìng',
0x70AA: 'zhuō,chù',
0x70AB: 'xuàn',
0x70AC: 'jù',
0x70AD: 'tàn',
0x70AE: 'páo,bāo,pào',
0x70AF: 'jiǒng',
0x70B0: 'páo,fǒu',
0x70B1: 'tái',
0x70B2: 'tái',
0x70B3: 'bǐng',
0x70B4: 'yǎng',
0x70B5: 'tōng',
0x70B6: 'shǎn,qián,shān',
0x70B7: 'zhù',
0x70B8: 'zhà,zhá',
0x70B9: 'diǎn',
0x70BA: 'wéi,wèi',
0x70BB: 'shí',
0x70BC: 'liàn',
0x70BD: 'chì',
0x70BE: 'huǎng',
0x70BF: 'zhōu',
0x70C0: 'hū',
0x70C1: 'shuò',
0x70C2: 'làn',
0x70C3: 'tīng',
0x70C4: 'jiǎo,yào',
0x70C5: 'xù',
0x70C6: 'héng',
0x70C7: 'quǎn',
0x70C8: 'liè',
0x70C9: 'huàn',
0x70CA: 'yáng,yàng',
0x70CB: 'xiāo',
0x70CC: 'xiū',
0x70CD: 'xiǎn',
0x70CE: 'yín',
0x70CF: 'wū',
0x70D0: 'zhōu',
0x70D1: 'yáo',
0x70D2: 'shì',
0x70D3: 'wēi',
0x70D4: 'tóng,dòng',
0x70D5: 'miè',
0x70D6: 'zāi',
0x70D7: 'kài',
0x70D8: 'hōng',
0x70D9: 'lào,luò',
0x70DA: 'xiá',
0x70DB: 'zhú',
0x70DC: 'xuǎn',
0x70DD: 'zhēng',
0x70DE: 'pò',
0x70DF: 'yān',
0x70E0: 'huí,huǐ',
0x70E1: 'guāng',
0x70E2: 'chè',
0x70E3: 'huī',
0x70E4: 'kǎo',
0x70E5: 'jù',
0x70E6: 'fán',
0x70E7: 'shāo',
0x70E8: 'yè',
0x70E9: 'huì',
0x70EB: 'tàng',
0x70EC: 'jìn',
0x70ED: 'rè',
0x70EE: 'liè',
0x70EF: 'xī',
0x70F0: 'fú,páo',
0x70F1: 'jiǒng',
0x70F2: 'xiè,chè',
0x70F3: 'pǔ',
0x70F4: 'tīng',
0x70F5: 'zhuó',
0x70F6: 'tǐng',
0x70F7: 'wán',
0x70F8: 'hǎi',
0x70F9: 'pēng',
0x70FA: 'lǎng',
0x70FB: 'yàn',
0x70FC: 'xù',
0x70FD: 'fēng',
0x70FE: 'chì',
0x70FF: 'róng',
0x7100: 'hú',
0x7101: 'xī',
0x7102: 'shū',
0x7103: 'hè',
0x7104: 'xūn,hūn',
0x7105: 'kù',
0x7106: 'juān,yè',
0x7107: 'xiāo',
0x7108: 'xī',
0x7109: 'yān',
0x710A: 'hàn',
0x710B: 'zhuàng',
0x710C: 'qū,jùn',
0x710D: 'dì',
0x710E: 'xiè,chè',
0x710F: 'jí,qì',
0x7110: 'wù',
0x7111: 'yān',
0x7112: 'lǚ',
0x7113: 'hán',
0x7114: 'yàn',
0x7115: 'huàn',
0x7116: 'mèn',
0x7117: 'jú',
0x7118: 'dào,tāo',
0x7119: 'bèi',
0x711A: 'fén',
0x711B: 'lìn',
0x711C: 'kūn',
0x711D: 'hùn',
0x711E: 'tūn',
0x711F: 'xī',
0x7120: 'cuì',
0x7121: 'wú,mó',
0x7122: 'hōng',
0x7123: 'chǎo,jù',
0x7124: 'fǔ',
0x7125: 'wò,ài',
0x7126: 'jiāo',
0x7127: 'zǒng,cōng',
0x7128: 'fèng',
0x7129: 'píng',
0x712A: 'qióng',
0x712B: 'ruò',
0x712C: 'xī,yì',
0x712D: 'qióng',
0x712E: 'xìn',
0x712F: 'zhuō,chāo',
0x7130: 'yàn',
0x7131: 'yàn',
0x7132: 'yì',
0x7133: 'jué',
0x7134: 'yù',
0x7135: 'gàng',
0x7136: 'rán',
0x7137: 'pí',
0x7138: 'xiǒng,yīng',
0x7139: 'gàng',
0x713A: 'shēng',
0x713B: 'chàng',
0x713C: 'shāo',
0x713D: 'xiǒng,yīng',
0x713E: 'niǎn',
0x713F: 'gēng',
0x7140: 'qū',
0x7141: 'chén',
0x7142: 'hè',
0x7143: 'kuǐ',
0x7144: 'zhǒng',
0x7145: 'duàn',
0x7146: 'xiā',
0x7147: 'huī,yùn,xūn',
0x7148: 'fèng',
0x7149: 'liàn',
0x714A: 'xuān',
0x714B: 'xīng',
0x714C: 'huáng',
0x714D: 'jiǎo,qiāo',
0x714E: 'jiān',
0x714F: 'bì',
0x7150: 'yīng',
0x7151: 'zhǔ',
0x7152: 'wěi',
0x7153: 'tuān',
0x7154: 'shǎn,qián,shān',
0x7155: 'xī,yí',
0x7156: 'nuǎn,xuān',
0x7157: 'nuǎn',
0x7158: 'chán',
0x7159: 'yān',
0x715A: 'jiǒng',
0x715B: 'jiǒng',
0x715C: 'yù',
0x715D: 'mèi',
0x715E: 'shā,shà',
0x715F: 'wèi',
0x7160: 'yè,zhá',
0x7161: 'jìn',
0x7162: 'qióng',
0x7163: 'róu',
0x7164: 'méi',
0x7165: 'huàn',
0x7166: 'xù',
0x7167: 'zhào',
0x7168: 'wēi',
0x7169: 'fán',
0x716A: 'qiú',
0x716B: 'suì',
0x716C: 'yáng,yàng',
0x716D: 'liè',
0x716E: 'zhǔ',
0x716F: 'jiē',
0x7170: 'zào',
0x7171: 'guā',
0x7172: 'bāo',
0x7173: 'hú',
0x7174: 'yūn,yǔn',
0x7175: 'nǎn',
0x7176: 'shì',
0x7177: 'huǒ',
0x7178: 'biān',
0x7179: 'gòu',
0x717A: 'tuì',
0x717B: 'táng',
0x717C: 'chǎo',
0x717D: 'shān',
0x717E: 'ēn,yūn',
0x717F: 'bó',
0x7180: 'huǎng',
0x7181: 'xié',
0x7182: 'xì',
0x7183: 'wù',
0x7184: 'xī',
0x7185: 'yūn,yǔn',
0x7186: 'hé',
0x7187: 'hè,xiāo',
0x7188: 'xī',
0x7189: 'yún',
0x718A: 'xióng',
0x718B: 'xióng',
0x718C: 'shǎn',
0x718D: 'qióng',
0x718E: 'yào',
0x718F: 'xūn,xùn',
0x7190: 'mì',
0x7191: 'lián',
0x7192: 'yíng',
0x7193: 'wǔ',
0x7194: 'róng',
0x7195: 'gòng',
0x7196: 'yàn',
0x7197: 'qiàng',
0x7198: 'liū',
0x7199: 'xī',
0x719A: 'bì',
0x719B: 'biāo',
0x719C: 'cōng,zǒng',
0x719D: 'lù,āo',
0x719E: 'jiān',
0x719F: 'shú,shóu',
0x71A0: 'yì',
0x71A1: 'lóu',
0x71A2: 'péng,fēng',
0x71A3: 'suī,cuǐ',
0x71A4: 'yì',
0x71A5: 'tēng,tōng',
0x71A6: 'jué',
0x71A7: 'zōng',
0x71A8: 'yùn,yù',
0x71A9: 'hù',
0x71AA: 'yí',
0x71AB: 'zhì',
0x71AC: 'āo,áo',
0x71AD: 'wèi',
0x71AE: 'liǔ',
0x71AF: 'hàn,rǎn',
0x71B0: 'ōu,ǒu',
0x71B1: 'rè',
0x71B2: 'jiǒng',
0x71B3: 'màn',
0x71B4: 'kūn',
0x71B5: 'shāng',
0x71B6: 'cuàn',
0x71B7: 'zèng',
0x71B8: 'jiān',
0x71B9: 'xī',
0x71BA: 'xī',
0x71BB: 'xī',
0x71BC: 'yì',
0x71BD: 'xiào',
0x71BE: 'chì',
0x71BF: 'huáng,huǎng',
0x71C0: 'chǎn,dǎn,chàn',
0x71C1: 'yè',
0x71C2: 'tán',
0x71C3: 'rán',
0x71C4: 'yàn',
0x71C5: 'xún',
0x71C6: 'qiāo',
0x71C7: 'jùn',
0x71C8: 'dēng',
0x71C9: 'dùn',
0x71CA: 'shēn',
0x71CB: 'jiāo,qiáo,jué,zhuó',
0x71CC: 'fén',
0x71CD: 'sī',
0x71CE: 'liáo,liǎo',
0x71CF: 'yù',
0x71D0: 'lín',
0x71D1: 'tóng,dòng',
0x71D2: 'shāo',
0x71D3: 'fén',
0x71D4: 'fán',
0x71D5: 'yàn,yān',
0x71D6: 'xún',
0x71D7: 'làn',
0x71D8: 'měi',
0x71D9: 'tàng',
0x71DA: 'yì',
0x71DB: 'jiǒng',
0x71DC: 'mèn',
0x71DD: 'zhǔ',
0x71DE: 'jiǎo',
0x71DF: 'yíng',
0x71E0: 'yù',
0x71E1: 'yì',
0x71E2: 'xué',
0x71E3: 'lán',
0x71E4: 'tài,liè',
0x71E5: 'zào',
0x71E6: 'càn',
0x71E7: 'suì',
0x71E8: 'xī',
0x71E9: 'què',
0x71EA: 'zǒng',
0x71EB: 'lián',
0x71EC: 'huǐ',
0x71ED: 'zhú',
0x71EE: 'xiè',
0x71EF: 'líng',
0x71F0: 'wēi',
0x71F1: 'yì',
0x71F2: 'xié',
0x71F3: 'zhào',
0x71F4: 'huì',
0x71F5: 'dá',
0x71F6: 'nóng',
0x71F7: 'lán',
0x71F8: 'xū',
0x71F9: 'xiǎn',
0x71FA: 'hè',
0x71FB: 'xūn',
0x71FC: 'jìn',
0x71FD: 'chóu',
0x71FE: 'dào,tāo',
0x71FF: 'yào',
0x7200: 'hè',
0x7201: 'làn',
0x7202: 'biāo',
0x7203: 'róng,yíng',
0x7204: 'lì,liè',
0x7205: 'mò',
0x7206: 'bào',
0x7207: 'ruò',
0x7208: 'lǜ',
0x7209: 'là,liè',
0x720A: 'āo',
0x720B: 'xūn,xùn',
0x720C: 'kuàng,huǎng',
0x720D: 'shuò',
0x720E: 'liáo,liǎo',
0x720F: 'lì',
0x7210: 'lú',
0x7211: 'jué',
0x7212: 'liáo,liǎo',
0x7213: 'yàn,xún',
0x7214: 'xī',
0x7215: 'xiè',
0x7216: 'lóng',
0x7217: 'yè',
0x7218: 'cān',
0x7219: 'rǎng',
0x721A: 'yuè',
0x721B: 'làn',
0x721C: 'cóng',
0x721D: 'jué',
0x721E: 'chóng',
0x721F: 'guàn',
0x7220: 'qú',
0x7221: 'chè',
0x7222: 'mí',
0x7223: 'tǎng',
0x7224: 'làn',
0x7225: 'zhú',
0x7226: 'lǎn,làn',
0x7227: 'líng',
0x7228: 'cuàn',
0x7229: 'yù',
0x722A: 'zhǎo,zhuǎ',
0x722B: 'zhǎo,zhuǎ',
0x722C: 'pá',
0x722D: 'zhēng',
0x722E: 'páo',
0x722F: 'chēng,chèn',
0x7230: 'yuán',
0x7231: 'ài',
0x7232: 'wéi,wèi',
0x7233: 'han',
0x7234: 'jué',
0x7235: 'jué',
0x7236: 'fù,fǔ',
0x7237: 'yé',
0x7238: 'bà',
0x7239: 'diē',
0x723A: 'yé',
0x723B: 'yáo',
0x723C: 'zǔ',
0x723D: 'shuǎng',
0x723E: 'ěr',
0x723F: 'pán',
0x7240: 'chuáng',
0x7241: 'kē',
0x7242: 'zāng',
0x7243: 'dié',
0x7244: 'qiāng',
0x7245: 'yōng',
0x7246: 'qiáng',
0x7247: 'piàn,piān',
0x7248: 'bǎn',
0x7249: 'pàn',
0x724A: 'cháo',
0x724B: 'jiān',
0x724C: 'pái',
0x724D: 'dú',
0x724E: 'chuāng',
0x724F: 'yú',
0x7250: 'zhá',
0x7251: 'biān,miàn',
0x7252: 'dié',
0x7253: 'bǎng',
0x7254: 'bó',
0x7255: 'chuāng',
0x7256: 'yǒu',
0x7257: 'yǒu,yōng',
0x7258: 'dú',
0x7259: 'yá',
0x725A: 'chēng,chèng',
0x725B: 'niú',
0x725C: 'niú',
0x725D: 'pìn',
0x725E: 'jiū,lè',
0x725F: 'móu,mù',
0x7260: 'tā',
0x7261: 'mǔ',
0x7262: 'láo',
0x7263: 'rèn',
0x7264: 'māng',
0x7265: 'fāng',
0x7266: 'máo',
0x7267: 'mù',
0x7268: 'gāng',
0x7269: 'wù',
0x726A: 'yàn',
0x726B: 'gē,qiú',
0x726C: 'bèi',
0x726D: 'sì',
0x726E: 'jiàn',
0x726F: 'gǔ',
0x7270: 'yòu,chōu',
0x7271: 'kē',
0x7272: 'shēng',
0x7273: 'mǔ',
0x7274: 'dǐ',
0x7275: 'qiān',
0x7276: 'quàn',
0x7277: 'quán',
0x7278: 'zì',
0x7279: 'tè',
0x727A: 'xī',
0x727B: 'máng',
0x727C: 'kēng',
0x727D: 'qiān',
0x727E: 'wǔ',
0x727F: 'gù',
0x7280: 'xī',
0x7281: 'lí',
0x7282: 'lí',
0x7283: 'pǒu',
0x7284: 'jī',
0x7285: 'gāng',
0x7286: 'zhí,tè',
0x7287: 'bēn',
0x7288: 'quán',
0x7289: 'chún',
0x728A: 'dú',
0x728B: 'jù',
0x728C: 'jiā',
0x728D: 'jiān,qián',
0x728E: 'fēng',
0x728F: 'piān',
0x7290: 'kē',
0x7291: 'jú',
0x7292: 'kào',
0x7293: 'chú',
0x7294: 'xì',
0x7295: 'bèi',
0x7296: 'luò',
0x7297: 'jiè',
0x7298: 'má',
0x7299: 'sān',
0x729A: 'wèi',
0x729B: 'máo,lí',
0x729C: 'dūn',
0x729D: 'tóng',
0x729E: 'qiáo',
0x729F: 'jiàng',
0x72A0: 'xī',
0x72A1: 'lì',
0x72A2: 'dú',
0x72A3: 'liè',
0x72A4: 'pái',
0x72A5: 'piāo',
0x72A6: 'bào',
0x72A7: | |
% element, item, re.IGNORECASE):
#if item.split(".")[0].lower() == element.lower() or item.split("_")[0].lower() == element.lower():
if re.match("(%s)(.*)(upf)" % element, item, re.IGNORECASE) or re.match("(%s)(_*)(upf)" % element, item, re.IGNORECASE):
shutil.copyfile(item, os.path.join(directory, item))
break
self.arts.pseudo.dir = os.path.abspath(directory)
self.control.set_params({"pseudo_dir": os.path.abspath(directory)})
#
os.chdir(directory)
with open("relax.in.template", 'w') as fout:
self.control.to_in(fout)
self.system.to_in(fout)
self.electrons.to_in(fout)
self.ions.to_in(fout)
coordtype = "crystal" # use crystal here so we could only change cell when opt cell
fout.write("ATOMIC_SPECIES\n")
all_file = os.listdir(self.arts.pseudo.dir)
for element in self.arts.xyz.specie_labels:
for item in all_file:
if re.match("(%s)(.*)(upf)" % (element), item, re.IGNORECASE):
fout.write("%s %f %s\n" % (element, base.element[element].mass, item))
break
fout.write("\n")
if coordtype == "angstrom":
fout.write("ATOMIC_POSITIONS angstrom\n")
if self.arts.ifstatic == True:
for atom in self.arts.xyz.atoms:
fout.write("%s\t%.9f\t%.9f\t%.9f\n" % (atom.name, atom.x, atom.y, atom.z))
elif self.arts.ifstatic == False:
for atom in self.arts.xyz.atoms:
fout.write("%s\t%.9f\t%.9f\t%.9f" % (atom.name, atom.x, atom.y, atom.z))
for fix in atom.fix:
if fix == True:
fout.write("\t0")
elif fix == False:
fout.write("\t1")
fout.write("\n")
else:
print("===============================================\n")
print("warning: qe.base.arts.to_in():\n")
print("arts.ifstatic could only be True or False\n")
sys.exit(1)
fout.write("\n")
elif coordtype == "crystal":
# crystal namely fractional coordinate can be convert from cartesian coordinates
# the conversion process is like transformation of presentation in quantum mechanics
# the convmat is bulid to do the conversion
#latcell = np.array(self.xyz.cell)
#latcell = latcell.reshape(3, 3)
latcell = np.array(self.arts.xyz.cell)
convmat = np.linalg.inv(latcell.T)
crystal_coord = np.zeros([self.arts.xyz.natom, 3])
for i in range(self.arts.xyz.natom):
crystal_coord[i] = convmat.dot(np.array([self.arts.xyz.atoms[i].x, self.arts.xyz.atoms[i].y, self.arts.xyz.atoms[i].z]))
#
fout.write("ATOMIC_POSITIONS crystal\n")
if self.arts.ifstatic == True:
for k in range(self.arts.xyz.natom):
fout.write("%s\t%.9f\t%.9f\t%.9f\n" % (self.arts.xyz.atoms[k].name, crystal_coord[k, 0], crystal_coord[k, 1], crystal_coord[k, 2]))
elif self.arts.ifstatic == False:
for k in range(self.arts.xyz.natom):
fout.write("%s\t%.9f\t%.9f\t%.9f" % (self.arts.xyz.atoms[k].name, crystal_coord[k, 0], crystal_coord[k, 1], crystal_coord[k, 2]))
for fix in self.arts.xyz.atoms[k].fix:
if fix == True:
fout.write("\t0")
elif fix == False:
fout.write("\t1")
fout.write("\n")
else:
print("===============================================\n")
print("warning: qe.base.arts.to_in():\n")
print("arts.ifstatic could only be True or False\n")
sys.exit(1)
fout.write("\n")
# end crystal type ATOMIC_POSITIONS
# writing KPOINTS to the fout
self.arts.write_kpoints(fout)
# =========================
#
# writing forces act on atoms
if self.arts.atomic_forces_status == True:
self.arts.write_atomic_forces(fout)
# =========================
for i_batch_a in range(n_batch_a):
for i_batch_c in range(n_batch_c):
# gen llhpc script
with open("relax-tetragonal-%d-%d.slurm" % (i_batch_a, i_batch_c), 'w') as fout:
fout.write("#!/bin/bash\n")
fout.write("#!/bin/bash\n")
fout.write("#SBATCH -p %s\n" % self.run_params["partition"])
fout.write("#SBATCH -N %d\n" % self.run_params["nodes"])
fout.write("#SBATCH -n %d\n" % self.run_params["ntask"])
fout.write("#SBATCH -J %s-%d-%d\n" % (self.run_params["jobname"], i_batch_a, i_batch_c))
fout.write("#SBATCH -o %s\n" % self.run_params["stdout"])
fout.write("#SBATCH -e %s\n" % self.run_params["stderr"])
#fout.write("mpirun -np $NP -machinefile $PBS_NODEFILE %s < %s > %s\n" % (cmd, inpname, output))
a = np.sqrt(self.arts.xyz.cell[0][0]**2+self.arts.xyz.cell[0][1]**2+self.arts.xyz.cell[0][2]**2)
b = np.sqrt(self.arts.xyz.cell[1][0]**2+self.arts.xyz.cell[1][1]**2+self.arts.xyz.cell[1][2]**2)
c = np.sqrt(self.arts.xyz.cell[2][0]**2+self.arts.xyz.cell[2][1]**2+self.arts.xyz.cell[2][2]**2)
fout.write("a_in=%f\n" % a)
fout.write("b_in=%f\n" % b)
fout.write("c_in=%f\n" % c)
fout.write("a1=%f\n" % self.arts.xyz.cell[0][0])
fout.write("a2=%f\n" % self.arts.xyz.cell[0][1])
fout.write("a3=%f\n" % self.arts.xyz.cell[0][2])
fout.write("b1=%f\n" % self.arts.xyz.cell[1][0])
fout.write("b2=%f\n" % self.arts.xyz.cell[1][1])
fout.write("b3=%f\n" % self.arts.xyz.cell[1][2])
fout.write("c1=%f\n" % self.arts.xyz.cell[2][0])
fout.write("c2=%f\n" % self.arts.xyz.cell[2][1])
fout.write("c3=%f\n" % self.arts.xyz.cell[2][2])
range_a_start = range_a[0] + i_batch_a * self.batch_a * range_a[2]
range_a_end = range_a[0] + (i_batch_a+1) * self.batch_a * range_a[2] - range_a[2] / 2
# - range_a[2] / 2, so that the last value is ignored which is actually the begining of next batch
if range_a_end > range_a[1]:
range_a_end = range_a[1]
range_c_start = range_c[0] + i_batch_c * self.batch_c * range_c[2]
range_c_end = range_c[0] + (i_batch_c+1) * self.batch_c * range_c[2] - range_c[2] / 2
# - range_c[2] / 2, so that the last value is ignored which is actually the begining of next batch
if range_c_end > range_c[1]:
range_c_end = range_c[1]
if na >= 2:
# a is optimized
fout.write("for a in `seq -w %f %f %f`\n" % (a+range_a_start, range_a[2], a+range_a_end))
fout.write("do\n")
if nc >= 2:
# optimize both a and c
fout.write("for c in `seq -w %f %f %f`\n" % (c+range_c_start, range_c[2], c+range_c_end))
fout.write("do\n")
fout.write(" cp relax.in.template relax-${a}-${c}.in\n")
fout.write(" vec11=$(printf \"%-.6f\" `echo \"scale=6; result=${a1} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec12=$(printf \"%-.6f\" `echo \"scale=6; result=${a2} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec13=$(printf \"%-.6f\" `echo \"scale=6; result=${a3} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec21=$(printf \"%-.6f\" `echo \"scale=6; result=${b1} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec22=$(printf \"%-.6f\" `echo \"scale=6; result=${b2} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec23=$(printf \"%-.6f\" `echo \"scale=6; result=${b3} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec31=$(printf \"%-.6f\" `echo \"scale=6; result=${c1} * ${c} / ${c_in}; print result\" | bc`)\n")
fout.write(" vec32=$(printf \"%-.6f\" `echo \"scale=6; result=${c2} * ${c} / ${c_in}; print result\" | bc`)\n")
fout.write(" vec33=$(printf \"%-.6f\" `echo \"scale=6; result=${c3} * ${c} / ${c_in}; print result\" | bc`)\n")
fout.write(" cat >> relax-${a}-${c}.in <<EOF\n")
fout.write("\n")
fout.write("CELL_PARAMETERS angstrom\n")
fout.write("${vec11} ${vec12} ${vec13}\n")
fout.write("${vec21} ${vec22} ${vec23}\n")
fout.write("${vec31} ${vec32} ${vec33}\n")
fout.write("EOF\n")
fout.write(" yhrun $PMF_PWX < relax-${a}-${c}.in > relax-${a}-${c}.out\n")
fout.write(" done\n")
else:
# only optimize a
fout.write(" cp relax.in.template relax-${a}.in\n")
fout.write(" vec11=$(printf \"%-.6f\" `echo \"scale=6; result=${a1} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec12=$(printf \"%-.6f\" `echo \"scale=6; result=${a2} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec13=$(printf \"%-.6f\" `echo \"scale=6; result=${a3} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec21=$(printf \"%-.6f\" `echo \"scale=6; result=${b1} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec22=$(printf \"%-.6f\" `echo \"scale=6; result=${b2} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec23=$(printf \"%-.6f\" `echo \"scale=6; result=${b3} * ${a} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec31=$(printf \"%-.6f\" `echo \"scale=6; result=${c1} * ${c_in} / ${c_in}; print result\" | bc`)\n")
fout.write(" vec32=$(printf \"%-.6f\" `echo \"scale=6; result=${c2} * ${c_in} / ${c_in}; print result\" | bc`)\n")
fout.write(" vec33=$(printf \"%-.6f\" `echo \"scale=6; result=${c3} * ${c_in} / ${c_in}; print result\" | bc`)\n")
fout.write(" cat >> relax-${a}.in <<EOF\n")
fout.write("\n")
fout.write("CELL_PARAMETERS angstrom\n")
fout.write("${vec11} ${vec12} ${vec13}\n")
fout.write("${vec21} ${vec22} ${vec23}\n")
fout.write("${vec31} ${vec32} ${vec33}\n")
fout.write("EOF\n")
fout.write(" yhrun $PMF_PWX < relax-${a}.in > relax-${a}.out\n")
fout.write("done\n")
else:
# a is not optimized
if nc >= 2:
# only optimize c
fout.write("for c in `seq -w %f %f %f`\n" % (c+range_c_start, range_c[2], c+range_c_end))
fout.write("do\n")
fout.write(" cp relax.in.template relax-${c}.in\n")
fout.write(" vec11=$(printf \"%-.6f\" `echo \"scale=6; result=${a1} * ${a_in} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec12=$(printf \"%-.6f\" `echo \"scale=6; result=${a2} * ${a_in} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec13=$(printf \"%-.6f\" `echo \"scale=6; result=${a3} * ${a_in} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec21=$(printf \"%-.6f\" `echo \"scale=6; result=${b1} * ${a_in} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec22=$(printf \"%-.6f\" `echo \"scale=6; result=${b2} * ${a_in} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec23=$(printf \"%-.6f\" `echo \"scale=6; result=${b3} * ${a_in} / ${a_in}; print result\" | bc`)\n")
fout.write(" vec31=$(printf \"%-.6f\" `echo \"scale=6; result=${c1} * ${c} / ${c_in}; print result\" | bc`)\n")
fout.write(" vec32=$(printf \"%-.6f\" `echo \"scale=6; result=${c2} * ${c} / ${c_in}; print result\" | bc`)\n")
fout.write(" vec33=$(printf \"%-.6f\" `echo \"scale=6; result=${c3} * ${c} / ${c_in}; print result\" | bc`)\n")
fout.write(" cat >> relax-${c}.in<<EOF\n")
fout.write("\n")
fout.write("CELL_PARAMETERS angstrom\n")
fout.write("${vec11} ${vec12} ${vec13}\n")
fout.write("${vec21} ${vec22} ${vec23}\n")
fout.write("${vec31} ${vec32} ${vec33}\n")
fout.write("EOF\n")
fout.write(" yhrun $PMF_PWX < relax-${c}.in > relax-${c}.out\n")
fout.write("done\n")
else:
# neither a or c is optimized
pass
# gen pbs script
with open("relax-tetragonal-%d-%d.pbs" % (i_batch_a, i_batch_c), 'w') as fout:
fout.write("#!/bin/bash\n")
fout.write("#PBS -N %s-%d-%d\n" % (self.run_params["jobname"], i_batch_a, i_batch_c))
fout.write("#PBS -l nodes=%d:ppn=%d\n" % (self.run_params["nodes"], self.run_params["ppn"]))
if "queue" in self.run_params and self.run_params["queue"] != None:
fout.write("#PBS -q %s\n" %self.run_params["queue"])
fout.write("\n")
fout.write("cd $PBS_O_WORKDIR\n")
fout.write("NP=`cat $PBS_NODEFILE | wc -l`\n")
#fout.write("mpirun -np $NP -machinefile $PBS_NODEFILE %s < %s > %s\n" % (cmd, inpname, output))
a = np.sqrt(self.arts.xyz.cell[0][0]**2+self.arts.xyz.cell[0][1]**2+self.arts.xyz.cell[0][2]**2)
b = np.sqrt(self.arts.xyz.cell[1][0]**2+self.arts.xyz.cell[1][1]**2+self.arts.xyz.cell[1][2]**2)
c = np.sqrt(self.arts.xyz.cell[2][0]**2+self.arts.xyz.cell[2][1]**2+self.arts.xyz.cell[2][2]**2)
fout.write("a_in=%f\n" % a)
fout.write("b_in=%f\n" % b)
fout.write("c_in=%f\n" % c)
fout.write("a1=%f\n" % self.arts.xyz.cell[0][0])
fout.write("a2=%f\n" % self.arts.xyz.cell[0][1])
fout.write("a3=%f\n" % self.arts.xyz.cell[0][2])
fout.write("b1=%f\n" % self.arts.xyz.cell[1][0])
fout.write("b2=%f\n" % self.arts.xyz.cell[1][1])
fout.write("b3=%f\n" % self.arts.xyz.cell[1][2])
fout.write("c1=%f\n" % self.arts.xyz.cell[2][0])
fout.write("c2=%f\n" % self.arts.xyz.cell[2][1])
fout.write("c3=%f\n" % self.arts.xyz.cell[2][2])
range_a_start = range_a[0] + i_batch_a * self.batch_a * range_a[2]
range_a_end = range_a[0] + (i_batch_a+1) * self.batch_a * range_a[2] - range_a[2] / 2
# - range_a[2] / 2, so that the last value is ignored which is actually the begining of next batch
if range_a_end > range_a[1]:
range_a_end = range_a[1]
range_c_start = range_c[0] + i_batch_c * self.batch_c * range_c[2]
range_c_end = range_c[0] + (i_batch_c+1) * self.batch_c * range_c[2] - range_c[2] / 2
# - range_c[2] / 2, so that the last value is ignored which is actually the begining of next batch
if range_c_end > range_c[1]:
range_c_end | |
give them a toonup or make them suffer.
if toon.getHp() > toon.getMaxHp():
toon.takeDamage(toon.getHp() - toon.getMaxHp())
else:
toon.toonUp(toon.getMaxHp() - toon.getHp())
# Set their type and level that they specified.
types = toon.getCogTypes()
types[corpIndex] = typeIndex
toon.b_setCogTypes(types)
levels = toon.getCogLevels()
levels[corpIndex] = level - 1 # -1 because it starts at 0
toon.b_setCogLevels(levels)
return "Set %s disguise to %s Level %d." % (
corp.capitalize(), SuitBattleGlobals.SuitAttributes[type]['name'], level)
class Merits(MagicWord):
    desc = "Set the target's merits to the value specified."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("corp", str, True), ("amount", int, True)]

    def handleWord(self, invoker, avId, toon, *args):
        """Set the target toon's merit count for one cog corp.

        args: (corp, amount) -- corp is one of bossbot/lawbot/cashbot/sellbot.
        """
        corp = args[0]
        amount = args[1]
        corps = ['bossbot', 'lawbot', 'cashbot', 'sellbot']
        if corp not in corps:
            return "Invalid cog corp. specified."
        corpIndex = corps.index(corp)

        merits = toon.getCogMerits()
        merits[corpIndex] = amount
        toon.b_setCogMerits(merits)
        # Previously this command gave no feedback, unlike its siblings;
        # return a confirmation so the invoker knows it worked.
        return "Set %s's %s merits to %d." % (toon.getName(), corp, amount)
class Pouch(MagicWord):
    desc = "Set the target's max gag limit."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("amount", int, True)]

    def handleWord(self, invoker, avId, toon, *args):
        """Resize the target toon's gag pouch to the requested capacity (1-255)."""
        size = args[0]
        if size < 1 or size > 255:
            return "Can't set {0}'s pouch size to {1}! Specify a value between 1 and 255.".format(toon.getName(), size)
        toon.b_setMaxCarry(size)
        return "Set %s's pouch size to %d" % (toon.getName(), size)
class SetNametagStyle(MagicWord):
    aliases = ["setnametag", "nametag", "nametagstyle"]
    desc = "Set the style of the target's nametag to the specified ID."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("style", str, True)]

    def handleWord(self, invoker, avId, toon, *args):
        """Look up the requested font name (case-insensitive) and apply it."""
        requested = args[0].lower()
        fontNames = [fontName.lower() for fontName in TTLocalizer.NametagFontNames]
        if requested in fontNames:
            index = fontNames.index(requested)
        elif requested == "basic":
            index = 100  # sentinel id for the plain/basic nametag
        else:
            return "Invalid nametag name entered."
        toon.b_setNametagStyle(index)
        return "Set %s's nametag style successfully." % toon.getName()
class SetNametagType(MagicWord):
    aliases = ["nametagtype", "lacker"]
    desc = "Set the style of the target's nametag to the specified ID."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("type", int, False, 0)]

    def handleWord(self, invoker, avId, toon, *args):
        """Set the target's nametag type if the given id is one of 0, 1 or 3."""
        type = args[0]
        # Pairs of (magic-word id, NametagGroup colour-code constant).
        nametagTypeList = [(0, NametagGroup.CCNormal), (1, NametagGroup.CCNonPlayer), (3, NametagGroup.CCSuit)]
        for nametag in nametagTypeList:
            if type == nametag[0]:
                # NOTE(review): this passes nametag[0] (the raw id) rather than
                # nametag[1] (the NametagGroup constant), leaving the second
                # tuple element unused -- confirm b_setNametagType really
                # expects the id and not the constant.
                toon.b_setNametagType(nametag[0])
                return "Changed %s's nametag type successfully." % toon.getName()
        return "Invalid nametag type specified!"
class Phrase(MagicWord):
    desc = "Unlocks a new phrase and adds it to target's list of 'My Phrases'."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("id", int, True)]

    def handleWord(self, invoker, avId, toon, *args):
        """Unlock the custom SpeedChat phrase with the given id for the toon.

        The localizer key is id * 10.  When the phrase list is full, the
        oldest phrase is dropped to make room.
        """
        phraseId = args[0]
        scId = int(phraseId) * 10
        # Membership test directly on the dict -- iterkeys() was redundant
        # (and Python-2-only).  Testing the key instead of the previous
        # 'if id:' truthiness also fixes rejection of a valid key of 0.
        if scId not in OTPLocalizer.CustomSCStrings:
            return "Invalid phrase id!"
        if scId in toon.customMessages:
            return "%s already has this custom phrase!" % toon.getName()
        if len(toon.customMessages) >= ToontownGlobals.MaxCustomMessages:
            # At capacity: drop the oldest phrase.
            toon.customMessages = toon.customMessages[1:]
        toon.customMessages.append(scId)
        toon.d_setCustomMessages(toon.customMessages)
        return "Added new phrase to %s's custom phrases." % toon.getName()
class SetSos(MagicWord):
    aliases = ["sos"]
    desc = "Sets the target's SOS cards. The default is 1 Flippy card."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("amount", int, False, 1), ("name", str, False, 'Flippy')]

    def handleWord(self, invoker, avId, toon, *args):
        """Restock (or remove, when amount is 0) one NPC's SOS cards."""
        amt = args[0]
        name = args[1]
        if not 0 <= amt <= 100:
            return "The amount must be between 0 and 100!"
        # Find the NPC id whose display name matches (case-insensitive);
        # only NPCs that can actually be SOS friends count.
        for npcId, npcName in TTLocalizer.NPCToonNames.items():
            if name.lower() == npcName.lower():
                if npcId not in NPCToons.npcFriends:
                    continue
                break
        else:
            return "The {0} SOS card was not found!".format(name)
        if (amt == 0) and (npcId in toon.NPCFriendsDict):
            # BUG FIX: previously checked invoker.NPCFriendsDict but deleted
            # from toon's, which could KeyError when invoker != target.
            del toon.NPCFriendsDict[npcId]
        else:
            toon.NPCFriendsDict[npcId] = amt
        toon.d_setNPCFriendsDict(toon.NPCFriendsDict)
        return "Restocked {0} {1} SOS cards successfully!".format(amt, npcName)
class FreeBldg(MagicWord):
    desc = "Closest cog building gets freed."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER

    def handleWord(self, invoker, avId, toon, *args):
        """Try to revert the nearest cog building back to a toon building."""
        status = invoker.doBuildingFree()[0]
        if status == 'success':
            return "Successfully took back building!"
        if status == 'busy':
            return "Toons are currently taking back the building!"
        return "Couldn't free building."
class MaxGarden(MagicWord):
    desc = "Maxes your garden."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER

    def handleWord(self, invoker, avId, toon, *args):
        """Max the invoker's gardening: best tools, full skills, every trophy."""
        # Best shovel and watering can, with their skill meters filled.
        invoker.b_setShovel(3)
        invoker.b_setShovelSkill(639)
        invoker.b_setWateringCan(3)
        invoker.b_setWateringCanSkill(999)
        # Award every gardening trophy.
        invoker.b_setGardenTrophies(GardenGlobals.TrophyDict.keys())
class InstaDelivery(MagicWord):
    aliases = ["fastdel"]
    desc = "Instant delivery of an item."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER

    def handleWord(self, invoker, avId, toon, *args):
        """Toggle instant catalog delivery, expediting any pending orders."""
        enabled = not invoker.instantDelivery
        invoker.instantDelivery = enabled
        # Deliver all the packages that they already ordered, too.
        for order in toon.onOrder:
            order.deliveryDate = int(time.time() / 60)
        return "Instant Delivery has been turned {0}.".format('on' if enabled else 'off')
class SetMuzzle(MagicWord):
    aliases = ["muzzle"]
    desc = "Modify the targets muzzle."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("id", int, False, 0)]

    def handleWord(self, invoker, avId, toon, *args):
        """Set the target toon's muzzle model (0 restores the normal muzzle)."""
        muzzle = args[0]
        if not 0 <= muzzle <= 5:
            return "Invalid muzzle. (0-5)"
        toon.b_setMuzzle(muzzle)
        if muzzle == 0:
            return "Returned muzzle to normal!"
        # Previously fell through with no feedback for non-zero ids.
        return "Set %s's muzzle to %d." % (toon.getName(), muzzle)
class SetEyes(MagicWord):
    aliases = ["eyes"]
    desc = "Modify the targets eyes."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("id", int, False, 0)]

    def handleWord(self, invoker, avId, toon, *args):
        """Set the target toon's eye type (0 restores the normal eyes)."""
        eyeType = args[0]  # renamed from 'type': avoid shadowing the builtin
        if not 0 <= eyeType <= 3:
            return "The type must be between 0 and 3!"
        toon.b_setEyes(eyeType)
        if eyeType == 0:
            return "Returned eyes to normal!"
        # Previously fell through with no feedback for non-zero types.
        return "Set %s's eyes to type %d." % (toon.getName(), eyeType)
class SetTaskCarryLimit(MagicWord):
    aliases = ["taskcarrylimit", "settaskcarry", "taskcarry", "setcarry"]
    desc = "Set the amount of tasks a toon can carry."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("limit", int, False, 1)]

    def handleWord(self, invoker, avId, toon, *args):
        """Set how many ToonTasks the target may hold at once (1-4)."""
        limit = args[0]
        if not 1 <= limit <= 4:
            return "The amount must be between 1 and 4!"
        toon.b_setQuestCarryLimit(limit)
        suffix = '' if limit == 1 else 's'
        return "You can now carry {0} task{1}!".format(limit, suffix)
class SetAlwaysHitCogs(MagicWord):
    aliases = ["alwayshitcogs", "hitcogs"]
    desc = "Enable/Disable always hitting cogs."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER

    def handleWord(self, invoker, avId, toon, *args):
        """Flip the target's always-hit flag and report the new state."""
        if not toon:
            return
        newState = not toon.getAlwaysHitSuits()
        toon.setAlwaysHitSuits(newState)
        return "Toggled always hitting Cogs %s for %s" % ('ON' if toon.getAlwaysHitSuits() else 'OFF', toon.getName())
class EndFlying(MagicWord):
    desc = "Ends the flying game in a Lawbot Field Office."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER

    def handleWord(self, invoker, avId, toon, *args):
        """Finish the cogdo flying minigame the invoker is currently in."""
        from toontown.cogdominium.DistCogdoFlyingGameAI import DistCogdoFlyingGameAI
        # Scan the live distributed objects for a flying game that
        # contains the invoker, and finish the first one found.
        for do in simbase.air.doId2do.values():
            if isinstance(do, DistCogdoFlyingGameAI) and invoker.doId in do.getToonIds():
                do._handleGameFinished()
                return "Completed the flying game."
        return "You are not in a flying game!"
class Ping(MagicWord):
    desc = "Pong!"
    execLocation = MagicWordConfig.EXEC_LOC_SERVER

    def handleWord(self, invoker, avId, toon, *args):
        """Connectivity check: always answers with the literal string below."""
        response = "Pong!"
        return response
class GardenGame(MagicWord):
    aliases = ["gardendrop"]
    desc = "Start the garden drop mini-game."
    execLocation = MagicWordConfig.EXEC_LOC_CLIENT

    def handleWord(self, invoker, avId, toon, *args):
        """Launch the Garden Drop minigame on the local client avatar."""
        from toontown.estate import GardenDropGame
        game = GardenDropGame.GardenDropGame()
        base.localAvatar.game = game
class WinGame(MagicWord):
    aliases = ["winminigame"]
    desc = "Win the trolley game you are in."
    execLocation = MagicWordConfig.EXEC_LOC_CLIENT

    def handleWord(self, invoker, avId, toon, *args):
        """Broadcast the victory event for the current trolley minigame."""
        messenger.send("minigameVictory")
        return "Trolley game won."
class GetZone(MagicWord):
    aliases = ["getzoneid"]
    desc = "Returns the target's zone ID."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER

    def handleWord(self, invoker, avId, toon, *args):
        """Report which zone the target toon currently occupies."""
        zone = str(toon.zoneId)
        return "{}'s zone ID is {}.".format(toon.getName(), zone)
class SetAccessLevel(MagicWord):
    administrative = True
    aliases = ["accesslevel", "access", "setaccess"]
    desc = "Sets the target's access level."
    execLocation = MagicWordConfig.EXEC_LOC_SERVER
    arguments = [("rank", int, False, 100)]
    affectRange = [MagicWordConfig.AFFECT_OTHER]

    def handleWord(self, invoker, avId, toon, *args):
        """Set the target's access level, refusing to clone the invoker's own."""
        rank = args[0]
        if not -100 <= rank <= 800:
            # BUG FIX: this message previously said "speed" (copy-paste error).
            return "Can't set {0}'s Access Level to {1}! Specify a value between -100 and 800.".format(toon.getName(), rank)
        if invoker.getAccessLevel() == rank:
            return "You cannot set the target to your own Access Level!"
        rankName = OTPGlobals.AccessLevelInt2Name.get(rank)
        if rankName is None:
            # Guard against in-range values that have no named access level;
            # previously None would be passed straight to b_setAccessLevel.
            return "{0} is not a valid Access Level!".format(rank)
        toon.b_setAccessLevel(rankName)
        return "Set {0}'s Access Level to {1}.".format(toon.getName(), rankName)
class PrintChildren(MagicWord):
aliases = ["children"]
desc = "Prints all of render's children to the client log."
execLocation = MagicWordConfig.EXEC_LOC_CLIENT
arguments = [("type", int, False, 0), ("mode", int, False, 0)]
def handleWord(self, invoker, avId, toon, *args):
type = args[0]
mode = args[1]
if not type:
node = render
else:
node = render2d
if not mode:
print node.getChildren()
elif mode == 2:
for child in node.getChildren():
for secondaryChild in child.getChildren():
print secondaryChild.getChildren()
elif mode == 3:
for child in node.getChildren():
for secondaryChild in child.getChildren():
for thirdChild in secondaryChild.getChildren():
print thirdChild.getChildren()
elif mode == 4:
for child in node.getChildren():
for secondaryChild in child.getChildren():
for thirdChild in secondaryChild.getChildren():
for fourthChild in thirdChild.getChildren():
print fourthChild.getChildren()
elif mode == 5:
for child in node.getChildren():
for secondaryChild in child.getChildren():
for thirdChild in secondaryChild.getChildren():
for fourthChild in thirdChild.getChildren():
for fifthChild in fourthChild.getChildren():
print fifthChild.getChildren()
else:
for child in node.getChildren():
print child.getChildren()
# Instantiate all classes defined here to register them.
# A bit hacky, but better than the old system
for item in globals().values():
if | |
= w@E
A = E - mu
ub = unbias_var(w, avoid_pathological=False)
C12 = sqrt(ub*w[:,None]) * A
return C12
def mask_unique_of_sorted(idx):
    """Return a boolean mask that is True at [i] iff idx[i] is NOT unique.

    Assumes idx is sorted, so duplicates are adjacent.  Each entry is
    compared against both of its neighbours via np.roll (which wraps
    around at the array ends).
    """
    prev_neighbour = np.roll(idx, 1)
    next_neighbour = np.roll(idx, -1)
    return (idx == prev_neighbour) | (idx == next_neighbour)
def bandw(N,m):
    """
    Optimal bandwidth (not bandwidth^2) for an N-member, m-dimensional
    ensemble, as per Scott's rule-of-thumb.
    Refs: [1] section 12.2.2, and [2] #Rule_of_thumb
    [1]: Doucet, de Freitas, Gordon, 2001:
        "Sequential Monte Carlo Methods in Practice"
    [2] wikipedia.org/wiki/Multivariate_kernel_density_estimation
    """
    # Fixes: stray 4th quote in the original docstring opener; the float
    # literal guards against integer truncation of -1/(m+4) (a silent bug
    # if this code were ever run under Python 2).
    return N**(-1.0/(m+4))
def regularize(C12,E,idx,no_uniq_jitter):
    """
    Jitter (regularize) a resampled ensemble.

    After resampling, some particles are identical; if the model noise is
    deterministic, Gaussian jitter (coloured by C12, hence the name
    "regularization": Dirac-deltas are approximated by Gaussian kernels)
    must be added.  The jitter introduces a bias, but as N-->infinity the
    regularization bandwidth-->0, i.e. bias-->0.
    Ref: [1], section 12.2.2.
    [1]: Doucet, de Freitas, Gordon, 2001:
        "Sequential Monte Carlo Methods in Practice"

    If no_uniq_jitter: only duplicated particles receive jitter.
    Returns the jittered ensemble and the chi2 of the drawn noise.
    """
    # Select the resampled particles.
    E = E[idx]

    # Jitter: either just the duplicates, or the whole ensemble.
    if no_uniq_jitter:
        duplicated = mask_unique_of_sorted(idx)
        noise, chi2 = sample_quickly_with(C12, N=sum(duplicated))
        E[duplicated] += noise
    else:
        noise, chi2 = sample_quickly_with(C12, N=len(E))
        E += noise
    return E, chi2
def resample(w,kind='Systematic',N=None,wroot=1.0):
    """
    Multinomial resampling (and variants) of particle indices.

    - kind: 'Systematic', 'Residual' or 'Stochastic'.
      'Stochastic' corresponds to np.random.choice()/np.random.multinomial().
      The other two are more systematic (less stochastic) variations.
      Among the three, 'Systematic' is fastest, introduces the least noise,
      and brings continuity benefits for localized particle filters,
      and is therefore generally prefered.
      Example: see docs/test_resample.py.
    - N: how many indices to draw; defaults to len(w)
      (it can differ, e.g. in case some particles have been elimintated).
    - wroot: weights are adjusted by this root before resampling, to
      promote particle diversity and mitigate thinning; the returned
      weights compensate to maintain un-biased-ness.  Ref: [3], section 3.1.

    Returns (idx, w_new): sampled indices and the compensating weights.

    Note: (a) resampling is beneficial because it discards low-weight
    ("doomed") particles and reduces the variance of the weights; yet
    (b) even unbiased resampling injects noise into empirical estimators
    ([1], section 3.4).  (a) and (b) are reconciled in the sequential
    setting: focusing on high-weight particles lowers *future* variance,
    since their future likelihoods are more informative (less variable).

    [1]: Doucet & Johansen, 2009, v1.1:
        "A Tutorial on Particle Filtering and Smoothing: Fifteen years later."
    [2]: van Leeuwen, 2009: "Particle Filtering in Geophysical Systems"
    [3]: Liu & Chen, 2001:
        "A theoretical framework for sequential importance sampling with resampling"
    """
    # Weights must already be normalized.
    assert abs(w.sum() - 1) < 1e-5

    N_o = len(w)           # original ensemble size
    if N is None:
        N = N_o            # default: draw as many as we had

    # Flatten the weights: sample from sw := s*w = w**(1/wroot).
    if wroot != 1.0:
        s  = (w ** (1/wroot - 1)).clip(max=1e100)
        s /= (s * w).sum()
        sw = s * w
    else:
        s  = ones(N_o)
        sw = w

    # Do the actual resampling.
    idx = _resample(sw, kind, N_o, N)

    # Undo the flattening so the scheme stays unbiased.
    w  = 1 / s[idx]
    w /= w.sum()
    return idx, w
def _resample(w,kind,N_o,N):
"Core functionality for resample(). See its docstring."
if kind in ['Stochastic','Stoch']:
# van Leeuwen [2] also calls this "probabilistic" resampling
idx = np.random.choice(N_o,N,replace=True,p=w)
# np.random.multinomial is faster (slightly different usage) ?
elif kind in ['Residual','Res']:
# Doucet [1] also calls this "stratified" resampling.
w_N = w*N # upscale
w_I = w_N.astype(int) # integer part
w_D = w_N-w_I # decimal part
# Create duplicate indices for integer parts
idx_I = [i*ones(wi,dtype=int) for i,wi in enumerate(w_I)]
idx_I = np.concatenate(idx_I)
# Multinomial sampling of decimal parts
N_I = w_I.sum() # == len(idx_I)
N_D = N - N_I
idx_D = np.random.choice(N_o,N_D,replace=True,p=w_D/w_D.sum())
# Concatenate
idx = np.hstack((idx_I,idx_D))
elif kind in ['Systematic','Sys']:
# van Leeuwen [2] also calls this "stochastic universal" resampling
U = rand(1) / N
CDF_a = U + arange(N)/N
CDF_o = np.cumsum(w)
#idx = CDF_a <= CDF_o[:,None]
#idx = np.argmax(idx,axis=0) # Finds 1st. SO/a/16244044/
idx = np.searchsorted(CDF_o,CDF_a)
else:
raise KeyError
return idx
def sample_quickly_with(C12,N=None):
    """
    Draw N Gaussian samples coloured by C12, choosing the cheaper route
    based on the shape of C12 (tall: factorize first; otherwise use C12
    directly).  Returns (sample, chi2), where chi2 holds the squared
    norms of the underlying white draws.
    """
    N_, m = C12.shape
    if N is None:
        N = N_
    if N_ > 2*m:
        # Tall C12: reduce it first (chol_reduce), then colour white noise
        # of the reduced dimension.
        reduced = chol_reduce(C12)
        white   = randn((N, reduced.shape[0]))
        chi2    = np.sum(white**2, axis=1)
        sample  = white @ reduced
    else:
        # Short/wide C12: colour N_-dimensional white noise directly,
        # compensating chi2 for the (possibly lower) effective rank.
        rank_factor = min(m/N_, 1.0)
        white  = randn((N, N_))
        chi2   = np.sum(white**2, axis=1) * rank_factor
        sample = white @ C12
    return sample, chi2
@DA_Config
def EnCheat(upd_a,N,infl=1.0,rot=False,**kwargs):
    """
    A baseline/reference method.
    Ensemble method that cheats: it knows the truth.
    Nevertheless, its error will not necessarily be 0,
    because the truth may be outside of the ensemble subspace.
    This method is just to provide a baseline for comparison with other methods.
    It may very well beat the particle filter with N=infinty.
    NB: The forecasts (and their rmse) are given by the standard EnKF.
    """
    def assimilator(stats,twin,xx,yy):
        # Unpack the twin experiment: dynamics, obs operator, chronology, prior.
        f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0
        # Initial ensemble drawn from the prior.
        E = X0.sample(N)
        stats.assess(0,E=E)
        for k,kObs,t,dt in progbar(chrono.forecast_range):
            # Forecast: propagate ensemble, then add model noise.
            E = f(E,t-dt,dt)
            E = add_noise(E, dt, f.noise, kwargs)
            if kObs is not None:
                # Standard EnKF analysis
                hE = h(E,t)
                y = yy[kObs]
                E = EnKF_analysis(E,hE,h.noise,y,upd_a,stats,kObs)
                E = post_process(E,infl,rot)
                # Cheating (only used for stats): project the true state
                # xx[k] onto the ensemble subspace by least squares; the
                # residual, spread evenly over the f.m state dims, is
                # reported as the "Cov".
                w,res,_,_ = sla.lstsq(E.T, xx[k])
                if not res.size:
                    # lstsq may return an empty residual array (e.g. in
                    # rank-deficient cases) -- treat that as zero.
                    res = 0
                res = diag((res/twin.f.m) * ones(twin.f.m))
                opt = w @ E
                # NB: Center on the optimal solution?
                #E += opt - mean(E,0)
                stats.assess(k,kObs,mu=opt,Cov=res)
    return assimilator
@DA_Config
def Climatology(**kwargs):
    """
    A baseline/reference method: report the climatology at every step.
    Note that the "climatology" is computed from the truth itself, which
    might be (unfairly) advantageous if the simulation is too short
    (vs mixing time).
    """
    def assimilator(stats,twin,xx,yy):
        # Unpack the twin experiment: dynamics, obs operator, chronology, prior.
        f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0

        # Time-constant estimate: mean and covariance of the whole truth run.
        muC = mean(xx,0)
        PC  = CovMat(xx - muC,'A')

        stats.assess(0,mu=muC,Cov=PC)
        stats.trHK[:] = 0
        # Report the same climatological estimate at every step.
        for k,kObs,_,_ in progbar(chrono.forecast_range):
            stats.assess(k,kObs,'fau',mu=muC,Cov=PC)
    return assimilator
@DA_Config
def OptInterp(**kwargs):
    """
    Optimal Interpolation -- a baseline/reference method.
    Uses the Kalman filter equations,
    but with a prior from the Climatology.
    """
    def assimilator(stats,twin,xx,yy):
        # Unpack the twin experiment: dynamics, obs operator, chronology, prior.
        f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0
        # Get H.
        msg = "For speed, only time-independent H is supported."
        # Evaluate the Jacobian at (nan, nan): a state/time-dependent H
        # would then contain non-finite entries, which we reject below.
        H = h.jacob(np.nan, np.nan)
        if not np.all(np.isfinite(H)): raise AssimFailedError(msg)
        # Compute "climatological" Kalman gain
        muC = mean(xx,0)
        AC = xx - muC
        PC = (AC.T @ AC) / (xx.shape[0] - 1)
        KG = mrdiv(PC@H.T, H@PC@H.T + h.noise.C.full)
        # Setup scalar "time-series" covariance dynamics.
        # ONLY USED FOR DIAGNOSTICS, not to change the Kalman gain.
        Pa = (eye(f.m) - KG@H) @ PC
        CorrL = estimate_corr_length(AC.ravel(order='F'))
        WaveC = wave_crest(trace(Pa)/trace(2*PC),CorrL)
        # Init
        mu = muC
        stats.assess(0,mu=mu,Cov=PC)
        for k,kObs,t,dt in progbar(chrono.forecast_range):
            # Forecast
            mu = f(mu,t-dt,dt)
            if kObs is not None:
                stats.assess(k,kObs,'f',mu=muC,Cov=PC)
                # Analysis: update around the climatological mean using the
                # precomputed (time-constant) gain.
                mu = muC + KG@(yy[kObs] - h(muC,t))
                stats.assess(k,kObs,mu=mu,Cov=2*PC*WaveC(k,kObs))
    return assimilator
@DA_Config
def Var3D(infl=1.0,**kwargs):
    """
    3D-Var -- a baseline/reference method.
    Uses the Kalman filter equations,
    but with a prior covariance estimated from the Climatology
    and a scalar time-series approximation to the dynamics
    (that does NOT use the innovation to estimate the background covariance).
    """
    # TODO: The wave-crest yields good results for sak08, but not for boc10
    def assimilator(stats,twin,xx,yy):
        # Unpack the twin experiment: dynamics, obs operator, chronology, prior.
        f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0
        # Compute "climatology"
        muC = mean(xx,0)
        AC = xx - muC
        PC = (AC.T @ AC)/(xx.shape[0] - 1)
        # Setup scalar "time-series" covariance dynamics
        CorrL = estimate_corr_length(AC.ravel(order='F'))
        WaveC = wave_crest(0.5,CorrL) # Nevermind careless W0 init
        # Init
        mu = muC
        P = PC
        stats.assess(0,mu=mu,Cov=P)
        for k,kObs,t,dt in progbar(chrono.forecast_range):
            # Forecast: mean by the model; covariance by the scalar
            # wave-crest approximation (scaled climatology).
            mu = f(mu,t-dt,dt)
            P = 2*PC*WaveC(k)
            if kObs is not None:
                stats.assess(k,kObs,'f',mu=mu,Cov=P)
                # Analysis
                P *= infl
                H = h.jacob(mu,t)
                KG = mrdiv(P@H.T, H@P@H.T + h.noise.C.full)
                KH = KG@H
                mu = mu + KG@(yy[kObs] - h(mu,t))
                # Re-calibrate wave_crest with new W0 = Pa/(2*PC).
                # Note: obs innovations are not used to estimate P!
                Pa = (eye(f.m) - KH) @ P
                WaveC = wave_crest(trace(Pa)/trace(2*PC),CorrL)
                stats.assess(k,kObs,mu=mu,Cov=2*PC*WaveC(k,kObs))
    return assimilator
@DA_Config
def Var3D_Lag(infl=1.0,**kwargs):
"""
Background covariance based on lagged time series.
"""
def assimilator(stats,twin,xx,yy):
f,h,chrono,X0 = twin.f, twin.h, twin.t, twin.X0
# Get H.
msg = "For speed, only time-independent H is supported."
H = h.jacob(np.nan, np.nan)
if not np.all(np.isfinite(H)): raise AssimFailedError(msg)
# Compute "lag" Kalman gain
muC = mean(xx,0)
L = chrono.dkObs
AC | |
from k5test import *
# Skip this test if pkinit wasn't built.
if not os.path.exists(os.path.join(plugins, 'preauth', 'pkinit.so')):
skip_rest('PKINIT tests', 'PKINIT module not built')
# Check if soft-pkcs11.so is available.
try:
import ctypes
lib = ctypes.LibraryLoader(ctypes.CDLL).LoadLibrary('soft-pkcs11.so')
del lib
have_soft_pkcs11 = True
except:
have_soft_pkcs11 = False
# Construct a krb5.conf fragment configuring pkinit.
certs = os.path.join(srctop, 'tests', 'dejagnu', 'pkinit-certs')
ca_pem = os.path.join(certs, 'ca.pem')
kdc_pem = os.path.join(certs, 'kdc.pem')
user_pem = os.path.join(certs, 'user.pem')
privkey_pem = os.path.join(certs, 'privkey.pem')
privkey_enc_pem = os.path.join(certs, 'privkey-enc.pem')
user_p12 = os.path.join(certs, 'user.p12')
user_enc_p12 = os.path.join(certs, 'user-enc.p12')
user_upn_p12 = os.path.join(certs, 'user-upn.p12')
user_upn2_p12 = os.path.join(certs, 'user-upn2.p12')
user_upn3_p12 = os.path.join(certs, 'user-upn3.p12')
generic_p12 = os.path.join(certs, 'generic.p12')
path = os.path.join(os.getcwd(), 'testdir', 'tmp-pkinit-certs')
path_enc = os.path.join(os.getcwd(), 'testdir', 'tmp-pkinit-certs-enc')
pkinit_krb5_conf = {'realms': {'$realm': {
'pkinit_anchors': 'FILE:%s' % ca_pem}}}
pkinit_kdc_conf = {'realms': {'$realm': {
'default_principal_flags': '+preauth',
'pkinit_eku_checking': 'none',
'pkinit_identity': 'FILE:%s,%s' % (kdc_pem, privkey_pem),
'pkinit_indicator': ['indpkinit1', 'indpkinit2']}}}
restrictive_kdc_conf = {'realms': {'$realm': {
'restrict_anonymous_to_tgt': 'true' }}}
freshness_kdc_conf = {'realms': {'$realm': {
'pkinit_require_freshness': 'true'}}}
testprincs = {'krbtgt/KRBTEST.COM': {'keys': 'aes128-cts'},
'user': {'keys': 'aes128-cts', 'flags': '+preauth'},
'user2': {'keys': 'aes128-cts', 'flags': '+preauth'}}
alias_kdc_conf = {'realms': {'$realm': {
'default_principal_flags': '+preauth',
'pkinit_eku_checking': 'none',
'pkinit_allow_upn': 'true',
'pkinit_identity': 'FILE:%s,%s' % (kdc_pem, privkey_pem),
'database_module': 'test'}},
'dbmodules': {'test': {
'db_library': 'test',
'alias': {'<EMAIL>': 'user'},
'princs': testprincs}}}
file_identity = 'FILE:%s,%s' % (user_pem, privkey_pem)
file_enc_identity = 'FILE:%s,%s' % (user_pem, privkey_enc_pem)
dir_identity = 'DIR:%s' % path
dir_enc_identity = 'DIR:%s' % path_enc
dir_file_identity = 'FILE:%s,%s' % (os.path.join(path, 'user.crt'),
os.path.join(path, 'user.key'))
dir_file_enc_identity = 'FILE:%s,%s' % (os.path.join(path_enc, 'user.crt'),
os.path.join(path_enc, 'user.key'))
p12_identity = 'PKCS12:%s' % user_p12
p12_upn_identity = 'PKCS12:%s' % user_upn_p12
p12_upn2_identity = 'PKCS12:%s' % user_upn2_p12
p12_upn3_identity = 'PKCS12:%s' % user_upn3_p12
p12_generic_identity = 'PKCS12:%s' % generic_p12
p12_enc_identity = 'PKCS12:%s' % user_enc_p12
p11_identity = 'PKCS11:soft-pkcs11.so'
p11_token_identity = ('PKCS11:module_name=soft-pkcs11.so:'
'slotid=1:token=SoftToken (token)')
# Start a realm with the test kdb module for the following UPN SAN tests.
realm = K5Realm(krb5_conf=pkinit_krb5_conf, kdc_conf=alias_kdc_conf,
create_kdb=False)
realm.start_kdc()
mark('UPN SANs')
# Compatibility check: cert contains UPN "user", which matches the
# request principal <EMAIL> if parsed as a normal principal.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_upn2_identity])
# Compatibility check: cert contains UPN "<EMAIL>", which matches
# the request principal <EMAIL> if parsed as a normal principal.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_upn3_identity])
# Cert contains UPN "<EMAIL>" which is aliased to the request
# principal.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_upn_identity])
# Test an id-pkinit-san match to a post-canonical principal.
realm.kinit('<EMAIL>',
flags=['-E', '-X', 'X509_user_identity=%s' % p12_identity])
# Test a UPN match to a post-canonical principal. (This only works
# for the cert with the UPN containing just "user", as we don't allow
# UPN reparsing when comparing to the canonicalized client principal.)
realm.kinit('<EMAIL>',
flags=['-E', '-X', 'X509_user_identity=%s' % p12_upn2_identity])
# Test a mismatch.
msg = 'kinit: Client name mismatch while getting initial credentials'
realm.run([kinit, '-X', 'X509_user_identity=%s' % p12_upn2_identity, 'user2'],
expected_code=1, expected_msg=msg)
realm.stop()
realm = K5Realm(krb5_conf=pkinit_krb5_conf, kdc_conf=pkinit_kdc_conf,
get_creds=False)
# Sanity check - password-based preauth should still work.
mark('password preauth sanity check')
realm.run(['./responder', '-r', 'password=%s' % password('<PASSWORD>'),
realm.user_princ])
realm.kinit(realm.user_princ, password=password('<PASSWORD>'))
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Having tested password preauth, remove the keys for better error
# reporting.
realm.run([kadminl, 'purgekeys', '-all', realm.user_princ])
# Test anonymous PKINIT.
mark('anonymous')
realm.kinit('@%s' % realm.realm, flags=['-n'], expected_code=1,
expected_msg='not found in Kerberos database')
realm.addprinc('WELLKNOWN/ANONYMOUS')
realm.kinit('@%s' % realm.realm, flags=['-n'])
realm.klist('WELLKNOWN/ANONYMOUS@WELLKNOWN:ANONYMOUS')
realm.run([kvno, realm.host_princ])
out = realm.run(['./adata', realm.host_princ])
if '97:' in out:
fail('auth indicators seen in anonymous PKINIT ticket')
# Test anonymous kadmin.
mark('anonymous kadmin')
f = open(os.path.join(realm.testdir, 'acl'), 'a')
f.write('WELLKNOWN/ANONYMOUS@WELLKNOWN:ANONYMOUS a *')
f.close()
realm.start_kadmind()
realm.run([kadmin, '-n', 'addprinc', '-pw', 'test', 'testadd'])
realm.run([kadmin, '-n', 'getprinc', 'testadd'], expected_code=1,
expected_msg="Operation requires ``get'' privilege")
realm.stop_kadmind()
# Test with anonymous restricted; FAST should work but kvno should fail.
mark('anonymous restricted')
r_env = realm.special_env('restrict', True, kdc_conf=restrictive_kdc_conf)
realm.stop_kdc()
realm.start_kdc(env=r_env)
realm.kinit('@%s' % realm.realm, flags=['-n'])
realm.kinit('@%s' % realm.realm, flags=['-n', '-T', realm.ccache])
realm.run([kvno, realm.host_princ], expected_code=1,
expected_msg='KDC policy rejects request')
# Regression test for #8458: S4U2Self requests crash the KDC if
# anonymous is restricted.
mark('#8458 regression test')
realm.kinit(realm.host_princ, flags=['-k'])
realm.run([kvno, '-U', 'user', realm.host_princ])
# Go back to the normal KDC environment.
realm.stop_kdc()
realm.start_kdc()
# Run the basic test - PKINIT with FILE: identity, with no password on the key.
mark('FILE identity, no password')
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'PKINIT client received freshness token from KDC',
'PKINIT loading CA certs and CRLs from FILE',
'PKINIT client making DH request',
' preauth for next request: PA-FX-COOKIE (133), PA-PK-AS-REQ (16)',
'PKINIT client verified DH reply',
'PKINIT client found id-pkinit-san in KDC cert',
'PKINIT client matched KDC principal krbtgt/')
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity],
expected_trace=msgs)
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# Try again using RSA instead of DH.
mark('FILE identity, no password, RSA')
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity,
'-X', 'flag_RSA_PROTOCOL=yes'],
expected_trace=('PKINIT client making RSA request',
'PKINIT client verified RSA reply'))
realm.klist(realm.user_princ)
# Test a DH parameter renegotiation by temporarily setting a 4096-bit
# minimum on the KDC. (Preauth type 16 is PKINIT PA_PK_AS_REQ;
# 109 is PKINIT TD_DH_PARAMETERS; 133 is FAST PA-FX-COOKIE.)
mark('DH parameter renegotiation')
minbits_kdc_conf = {'realms': {'$realm': {'pkinit_dh_min_bits': '4096'}}}
minbits_env = realm.special_env('restrict', True, kdc_conf=minbits_kdc_conf)
realm.stop_kdc()
realm.start_kdc(env=minbits_env)
msgs = ('Sending unauthenticated request',
'/Additional pre-authentication required',
'Preauthenticating using KDC method data',
'Preauth module pkinit (16) (real) returned: 0/Success',
' preauth for next request: PA-FX-COOKIE (133), PA-PK-AS-REQ (16)',
'/Key parameters not accepted',
'Preauth tryagain input types (16): 109, PA-FX-COOKIE (133)',
'trying again with KDC-provided parameters',
'Preauth module pkinit (16) tryagain returned: 0/Success',
' preauth for next request: PA-PK-AS-REQ (16), PA-FX-COOKIE (133)')
# PKINIT client/KDC tests run against the live test realm: freshness token
# enforcement, FILE:/DIR:/PKCS12: identities with and without key passwords
# (prompter and responder paths), and pkinit_cert_match rules.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity],
expected_trace=msgs)
# Test enforcement of required freshness tokens. (We can leave
# freshness tokens required after this test.)
mark('freshness token enforcement')
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity,
'-X', 'disable_freshness=yes'])
f_env = realm.special_env('freshness', True, kdc_conf=freshness_kdc_conf)
realm.stop_kdc()
realm.start_kdc(env=f_env)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity])
# With freshness required by the KDC, a client that disables freshness
# tokens must fail preauthentication.
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_identity,
'-X', 'disable_freshness=yes'],
expected_code=1, expected_msg='Preauthentication failed')
# Anonymous should never require a freshness token.
realm.kinit('@%s' % realm.realm, flags=['-n', '-X', 'disable_freshness=yes'])
# Run the basic test - PKINIT with FILE: identity, with a password on the key,
# supplied by the prompter.
# Expect failure if the responder does nothing, and we have no prompter.
mark('FILE identity, password on key (prompter)')
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % file_enc_identity,
'-X', 'X509_user_identity=%s' % file_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % file_enc_identity],
password='<PASSWORD>')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
realm.run(['./adata', realm.host_princ],
expected_msg='+97: [indpkinit1, indpkinit2]')
# Run the basic test - PKINIT with FILE: identity, with a password on the key,
# supplied by the responder.
# Supply the response in raw form.
mark('FILE identity, password on key (responder)')
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % file_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % file_enc_identity,
'-X', 'X509_user_identity=%s' % file_enc_identity,
realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % file_enc_identity,
'-p', '%s=%s' % (file_enc_identity, 'encrypted'), realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with no password on the key.
mark('DIR identity, no password')
os.mkdir(path)
os.mkdir(path_enc)
shutil.copy(privkey_pem, os.path.join(path, 'user.key'))
shutil.copy(privkey_enc_pem, os.path.join(path_enc, 'user.key'))
shutil.copy(user_pem, os.path.join(path, 'user.crt'))
shutil.copy(user_pem, os.path.join(path_enc, 'user.crt'))
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % dir_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with a password on the key, supplied by the
# prompter.
# Expect failure if the responder does nothing, and we have no prompter.
mark('DIR identity, password on key (prompter)')
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % dir_file_enc_identity,
'-X', 'X509_user_identity=%s' % dir_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % dir_enc_identity],
password='<PASSWORD>')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with DIR: identity, with a password on the key, supplied by the
# responder.
# Supply the response in raw form.
mark('DIR identity, password on key (responder)')
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % dir_file_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % dir_file_enc_identity,
'-X', 'X509_user_identity=%s' % dir_enc_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % dir_enc_identity,
'-p', '%s=%s' % (dir_file_enc_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with no password on the bundle.
mark('PKCS12 identity, no password')
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with a password on the bundle, supplied by the
# prompter.
# Expect failure if the responder does nothing, and we have no prompter.
mark('PKCS12 identity, password on bundle (prompter)')
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p12_enc_identity,
'-X', 'X509_user_identity=%s' % p12_enc_identity, realm.user_princ],
expected_code=2)
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_enc_identity],
password='<PASSWORD>')
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
# PKINIT with PKCS12: identity, with a password on the bundle, supplied by the
# responder.
# Supply the response in raw form.
mark('PKCS12 identity, password on bundle (responder)')
realm.run(['./responder', '-x', 'pkinit={"%s": 0}' % p12_enc_identity,
'-r', 'pkinit={"%s": "encrypted"}' % p12_enc_identity,
'-X', 'X509_user_identity=%s' % p12_enc_identity, realm.user_princ])
# Supply the response through the convenience API.
realm.run(['./responder', '-X', 'X509_user_identity=%s' % p12_enc_identity,
'-p', '%s=%s' % (p12_enc_identity, 'encrypted'),
realm.user_princ])
realm.klist(realm.user_princ)
realm.run([kvno, realm.host_princ])
mark('pkinit_cert_match rules')
# Match a single rule.
rule = '<SAN>^<EMAIL>$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
# Regression test for #8670: match a UPN SAN with a single rule.
rule = '<SAN>^<EMAIL>$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_upn_identity])
realm.klist(realm.user_princ)
# Match a combined rule (default prefix is &&).
rule = '<SUBJECT>CN=user$<KU>digitalSignature,keyEncipherment'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
# Fail an && rule.
rule = '&&<SUBJECT>O=OTHER.COM<SAN>^<EMAIL>$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
msg = 'kinit: Certificate mismatch while getting initial credentials'
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity],
expected_code=1, expected_msg=msg)
# Pass an || rule.
rule = '||<SUBJECT>O=KRBTEST.COM<SAN>^<EMAIL>$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % p12_identity])
realm.klist(realm.user_princ)
# Fail an || rule.
rule = '||<SUBJECT>O=OTHER.COM<SAN>^<EMAIL>$'
realm.run([kadminl, 'setstr', realm.user_princ, 'pkinit_cert_match', rule])
msg = 'kinit: Certificate mismatch while getting initial credentials'
realm.kinit(realm.user_princ,
flags=['-X', 'X509_user_identity=%s' % | |
'horizon', where all elements are equal to the last element of 'series'
Parameters
----------
series : ndarray
The data to use for the naive forecast
horizon : int
The length of the forecast
Returns
-------
ndarray
A simple naive forecast (Shape: ('horizon', ))
"""
return np.array([series[-1]] * horizon)
def persist_forecast(x_train, x_test, y_train, forecast_horizon, periodicity=None, seasonality=1, decomposed=False, alpha=1.96):
    """
    Fit and validate a naive (persistence) forecast, optionally on seasonally
    adjusted (STL-decomposed) data.

    The persistence forecast over the training windows yields residuals
    against the training targets; their per-step standard deviation gives
    symmetric confidence bounds around the persistence forecast of the test
    windows.

    Parameters
    ----------
    x_train : ndarray
        The input training dataset. (Shape: (len(train data), forecast_horizon))
    x_test : ndarray
        The input test dataset. (Shape: (len(test data), forecast_horizon))
    y_train : ndarray
        The targets (actual values) of the training dataset.
        (Shape: (len(train data), forecast_horizon))
    forecast_horizon : int
        Number of future steps to be forecasted
    periodicity : int, default = None
        The periodicity of the sequence; required by the STL decomposition
        when the input is an ndarray.
    seasonality : int, default = 1
        Length of the seasonal smoother. Must be an odd integer, and should
        normally be >= 7
    decomposed : bool, default = False
        If True, seasonally adjust the inputs before forecasting.
        NOTE: x_train and x_test are modified in place (column-wise) here.
    alpha : float, default = 1.96
        Width multiplier of the confidence interval (1.96 ~ 95% confidence).

    Returns
    -------
    ndarray
        Expected forecast values for each test sample over the horizon.
        (Shape: (len(x_test), forecast_horizon))
    ndarray
        The upper interval for the forecasts. (Shape: (len(x_test), forecast_horizon))
    ndarray
        The lower interval for the forecasts. (Shape: (len(x_test), forecast_horizon))
    """
    if decomposed:
        print('Train a naive timeseries model cleared with seasonal decomposition (STL)...')
    else:
        print('Train a naive (persistence) timeseries model...')

    n_train = len(x_train)
    n_test = len(x_test)

    if decomposed:
        # Seasonally adjust every horizon step (column) separately.
        for col in range(forecast_horizon):
            x_train[:, col] = seasonal_decomposed(np.reshape(x_train[:, col], (n_train, 1)),
                                                  periodicity=periodicity, seasonality=seasonality)
            x_test[:, col] = seasonal_decomposed(np.reshape(x_test[:, col], (n_test, 1)),
                                                 periodicity=periodicity, seasonality=seasonality)

    # Persistence forecast on the training windows and the resulting
    # residuals (observation minus fitted value).
    point_forecast_train = np.zeros(shape=(n_train, forecast_horizon))
    residuals = np.zeros(shape=(n_train, forecast_horizon))
    for row in range(n_train):
        point_forecast_train[row, :] = simple_naive(x_train[row, :], forecast_horizon)
        residuals[row, :] = y_train[row, :] - point_forecast_train[row, :]

    # Per-step (column-wise) standard deviation of the training residuals.
    std_dev = np.std(residuals, axis=0).reshape(1, forecast_horizon)

    # Persistence forecast over the test windows.
    expected_value = np.zeros(shape=(n_test, forecast_horizon))
    for row in range(n_test):
        expected_value[row, :] = simple_naive(x_test[row, :], forecast_horizon)

    fc_u = expected_value + alpha * std_dev
    fc_l = expected_value - alpha * std_dev

    if decomposed:
        print('Training and validating naive (STL) completed.')
    else:
        print('Training and validating naive (persistence) model completed.')
    return expected_value, fc_u, fc_l
def seasonal_naive(series, forecast_horizon, periodicity, seasonality=1):
    """
    Build a seasonal naive forecast by repeating seasonally lagged values.

    Each forecast step takes the value one season back in *series*
    (negative indexing from the end); once a step reaches past one full
    season, the lookup reaches back an extra season instead.

    Parameters
    ----------
    series : ndarray
        The data to use for the seasonal naive forecast
    forecast_horizon : int
        Number of future steps to be forecasted
    periodicity : int
        The periodicity of the sequence.
    seasonality : int, default = 1
        Length of the seasonal smoother. Must be an odd integer, and should
        normally be >= 7

    Returns
    -------
    ndarray
        A seasonal naive forecast (Shape: (forecast_horizon,))
    """
    season_len = periodicity * seasonality
    forecast = np.empty([forecast_horizon])
    for step in range(forecast_horizon):
        # Past one full season the single-season lag would index observed
        # data from the wrong end, so reach back two seasons instead.
        lag = 2 * season_len if step + 1 > season_len else season_len
        forecast[step] = series[step - lag]
    return forecast
def seasonal_decomposed(series, periodicity=None, seasonality=1):
    """
    Return the series after seasonal adjustment, i.e. Y(t) - S(t).

    Parameters
    ----------
    series : ndarray
        The series to be seasonally decomposed
    periodicity : int
        The periodicity of the sequence.
    seasonality : int, default = 1
        Length of the seasonal smoother. Must be an odd integer, and should
        normally be >= 7

    Returns
    -------
    ndarray
        The seasonally adjusted series (observations minus the STL
        seasonal component).
    """
    data = series.squeeze()
    # STL splits the series into trend/seasonal/residual; only the
    # seasonal component is subtracted here.
    decomposition = STL(data, period=periodicity, seasonal=seasonality).fit()
    return data - decomposition.seasonal
def seasonal_forecast(x_train, x_test, y_train, forecast_horizon, periodicity=1, seasonality=1, decomposed=False, alpha=1.96):
    """
    Fit and validate a seasonal naive forecast with the given seasonality.

    The seasonal naive forecast over the training windows yields residuals
    against the training targets; their per-step standard deviation gives
    symmetric confidence bounds around the seasonal naive forecast of the
    test windows.

    Parameters
    ----------
    x_train : ndarray
        The input training dataset (Shape: (len(train data), forecast_horizon))
    x_test : ndarray
        The input test dataset (Shape: (len(test data), forecast_horizon))
    y_train : ndarray
        The targets (actual values) of the training dataset.
        (Shape: (len(train data), forecast_horizon))
    forecast_horizon : int
        Number of future steps to be forecasted
    periodicity : int, default = 1
        The periodicity of the sequence.
    seasonality : int, default = 1
        Length of the seasonal smoother. Must be an odd integer, and should
        normally be >= 7
    decomposed : bool, default = False
        If True, seasonally adjust the inputs before forecasting.
        NOTE: x_train and x_test are modified in place (column-wise) here.
    alpha : float, default = 1.96
        Width multiplier of the confidence interval (1.96 ~ 95% confidence).

    Returns
    -------
    ndarray
        Expected forecast values for each test sample over the horizon.
        (Shape: (len(x_test), forecast_horizon))
    ndarray
        The upper interval for the forecasts. (Shape: (len(x_test), forecast_horizon))
    ndarray
        The lower interval for the forecasts. (Shape: (len(x_test), forecast_horizon))
    """
    print('Train a seasonal naive timeseries model with seasonality=', seasonality, '...')

    n_train = len(x_train)
    n_test = len(x_test)

    if decomposed:
        # Seasonally adjust every horizon step (column) separately.
        for col in range(forecast_horizon):
            x_train[:, col] = seasonal_decomposed(np.reshape(x_train[:, col], (n_train, 1)),
                                                  periodicity, seasonality)
            x_test[:, col] = seasonal_decomposed(np.reshape(x_test[:, col], (n_test, 1)),
                                                 periodicity, seasonality)

    # Seasonal naive fit on the training windows and the resulting
    # residuals (observation minus fitted value).
    naive_forecast = np.zeros(shape=(n_train, forecast_horizon))
    residuals = np.zeros(shape=(n_train, forecast_horizon))
    for row in range(n_train):
        naive_forecast[row, :] = seasonal_naive(x_train[row, :], forecast_horizon,
                                                periodicity, seasonality)
        residuals[row, :] = y_train[row, :] - naive_forecast[row, :]

    # Per-step (column-wise) standard deviation of the training residuals.
    std_dev = np.std(residuals, axis=0).reshape(1, forecast_horizon)

    # Seasonal naive forecast over the test windows.
    expected_value = np.zeros(shape=(n_test, forecast_horizon))
    for row in range(n_test):
        expected_value[row, :] = seasonal_naive(x_test[row, :], forecast_horizon,
                                                periodicity, seasonality)

    fc_u = expected_value + alpha * std_dev
    fc_l = expected_value - alpha * std_dev

    print('Training and validating seasonal naive model completed.')
    return expected_value, fc_u, fc_l
def exp_smoothing(y_train, y_test, forecast_horizon=1, limit_steps=False, pi_alpha=1.96, online = True):
"""
Train an exponential smoothing timeseries model (ETS)
Parameters
----------
y_train : pandas.DataFrame
The train values of the target variable
y_test : pandas.DataFrame
Values of exogenous features
forecast_horizon : int, default = 1
Number of future steps to be forecasted
limit_steps : int, default = False
limits the number of simulation/predictions into the future. If False, steps is equal to length of validation set
pi_alpha : float, default = 1.96
Measure to adjust confidence interval, default is set to 1.96, which is a 95% PI
online : bool, default = True
if True the new observations are used to fit the model again
Returns
-------
ndarray
Expected forecast values for each test sample over the forecast horizon.
(Shape: (len(y_train),forecast_horizon))
ndarray
The upper interval for the given forecasts. (Shape: (1,forecast_horizon))
ndarray
The lower interval for the forecasts. (Shape: (1,forecast_horizon))
"""
print('Train an exponential smoothing timeseries model (ETS)...')
num_cores = max(multiprocessing.cpu_count()-2,1)
model = ETSModel(y_train, error="add", trend="add", damped_trend=True, seasonal="add",
dates=y_train.index)
fit = model.fit()
def ets_predict(i):
if online:
# extend the train-series with observed values as we move forward in the prediction horizon
# to achieve a receding window prediction
y_train_i = pd.concat([y_train, y_test.iloc[0:i]])
model = ETSModel(y_train_i, error="add", trend="add", damped_trend=True, seasonal="add",
dates=y_train_i.index)
fit = model.fit()
# There are several different ETS methods available:
# - forecast: makes out of sample predictions
# - predict: in sample and out of sample predictions
# - simulate: runs simulations of the statespace model
# - get_prediction: in sample and out of sample predictions, as well as prediction intervals
pred = fit.get_prediction(start=y_test.index[i], end=y_test.index[
i + forecast_horizon - 1]).summary_frame() # with: method = 'simulated', simulate_repetitions=100 we can simulate the PI's
## --plotting current prediction--
# plt.rcParams['figure.figsize'] = (12, 8)
# pred["mean"].plot(label='mean prediction')
# pred["pi_lower"].plot(linestyle='--', color='tab:blue', label='95% interval')
# pred["pi_upper"].plot(linestyle='--', color='tab:blue', | |
distributed around the
location given by the current state of the object, i.e., the current
parameter values. In each direction, the walkers are randomly distributed
with a Gaussian distribution, whose default standard deviation is one.
The scales argument can be used to control the width of Gaussians used
to distribute the walkers.
sampleArgs : dictionary, optional
Numbers controlling the sampling process. Use 'burn' (int) to specify
the number of burn-in iterations (default is 0). Via 'iters' (int)
the numbers of iterations after the burn-in can be specified (default 1000).
The 'progress' (int) key can be used to control the number of iterations after
which the progress bar is updated (default is iters/100). Note that the
'progressbar' package must be installed to get a progress bar. Otherwise
more mundane print statements will be used.
priors : dictionary, optional
For each parameter, a prior can be specified. In particular, a
prior is a callable, which is called with two arguments: first, a
dictionary mapping the names of the free parameters to their
current values, and second, a string specifying the name of the
parameter for which the prior is to apply. The return value must be
the logarithmic prior probability (natural logarithm). A number of default
priors are available in the form of the `FuFPrior` class. By
default, a uniform (improper) prior is used for all parameters, for
which no other prior was specified.
pots : list, optional
A list of 'potentials'. A potential is a function, which is called using
a dictionary holding the current value for all parameters and returns
the logarithm of the associated probability. Potentials may, e.g., be
used to implement certain relations between parameter values not otherwise
accounted for.
dbfile : string, optional
The result of the sampling, i.e., the chain(s), the corresponding
values of the posterior, and the names of the free parameters are
saved to the specified file (by default 'chain.emcee' is used).
The traces stored there can be analyzed using the 'TraceAnalysis'
class. Set this parameter to 'None' to avoid saving the results.
ps : tuple, optional
A tuple holding the current position and state of the sampler. This
tuple is returned by this method. The `ps` argument can be used
to continue sampling from the last state. Note that no burn-in will
be carried out and the other arguments should be given as previously
to continue sampling successfully.
emcp : dictionary, optional
Extra arguments handed to `EnsembleSampler` object.
toMD : boolean, optional
If True (default), the object is set to the lowest-deviance solution
after sampling. Otherwise, it remains in a random state.
"""
if not ic.check["emcee"]:
raise(PE.PyARequiredImport("Could not import the 'emcee' package.",
solution="Please install 'emcee'."))
if (not x is None) and (not y is None) and (not yerr is None):
# Assign attributes and check x, y, and yerr.
self._fufDS = FufDS(x, y, yerr)
elif (not x is None) and (not y is None) and (yerr is None):
raise(PE.PyAValError("An error on the y values is required.",
where="fitEMCEE",
solution="Please specify 'yerr'"))
if self._fufDS is None:
raise(PE.PyAValError("Please specify the data completely.",
where="fitEMCEE",
solution="Specify x, y, and yerr."))
if not self._fufDS.xyyerrDefined():
raise(PE.PyAValError("Please specify the data completely. Either of x, y, and/or yerr are missing.",
where="fitEMCEE",
solution="Specify x, y, and yerr."))
# Names and values of free parameters
fps = self.freeParameters()
# Names of the free parameters in specific order
fpns = self.freeParamNames()
# Number of dimensions
ndims = len(fps)
if ndims == 0:
raise(PE.PyAValError("At least one free parameter is required for sampling.",
where="fitEMCEE",
solution="Use 'thaw' to free same parameters."))
if not dbfile is None:
if re.match(".*\.emcee$", dbfile) is None:
PE.warn(PE.PyAValError("The db filename (" + str(dbfile) + ") does not end in .emcee. TraceAnalysis will not recognize it as an emcee trace file.",
solution="Use a filename of the form *.emcee"))
# Number of walkers
if nwalker is None:
self.nwalker = ndims * 2
else:
self.nwalker = nwalker
if self.nwalker < ndims * 2:
raise(PE.PyAValError("The number of walkers must be at least twice the number of free parameters.",
where="fitEMCEE",
solution="Increase the number of walkers."))
if self.nwalker % 2 == 1:
raise(PE.PyAValError("The number of walkers must be even.",
where="fitEMCEE",
solution="Use an even number of walkers."))
# Use default prior for those parameters not listed
if priors is None:
priors = {}
for n in fpns:
if not n in priors:
priors[n] = FuFPrior("uniform")
# Ensure that potentials is at least an empty list
if pots is None:
pots = []
# Chi square calculator
chisqr = self.__chiSqr()
def likeli(names, vals):
# The likelihood function
likeli = -0.5 * chisqr(vals)
return likeli
def lnpostdf(values):
# Parameter-Value dictionary
ps = dict(zip(fpns, values))
# Check prior information
prior_sum = 0
for name in fpns:
prior_sum += priors[name](ps, name)
# If log prior is negative infinity, parameters
# are out of range, so no need to evaluate the
# likelihood function at this step:
pdf = prior_sum
if pdf == -np.inf:
return pdf
# Likelihood
pdf += likeli(fpns, values)
# Add information from potentials
for p in pots:
pdf += p(ps)
if np.isnan(pdf):
raise(PE.PyAValError("Posterior value is NaN for parameters: " + str(self.parameters()) + ".", \
where="fitEmcee", \
solution="Possibly, a prior (e.g., 'limuniform') can be used to restrict parameter range. " + \
"Note that restrictions are not automatically converted into priors."))
return pdf
# Set default values for sampleArgs
if sampleArgs is None:
sampleArgs = {}
if not "burn" in sampleArgs:
sampleArgs["burn"] = 0
if not "iters" in sampleArgs:
sampleArgs["iters"] = 1000
if not "progress" in sampleArgs:
sampleArgs["progress"] = sampleArgs["iters"] / 100
if ps is None:
if emcp is None:
emcp = {}
# Generate the sampler
self.emceeSampler = emcee.EnsembleSampler(
self.nwalker, ndims, lnpostdf, **emcp)
if scales is None:
scales = {}
# Generate starting values
pos = []
rrs = self.pars.getRestrictions()
for _ in smo.range(self.nwalker):
pos.append(np.zeros(ndims))
for i, n in enumerate(fpns):
if not n in scales:
s = 1.0
else:
s = scales[n]
# Trial counter -- avoid values beyond restrictions
tc = 0
while True:
if tc == 100:
raise(PE.PyAAlgorithmFailure("Could not determine valid starting point for parameter: " + str(fps) + " due to restrictions", \
where="fitEmcee", \
solution=["Try to use 'scale' to limit range of trial starting values.", \
"Change starting value before MCMC call into valid range."]))
propval = np.random.normal(fps[n], s)
if n in rrs:
# There is a restriction
if (not rrs[n][0] is None) and (propval < rrs[n][0]):
tc += 1
continue
if (not rrs[n][1] is None) and (propval > rrs[n][1]):
tc += 1
continue
break
pos[-1][i] = propval
# Default value for state
state = None
if sampleArgs["burn"] > 0:
# Run burn-in
pos, prob, state = self.emceeSampler.run_mcmc(
pos, sampleArgs["burn"])
# Reset the chain to remove the burn-in samples.
self.emceeSampler.reset()
else:
# Assign position and state from previous run
pos, state = ps
if (not sampleArgs["progress"] is None) and ic.check["progressbar"]:
widgets = ['EMCEE progress: ', progressbar.Percentage(), ' ', progressbar.Bar(marker=progressbar.RotatingMarker()),
' ', progressbar.ETA()]
pbar = progressbar.ProgressBar(
widgets=widgets, maxval=sampleArgs["iters"]).start()
n = 0
# Manage emcee 2/3 incompatibility
try:
sit = self.emceeSampler.sample(pos, rstate0=state, iterations=sampleArgs["iters"], thin=1, storechain=True)
except TypeError:
# emcee 3
sit = self.emceeSampler.sample(pos, rstate0=state, iterations=sampleArgs["iters"], thin=1, store=True)
for pos, prob, state in sit:
n += 1
if (not sampleArgs["progress"] is None) and (n % sampleArgs["progress"] == 0):
if ic.check["progressbar"]:
pbar.update(n)
else:
print("EMCEE: Reached iteration ",
n, " of ", sampleArgs["iters"])
# Save the chain to a file
if not dbfile is None:
np.savez_compressed(open(dbfile, 'wb'), chain=self.emceeSampler.chain, lnp=self.emceeSampler.lnprobability,
pnames=np.array(fpns, dtype=np.unicode_))
if toMD:
# Set to lowest-deviance (highest likelihood) solution
indimin = np.argmax(self.emceeSampler.lnprobability)
for i, p in enumerate(self.freeParamNames()):
self[p] = self.emceeSampler.flatchain[indimin, i]
return pos, state
def _resolveMinAlgo(self, minAlgo, default=None, mAA=None):
"""
Resolve minimization algorithm (minAlgo).
Parameters
----------
minAlgo : callable, string, or None
If None, the default will be used. If it is
a callable, it will | |
filelines[i][0] == '-' and i < 25:
startLine = i + 1
if startLine == 0:
logger.error('Sounding file format does not follow format standards!')
self._weatherLoaded = False
# Import data into variables
for line in filelines[startLine:]:
try:
data = line.split()
if len(data) == 11:
PRESS.append(float(data[0]))
HGHT.append(float(data[1]))
TEMP.append(float(data[2]))
DRCT.append(float(data[6]))
SKNT.append(float(data[7]))
except ValueError:
# End of useful data
break
logger.debug('Data imported.')
if self._process_sounding_data(PRESS, HGHT, TEMP, DRCT, SKNT):
self._weatherLoaded = True
logger.debug('Weather successfully loaded.')
else:
self._weatherLoaded = False
return
def _process_sounding_data(self, PRESS, HGHT, TEMP, DRCT, SKNT):
    """
    Process raw profile data fetched from the sounding file.

    Takes pressure, altitude, temperature, wind direction and wind speed
    profiles, cleans them up (duplicate altitudes, NaN samples), extends
    them to cover the launch site elevation and the maximum flight
    altitude, interpolates them with smoothing splines and stores the
    resulting accessors in the instance attributes getTemperature,
    getPressure, getWindDirection, getWindSpeed, getDensity and
    getViscosity.

    This method is independent of the sounding file format.

    Returns
    -------
    bool
        True on success, False if any of the profiles came out empty.
    """
    # Convert to Numpy arrays
    HGHT = numpy.array(HGHT)
    PRESS = numpy.array(PRESS)
    TEMP = numpy.array(TEMP)
    DRCT = numpy.array(DRCT)
    SKNT = numpy.array(SKNT)

    # Remove duplicate altitudes and trim the other data accordingly
    HGHT, indexes = numpy.unique(HGHT, return_index=True)
    PRESS = PRESS[indexes]
    TEMP = TEMP[indexes]
    DRCT = DRCT[indexes]
    SKNT = SKNT[indexes]
    logger.debug('Duplicate altitudes removed.')

    # Remove NaN entries, if any.
    # BUG FIX: ndarray.transpose() returns a transposed view and does NOT
    # modify the array in place. The previous code discarded its result,
    # so the NaN mask was applied along the wrong axis and dropped whole
    # variables (entire rows) instead of the individual bad samples. The
    # transposes are now assigned: rows of nanValidator are per-altitude
    # samples, and any sample containing a NaN is filtered out.
    nanValidator = numpy.array([HGHT, PRESS, TEMP, DRCT, SKNT]).transpose()
    nanFree = nanValidator[~numpy.isnan(nanValidator).any(1)].transpose()
    HGHT = numpy.array(nanFree[0])
    PRESS = numpy.array(nanFree[1])
    TEMP = numpy.array(nanFree[2])
    DRCT = numpy.array(nanFree[3])
    SKNT = numpy.array(nanFree[4])
    logger.debug('NaN entries removed.')

    # _______________________________________________________________ #
    # Add missing data if launch site elevation or max altitude are out
    # of sounding bounds

    # If the elevation is lower than the lower bound of data, copy the
    # lowest data available and use it for the launch site elevation.
    if self.launchSiteElev < HGHT[0]:
        HGHT = numpy.insert(HGHT, 0, self.launchSiteElev)
        PRESS = numpy.insert(PRESS, 0, PRESS[0])
        TEMP = numpy.insert(TEMP, 0, TEMP[0])
        DRCT = numpy.insert(DRCT, 0, DRCT[0])
        SKNT = numpy.insert(SKNT, 0, SKNT[0])
        logger.debug('Launch site elevation out of bounds. Low altitude data generated.')

    # If the maxAltitude is higher than the upper bound of data, fill
    # the missing altitude levels in with ISA data
    if self.maxAltitude > HGHT[-1]:
        newHeights = numpy.arange(HGHT[-1] + 1, self.maxAltitude + 1,
                                  (self.maxAltitude - HGHT[-1] - 1) / 20.)
        HGHTTEMP = numpy.append(HGHT, newHeights[3:])
        for newTempHeight in newHeights[3:]:
            # Calculate new temperatures and pressures.
            _, newTemp, _, newPress, _ = tools.ISAatmosphere(
                altitude=tools.m2feet(newTempHeight))
            TEMP = numpy.append(TEMP, newTemp)
            PRESS = numpy.append(PRESS, newPress)
        # Wind speed is extended with calm (zero) values up to the
        # maximum altitude, splitting at 25 km when relevant.
        if self.maxAltitude > 25000 and HGHT[-1] < 25000:
            HGHTSKNT = numpy.append(HGHT, [25000, self.maxAltitude])
            SKNT = numpy.append(SKNT, [0, 0])
        else:
            HGHTSKNT = numpy.append(HGHT, [self.maxAltitude])
            SKNT = numpy.append(SKNT, [0])
        HGHT = numpy.append(HGHT, self.maxAltitude)
        DRCT = numpy.append(DRCT, DRCT[-1])
        logger.debug('Max altitude out of bounds. High altitude data generated.')
    else:
        HGHTTEMP = HGHT
        HGHTSKNT = HGHT

    # _______________________________________________________________ #
    # Check whether all fields have data, otherwise abort.
    if numpy.size(HGHT) == 0 or numpy.size(TEMP) == 0 or\
       numpy.size(PRESS) == 0 or numpy.size(DRCT) == 0 or\
       numpy.size(SKNT) == 0:
        logger.error('There was a problem while processing the sounding.')
        return False

    # Interpolate data with smoothing splines over altitude.
    logger.debug('Beginning interpolation...')
    temperatureInterpolation = UnivariateSpline(HGHTTEMP, TEMP,
                                                s=self._interpolationPrecision)
    pressureInterpolation = UnivariateSpline(HGHTTEMP, PRESS,
                                             s=self._interpolationPrecision)
    windDirectionInterpolation = UnivariateSpline(HGHT, DRCT,
                                                  s=self._interpolationPrecision)
    windSpeedInterpolation = UnivariateSpline(HGHTSKNT, SKNT,
                                              s=self._interpolationPrecision)

    def _altitude_accessor(spline):
        # Wrap a spline so it can be called either as f(altitude) or as
        # f(lat, lon, altitude, time); only the altitude argument is used
        # since the sounding is a single vertical profile.
        def accessor(*args):
            if len(args) == 1:
                return spline(args[0])
            elif len(args) == 4:
                return spline(args[2])
            else:
                return numpy.nan
        return accessor

    # Store the interpolators in the correct variables
    self.getTemperature = _altitude_accessor(temperatureInterpolation)
    self.getPressure = _altitude_accessor(pressureInterpolation)
    self.getWindDirection = _altitude_accessor(windDirectionInterpolation)
    self.getWindSpeed = _altitude_accessor(windSpeedInterpolation)
    logger.debug('Interpolation completed. Preparing derived functions...')

    # Initialize derived functions
    AirMolecMass = 0.02896  # molar mass of air [kg/mol]
    GasConstant = 8.31447  # universal gas constant [J/(mol K)]
    standardTempRankine = tools.c2kel(15) * (9. / 5)
    Mu0 = 0.01827  # Mu 0 (15 deg) [cP]
    C = 120  # Sutherland's Constant

    def getDensity(*args):
        # Ideal gas law; the factor 100 suggests pressure is stored in
        # hPa and converted to Pa here — confirm against the data source.
        if len(args) == 1:
            return self.getPressure(args[0]) * 100 * AirMolecMass / (
                GasConstant * tools.c2kel(self.getTemperature(args[0])))
        elif len(args) == 4:
            return self.getPressure(args[0], args[1], args[2], args[3])\
                * 100 * AirMolecMass / (GasConstant * tools.c2kel(
                    self.getTemperature(args[0],
                                        args[1],
                                        args[2],
                                        args[3])))
        else:
            return numpy.nan

    def getViscosity(*args):
        # Sutherland's formula for dynamic viscosity, cP -> Pa s.
        # The duplicated computation of the original 1-arg/4-arg branches
        # is hoisted below the temperature lookup.
        if len(args) == 1:
            tempRankine = tools.c2kel(self.getTemperature(args[0])) * (9. / 5)
        elif len(args) == 4:
            tempRankine = tools.c2kel(self.getTemperature(
                args[0], args[1], args[2], args[3])) * (9. / 5)
        else:
            return numpy.nan
        TTO = (tempRankine / standardTempRankine) ** 1.5  # T/TO [Rankine/Rankine]
        TR = ((0.555 * standardTempRankine) + C) / ((0.555 * tempRankine) + C)
        vcP = Mu0 * TTO * TR
        return vcP / 1000.

    self.getDensity = getDensity
    self.getViscosity = getViscosity
    logger.debug('All derived functions ready.')
    return True
def make_perturbedWind(self, iDevTime, iDevSpace, randomChance, resultType=None):
    """
    Build and return a closure that reports wind perturbed by measured
    time and space deviation profiles.

    iDevTime and iDevSpace select which deviation profile to apply; the
    four entries of randomChance decide, per u/v component and per
    time/space deviation, whether the deviation is subtracted (draw < 0.5)
    or added. resultType selects what the closure returns: None for a
    (direction, speed) tuple, 'direction' or 'speed' for a single value.
    """
    # Smooth the selected deviation profiles over altitude.
    dev_u_time = UnivariateSpline(wind_time_perturbation.AltitudesM[:31],
                                  wind_time_perturbation.M_U_Kts[iDevTime], s=5)
    dev_v_time = UnivariateSpline(wind_time_perturbation.AltitudesM[:31],
                                  wind_time_perturbation.M_V_Kts[iDevTime], s=5)
    dev_u_space = UnivariateSpline(wind_space_perturbation.AltitudesM[:31],
                                   wind_space_perturbation.M_U_Kts[iDevSpace], s=5)
    dev_v_space = UnivariateSpline(wind_space_perturbation.AltitudesM[:31],
                                   wind_space_perturbation.M_V_Kts[iDevSpace], s=5)

    def perturbedWind(*args):
        # Accept either (altitude) or (lat, lon, altitude, time).
        if len(args) == 1:
            altitude = args[0]
        elif len(args) == 4:
            altitude = args[2]
        else:
            return numpy.nan

        # Non-perturbed wind as u- and v-components.
        u_kts, v_kts = tools.dirspeed2uv(self.getWindDirection(altitude),
                                         self.getWindSpeed(altitude))

        # Each random draw below 0.5 subtracts its deviation, otherwise
        # the deviation is added. Deviations scale with the time and
        # distance from the sounding, respectively.
        sign = [-1 if chance < 0.5 else 1 for chance in randomChance[:4]]
        u_kts += sign[0] * dev_u_time(altitude) * self.timeFromSounding
        v_kts += sign[1] * dev_v_time(altitude) * self.timeFromSounding
        u_kts += sign[2] * dev_u_space(altitude) * self.distanceFromSounding
        v_kts += sign[3] * dev_v_space(altitude) * self.distanceFromSounding

        # Reconvert wind u- and v-components to direction and speed.
        new_dir, new_spd = tools.uv2dirspeed(u_kts, v_kts)
        if resultType is None:
            return new_dir, new_spd
        elif resultType == 'direction':
            return new_dir
        elif resultType == 'speed':
            return new_spd
        else:
            return None
    return perturbedWind
def perturbWind(self, numberOfFlights):
"""
Perturb the wind profiles for the purpose of Monte Carlo simulations.
Given the numberOfFlights, this method generates N different perturbed
wind profiles, where N = numberOfFlights, and stores them in the
getMCWindDirection and getMCWindSpeed lists. The perturbation is based
on the distance and time from sounding and is performed by picking a
random experimentally-measured perturbation and applying it to each
wind profile.
Wind data should then be requested using the following syntax:
getMCWindDirection[flightNumber].getTemperature(lat,lon,alt,time)
where flightNumber should be in the range 0...numberOfFlights-1.
"""
# Before anything, check that the weather is loaded.
if not self._weatherLoaded:
logger.error(
'Weather not loaded! You need to load a sounding or download weather before perturbing the wind!')
return
self.getMCWindDirection = []
self.getMCWindSpeed = []
# Apply perturbations to flights
for _ in range(numberOfFlights):
# Pick a random deviation out of the available ones.
devTime = numpy.random.random_integers(0, 1758)
devSpace = numpy.random.random_integers(0, 896)
randChance = numpy.random.random(4)
# Perturb and store
self.getMCWindDirection.append(self.make_perturbedWind(devTime,
devSpace, randChance, 'direction'))
self.getMCWindSpeed.append(self.make_perturbedWind(devTime, devSpace,
randChance, 'speed'))
return None
class forecastEnvironment(environment):
"""
Class responsible for downloading weather forecast data from the Global
Forecast System (GFS) and generating a forecast-based atmospheric model.
Parameters
----------
launchSiteLat : float
latitude of the launch site [deg]
launchSiteLon : float
longitude of the launch site [deg]
launchSiteElev : float
elevation of the launch site above Mean Sea Level [m]
dateAndTime : :obj:`datetime.datetime`
The launch time
UTC_offset : float
the offset in hours between the current time zone and UTC (for example,
Florida in winter has a UTC_offset = -5)
inflationTemperature : float
the ambient temperature during the balloon inflation [degC]
[forceNonHD] : bool (default False)
if TRUE, the weather forecast download will be forced to a lower
resolution (i.e. 1deg x 1deg)
[forecastDuration] : float (default 4)
The number of hours from dateAndTime for which to download weather data
[use_async] | |
from __future__ import division, absolute_import, print_function
from rdkit import Chem, Geometry
from simtk.openmm import app, Vec3
from simtk import unit
import itertools
import pyparsing as pyp
from itertools import groupby
# Standard PDB residue names for biopolymers. These lists form the "keep" set
# used in openmmTop_to_rdmol: atoms whose residue name is NOT in one of these
# lists are treated as heteroatoms (ligands, waters, ions, ...).
proteinResidues = ['ALA', 'ASN', 'CYS', 'GLU', 'HIS',
                   'LEU', 'MET', 'PRO', 'THR', 'TYR',
                   'ARG', 'ASP', 'GLN', 'GLY', 'ILE',
                   'LYS', 'PHE', 'SER', 'TRP', 'VAL']
rnaResidues = ['A', 'G', 'C', 'U', 'I']
dnaResidues = ['DA', 'DG', 'DC', 'DT', 'DI']
def rdmol_to_openmmTop(mol, confId = 0):
    """
    This function converts an rdmol to an openmm topology
    The rdmol coordinates are assumed to be in Angstrom unit

    Parameters:
    -----------
    mol: rdmol molecule
        The molecule to convert
    confId: int
        The id of the conformer from which coordinates will be taken from `mol`

    Return:
    -------
    topology : OpenMM Topology
        The generated OpenMM topology
    positions : OpenMM Quantity
        The molecule atom positions associated with the
        generated topology in Angstrom units

    Raises:
    -------
    ValueError
        If the atom or bond counts of the generated topology do not match
        the input molecule.
    """
    # Round-trip through a PDB block so every atom carries PDB residue info
    # (chain id, residue number/name, atom name), which the grouping below
    # relies on. NOTE(review): this may drop data not representable in PDB
    # (e.g. formal charges) - confirm.
    mol = Chem.MolFromPDBBlock(Chem.MolToPDBBlock(mol))
    topology = app.Topology()
    rdk_atom_to_openmm = {}
    # FIX: map chain id -> OpenMM Chain. The original kept a "seen" set plus
    # the last-created chain variable, so if residues of different chains
    # appeared interleaved in atom order, a residue could be attached to the
    # wrong chain. A dict keeps behavior identical for contiguous chains and
    # correct otherwise.
    chain_map = {}
    # groupby only merges *consecutive* atoms with equal keys; atoms parsed
    # from a PDB block are ordered by residue, which this relies on.
    atoms_grouped = groupby(mol.GetAtoms(), lambda atm :
                            (atm.GetPDBResidueInfo().GetChainId(),
                             atm.GetPDBResidueInfo().GetResidueNumber(),
                             atm.GetPDBResidueInfo().GetResidueName()))
    for key, residue in atoms_grouped:
        chainId, resNum, resName = key
        if chainId not in chain_map:
            chain_map[chainId] = topology.addChain(chainId)
        openmm_chain = chain_map[chainId]
        openmm_res = topology.addResidue(resName, openmm_chain)
        for atm in residue:
            element = app.element.Element.getByAtomicNumber(atm.GetAtomicNum())
            openmm_at = topology.addAtom(atm.GetPDBResidueInfo().GetName().strip() , element, openmm_res)
            openmm_at.index = atm.GetIdx()
            rdk_atom_to_openmm[atm.GetIdx()] = openmm_at
    if topology.getNumAtoms() != mol.GetNumAtoms():
        raise ValueError("OpenMM topology and RDMol number of atoms mismatching: "
                         "OpenMM = {} vs RDMol = {}".format(topology.getNumAtoms(), mol.GetNumAtoms()))

    # Count the number of bonds in the openmm topology
    omm_bond_count = 0

    def IsAmideBond(rdk_bond):
        # This supporting function checks if the passed bond is an amide bond or not.
        # Our definition of amide bond C-N between a Carbon and a Nitrogen atom is:
        #          O
        #          ║
        #  CA or O-C-N-
        #          |
        # The amide bond C-N is a single bond
        if str(rdk_bond.GetBondType()) != "SINGLE" :
            return False
        atomB, atomE = rdk_bond.GetBeginAtom(), rdk_bond.GetEndAtom()
        # The amide bond is made by Carbon and Nitrogen atoms
        if not (atomB.GetAtomicNum() == 6 and atomE.GetAtomicNum() == 7 or
                (atomB.GetAtomicNum() == 7 and atomE.GetAtomicNum() == 6)):
            return False
        # Select Carbon and Nitrogen atoms
        if atomB.GetAtomicNum() == 6 :
            C_atom = atomB
            N_atom = atomE
        else:
            C_atom = atomE
            N_atom = atomB
        # Carbon and Nitrogen atoms must have 3 neighbour atoms
        if not (C_atom.GetDegree() == 3 and N_atom.GetDegree() == 3): #CESHI
            return False
        double_bonds, single_bonds = 0, 0
        for bond in C_atom.GetBonds():
            # The C-O bond can be single or double.
            if (bond.GetBeginAtom().GetAtomicNum() == 6 and bond.GetEndAtom().GetAtomicNum() == 8 ) or (bond.GetBeginAtom().GetAtomicNum() == 8 and bond.GetEndAtom().GetAtomicNum() == 6 ):
                if str(bond.GetBondType()) == "DOUBLE":
                    double_bonds += 1
                if str(bond.GetBondType()) == "SINGLE":
                    single_bonds += 1
            # The CA-C bond is single
            if (bond.GetBeginAtom().GetAtomicNum() == 6 and bond.GetEndAtom().GetAtomicNum() == 6 ):
                if str(bond.GetBondType()) == "SINGLE":
                    single_bonds += 1
        # Just one double and one single bonds are connected to C
        # In this case the bond is an amide bond
        if double_bonds == 1 and single_bonds == 1:
            return True
        else:
            return False

    # Creating bonds
    for bond in mol.GetBonds():
        omm_bond_count += 1
        # Set the bond type
        bond_order = bond.GetBondTypeAsDouble()
        if IsAmideBond(bond):
            omm_bond_type = "Amide"
        elif bond_order == 1.0:
            omm_bond_type = "Single"
        elif bond_order == 2.0:
            omm_bond_type = "Double"
        elif bond_order == 3.0:
            omm_bond_type = "Triple"
        elif bond_order == 1.5:
            omm_bond_type = "Aromatic"
        else:
            omm_bond_type = None
        topology.addBond(
            rdk_atom_to_openmm[bond.GetBeginAtom().GetIdx()],
            rdk_atom_to_openmm[bond.GetEndAtom().GetIdx()],
            type = omm_bond_type,
            order = bond_order) #CESHI the bond order calculated is a double, supposedly OpenMM takes an int value
    if omm_bond_count != mol.GetNumBonds():
        raise ValueError("OpenMM topology and RDMol number of bonds mismatching: "
                         "OpenMM = {} vs RDMol = {}".format(omm_bond_count, mol.GetNumBonds()))
    # Positions of the selected conformer, wrapped as an OpenMM Quantity in
    # Angstrom (RDKit conformer coordinates are already in Angstrom).
    coords = mol.GetConformer(confId).GetPositions()
    positions = [Vec3(v[0], v[1], v[2]) for v in coords] * unit.angstroms
    return topology, positions
def openmmTop_to_rdmol(topology, positions, verbose = False):
    """
    This function converts an OpenMM topology into a RDMol

    Parameters:
    -----------
    topology : OpenMM Topology
        The OpenMM topology
    positions : OpenMM Quantity
        The molecule atom positions associated with the
        topology
    verbose : bool
        Currently unused; kept for backward compatibility.

    Return:
    -------
    rdmol : RDMol
        The generated RDKit molecule

    Raises:
    -------
    ValueError
        If the atom or bond counts of the generated molecule do not match
        the input topology.
    """
    rdmol = Chem.RWMol()
    # Mapping dictionary between openmm atoms and rdk atoms
    openmm_atom_to_rdk_atom = {}
    # Python set used to identify atoms that are not in protein/nucleic-acid
    # residues; anything outside this set is flagged as a PDB heteroatom.
    keep = set(proteinResidues).union(dnaResidues).union(rnaResidues)
    #TODO charge info is not transferred
    for chain in topology.chains():
        chainId = str(chain.id)
        for res in chain.residues():
            # NOTE(review): res.index is the topology-wide residue index, not
            # the original PDB residue number - confirm this is intended.
            resName, resNum = res.name, int(res.index)
            for openmm_at in res.atoms():
                rdatom = Chem.Atom(openmm_at.element._atomic_number)
                info = Chem.AtomPDBResidueInfo()
                info.SetName(openmm_at.name)
                info.SetChainId(chainId)
                info.SetResidueNumber(resNum)
                info.SetResidueName(resName)
                if resName not in keep:
                    # FIX: the heteroatom flag lives on AtomPDBResidueInfo,
                    # not on Chem.Atom; the original rdatom.SetIsHeteroAtom()
                    # call raised AttributeError for any hetero residue.
                    info.SetIsHeteroAtom(True)
                rdatom.SetMonomerInfo(info)
                rdmol.AddAtom(rdatom)
                openmm_atom_to_rdk_atom[openmm_at] = rdmol.GetNumAtoms() - 1
    if topology.getNumAtoms() != rdmol.GetNumAtoms():
        raise ValueError("OpenMM topology and RDMol number of atoms mismatching: "
                         "OpenMM = {} vs RDMol = {}".format(topology.getNumAtoms(), rdmol.GetNumAtoms()))
    # Count the number of bonds in the openmm topology
    omm_bond_count = 0
    # Create the bonds
    _bondtypes = {0: Chem.BondType.UNSPECIFIED,
                  1: Chem.BondType.SINGLE,
                  1.5: Chem.BondType.AROMATIC,
                  2: Chem.BondType.DOUBLE,
                  3: Chem.BondType.TRIPLE,
                  4: Chem.BondType.QUADRUPLE,
                  5: Chem.BondType.QUINTUPLE,
                  6: Chem.BondType.HEXTUPLE,
                  7: Chem.BondType.ONEANDAHALF,}
    for omm_bond in topology.bonds():
        omm_bond_count += 1
        at0 = omm_bond[0]
        at1 = omm_bond[1]
        rd_atom0, rd_atom1 = openmm_atom_to_rdk_atom[at0], openmm_atom_to_rdk_atom[at1]
        if omm_bond.type == "Aromatic":
            #CESHI assumed by setting bond aromatic the two atoms are aromatic
            rdmol.AddBond(rd_atom0, rd_atom1, _bondtypes[1.5])
        elif omm_bond.type == "Single":
            rdmol.AddBond(rd_atom0, rd_atom1, _bondtypes[1])
        elif omm_bond.type == "Double":
            rdmol.AddBond(rd_atom0, rd_atom1, _bondtypes[2])
        elif omm_bond.type == "Triple":
            rdmol.AddBond(rd_atom0, rd_atom1, _bondtypes[3])
        elif omm_bond.type == "Amide":
            rdmol.AddBond(rd_atom0, rd_atom1, _bondtypes[int(omm_bond.order)])
        else:
            rdmol.AddBond(rd_atom0, rd_atom1, _bondtypes[0])
    # FIX: compare bond counts here. The original condition repeated the
    # atom-count check, so a bond mismatch could never be detected even
    # though the error message reported bond numbers.
    if omm_bond_count != rdmol.GetNumBonds():
        raise ValueError("OpenMM topology and RDMol number of bonds mismatching: "
                         "OpenMM = {} vs RDMol = {}".format(omm_bond_count, rdmol.GetNumBonds()))
    # Strip units: positions as plain Angstrom floats.
    pos = positions.in_units_of(unit.angstrom) / unit.angstrom
    conformer = Chem.Conformer()
    for idx,coord in enumerate(pos):
        x,y,z = [i for i in coord]
        conformer.SetAtomPosition(idx, Geometry.Point3D(x,y,z))
    rdmol.AddConformer(conformer)
    # Recompute valence/ring info without full sanitization so partially
    # specified molecules still convert.
    rdmol.UpdatePropertyCache(strict=False)
    Chem.GetSSSR(rdmol)
    return rdmol.GetMol()
def delete_shell(core_mol, del_mol, cut_off, in_out='in'):
"""
This function deletes molecules present in the passed argument
del_mol that are far (in_out=out) or close (in_out=in) than the
selected cutoff distance (in A) from the passed molecules core_mol
Parameters:
-----------
core_mol: OEMol molecule
The core molecules
del_mol: OEMol molecule
The molecules to be deleted if their distances from the core_mol
molecules are greater or closer that the selected cutoff distance
cut_off: python float number
The threshold distance in A used to mark atom for deletion
in_out: python string
A flag used to select if delete molecules far or close than
the cutoff distance from the core_mol
Return:
-------
reset_del: copy of del_mol where atoms have been deleted with
reset atom indexes
"""
def check_shell(core_mol, check_mol, cutoff):
"""
This function checks if at least one atomic distance from the passed
check_mol molecule to the core_mol molecule is less than the selected
cutoff distance in A.
Parameters:
-----------
core_mol: OEMol molecule
The core molecule
check_mol: OEMol molecule
The molecule to be checked if inside or outside a shell
surrounding the core_mole with radius equal to the cutoff
threshold
cut_off: python float number
The threshold distance in A used to mark atom inside or outside
the shell
Return:
-------
in_out: python boolean
True if at least one of check_mol atom distance from core_mole
is less than the selected cutoff threshold
"""
def sanitizeOEMolecule(molecule):
"""
This function checks if the molecule has coordinates,
explicit hydrogens, aromaticity missing and not unique
atom names. If the molecule does not have coordinates
a fatal error is raised. If the molecule does not have
hydrogens or aramatic flags are missing then a copy of
the molecule is fixed, if missing or not unique atom
names are found then a copy of the molecule is fixed
Parameters:
-----------
molecule: OEMol
The molecule to be checked
Return:
-------
mol_copy: OEMol
A copy of the checked molecule with fixed aromaticity,
hydrogens and unique atom names if they are missing
"""
def strip_water_ions(in_system):
"""
This function remove waters and ions molecules
from the input system
Parameters:
----------
in_system : oechem.OEMol
The bio-molecular system to clean
opt: python dictionary
The system option
Output:
-------
clean_system : oechem.OEMol
The cleaned system
"""
def | |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WafConfig(object):
"""
The Web Application Firewall configuration for the WAAS policy.
"""
def __init__(self, **kwargs):
"""
Initializes a new WafConfig object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param access_rules:
The value to assign to the access_rules property of this WafConfig.
:type access_rules: list[oci.waas.models.AccessRule]
:param address_rate_limiting:
The value to assign to the address_rate_limiting property of this WafConfig.
:type address_rate_limiting: oci.waas.models.AddressRateLimiting
:param captchas:
The value to assign to the captchas property of this WafConfig.
:type captchas: list[oci.waas.models.Captcha]
:param device_fingerprint_challenge:
The value to assign to the device_fingerprint_challenge property of this WafConfig.
:type device_fingerprint_challenge: oci.waas.models.DeviceFingerprintChallenge
:param good_bots:
The value to assign to the good_bots property of this WafConfig.
:type good_bots: list[oci.waas.models.GoodBot]
:param human_interaction_challenge:
The value to assign to the human_interaction_challenge property of this WafConfig.
:type human_interaction_challenge: oci.waas.models.HumanInteractionChallenge
:param js_challenge:
The value to assign to the js_challenge property of this WafConfig.
:type js_challenge: oci.waas.models.JsChallenge
:param origin:
The value to assign to the origin property of this WafConfig.
:type origin: str
:param caching_rules:
The value to assign to the caching_rules property of this WafConfig.
:type caching_rules: list[oci.waas.models.CachingRule]
:param custom_protection_rules:
The value to assign to the custom_protection_rules property of this WafConfig.
:type custom_protection_rules: list[oci.waas.models.CustomProtectionRuleSetting]
:param origin_groups:
The value to assign to the origin_groups property of this WafConfig.
:type origin_groups: list[str]
:param protection_rules:
The value to assign to the protection_rules property of this WafConfig.
:type protection_rules: list[oci.waas.models.ProtectionRule]
:param protection_settings:
The value to assign to the protection_settings property of this WafConfig.
:type protection_settings: oci.waas.models.ProtectionSettings
:param threat_feeds:
The value to assign to the threat_feeds property of this WafConfig.
:type threat_feeds: list[oci.waas.models.ThreatFeed]
:param whitelists:
The value to assign to the whitelists property of this WafConfig.
:type whitelists: list[oci.waas.models.Whitelist]
"""
self.swagger_types = {
'access_rules': 'list[AccessRule]',
'address_rate_limiting': 'AddressRateLimiting',
'captchas': 'list[Captcha]',
'device_fingerprint_challenge': 'DeviceFingerprintChallenge',
'good_bots': 'list[GoodBot]',
'human_interaction_challenge': 'HumanInteractionChallenge',
'js_challenge': 'JsChallenge',
'origin': 'str',
'caching_rules': 'list[CachingRule]',
'custom_protection_rules': 'list[CustomProtectionRuleSetting]',
'origin_groups': 'list[str]',
'protection_rules': 'list[ProtectionRule]',
'protection_settings': 'ProtectionSettings',
'threat_feeds': 'list[ThreatFeed]',
'whitelists': 'list[Whitelist]'
}
self.attribute_map = {
'access_rules': 'accessRules',
'address_rate_limiting': 'addressRateLimiting',
'captchas': 'captchas',
'device_fingerprint_challenge': 'deviceFingerprintChallenge',
'good_bots': 'goodBots',
'human_interaction_challenge': 'humanInteractionChallenge',
'js_challenge': 'jsChallenge',
'origin': 'origin',
'caching_rules': 'cachingRules',
'custom_protection_rules': 'customProtectionRules',
'origin_groups': 'originGroups',
'protection_rules': 'protectionRules',
'protection_settings': 'protectionSettings',
'threat_feeds': 'threatFeeds',
'whitelists': 'whitelists'
}
self._access_rules = None
self._address_rate_limiting = None
self._captchas = None
self._device_fingerprint_challenge = None
self._good_bots = None
self._human_interaction_challenge = None
self._js_challenge = None
self._origin = None
self._caching_rules = None
self._custom_protection_rules = None
self._origin_groups = None
self._protection_rules = None
self._protection_settings = None
self._threat_feeds = None
self._whitelists = None
@property
def access_rules(self):
"""
Gets the access_rules of this WafConfig.
The access rules applied to the Web Application Firewall. Used for defining custom access policies with the combination of `ALLOW`, `DETECT`, and `BLOCK` rules, based on different criteria.
:return: The access_rules of this WafConfig.
:rtype: list[oci.waas.models.AccessRule]
"""
return self._access_rules
@access_rules.setter
def access_rules(self, access_rules):
"""
Sets the access_rules of this WafConfig.
The access rules applied to the Web Application Firewall. Used for defining custom access policies with the combination of `ALLOW`, `DETECT`, and `BLOCK` rules, based on different criteria.
:param access_rules: The access_rules of this WafConfig.
:type: list[oci.waas.models.AccessRule]
"""
self._access_rules = access_rules
@property
def address_rate_limiting(self):
"""
Gets the address_rate_limiting of this WafConfig.
The IP address rate limiting settings used to limit the number of requests from an address.
:return: The address_rate_limiting of this WafConfig.
:rtype: oci.waas.models.AddressRateLimiting
"""
return self._address_rate_limiting
@address_rate_limiting.setter
def address_rate_limiting(self, address_rate_limiting):
"""
Sets the address_rate_limiting of this WafConfig.
The IP address rate limiting settings used to limit the number of requests from an address.
:param address_rate_limiting: The address_rate_limiting of this WafConfig.
:type: oci.waas.models.AddressRateLimiting
"""
self._address_rate_limiting = address_rate_limiting
@property
def captchas(self):
"""
Gets the captchas of this WafConfig.
A list of CAPTCHA challenge settings. These are used to challenge requests with a CAPTCHA to block bots.
:return: The captchas of this WafConfig.
:rtype: list[oci.waas.models.Captcha]
"""
return self._captchas
@captchas.setter
def captchas(self, captchas):
"""
Sets the captchas of this WafConfig.
A list of CAPTCHA challenge settings. These are used to challenge requests with a CAPTCHA to block bots.
:param captchas: The captchas of this WafConfig.
:type: list[oci.waas.models.Captcha]
"""
self._captchas = captchas
@property
def device_fingerprint_challenge(self):
"""
Gets the device_fingerprint_challenge of this WafConfig.
The device fingerprint challenge settings. Used to detect unique devices based on the device fingerprint information collected in order to block bots.
:return: The device_fingerprint_challenge of this WafConfig.
:rtype: oci.waas.models.DeviceFingerprintChallenge
"""
return self._device_fingerprint_challenge
@device_fingerprint_challenge.setter
def device_fingerprint_challenge(self, device_fingerprint_challenge):
"""
Sets the device_fingerprint_challenge of this WafConfig.
The device fingerprint challenge settings. Used to detect unique devices based on the device fingerprint information collected in order to block bots.
:param device_fingerprint_challenge: The device_fingerprint_challenge of this WafConfig.
:type: oci.waas.models.DeviceFingerprintChallenge
"""
self._device_fingerprint_challenge = device_fingerprint_challenge
@property
def good_bots(self):
"""
Gets the good_bots of this WafConfig.
A list of bots allowed to access the web application.
:return: The good_bots of this WafConfig.
:rtype: list[oci.waas.models.GoodBot]
"""
return self._good_bots
@good_bots.setter
def good_bots(self, good_bots):
"""
Sets the good_bots of this WafConfig.
A list of bots allowed to access the web application.
:param good_bots: The good_bots of this WafConfig.
:type: list[oci.waas.models.GoodBot]
"""
self._good_bots = good_bots
@property
def human_interaction_challenge(self):
"""
Gets the human_interaction_challenge of this WafConfig.
The human interaction challenge settings. Used to look for natural human interactions such as mouse movements, time on site, and page scrolling to identify bots.
:return: The human_interaction_challenge of this WafConfig.
:rtype: oci.waas.models.HumanInteractionChallenge
"""
return self._human_interaction_challenge
@human_interaction_challenge.setter
def human_interaction_challenge(self, human_interaction_challenge):
"""
Sets the human_interaction_challenge of this WafConfig.
The human interaction challenge settings. Used to look for natural human interactions such as mouse movements, time on site, and page scrolling to identify bots.
:param human_interaction_challenge: The human_interaction_challenge of this WafConfig.
:type: oci.waas.models.HumanInteractionChallenge
"""
self._human_interaction_challenge = human_interaction_challenge
@property
def js_challenge(self):
"""
Gets the js_challenge of this WafConfig.
The JavaScript challenge settings. Used to challenge requests with a JavaScript challenge and take the action if a browser has no JavaScript support in order to block bots.
:return: The js_challenge of this WafConfig.
:rtype: oci.waas.models.JsChallenge
"""
return self._js_challenge
@js_challenge.setter
def js_challenge(self, js_challenge):
"""
Sets the js_challenge of this WafConfig.
The JavaScript challenge settings. Used to challenge requests with a JavaScript challenge and take the action if a browser has no JavaScript support in order to block bots.
:param js_challenge: The js_challenge of this WafConfig.
:type: oci.waas.models.JsChallenge
"""
self._js_challenge = js_challenge
@property
def origin(self):
"""
Gets the origin of this WafConfig.
The key in the map of origins referencing the origin used for the Web Application Firewall. The origin must already be included in `Origins`. Required when creating the `WafConfig` resource, but not on update.
:return: The origin of this WafConfig.
:rtype: str
"""
return self._origin
@origin.setter
def origin(self, origin):
"""
Sets the origin of this WafConfig.
The key in the map of origins referencing the origin used for the Web Application Firewall. The origin must already be included in `Origins`. Required when creating the `WafConfig` resource, but not on update.
:param origin: The origin of this WafConfig.
:type: str
"""
self._origin = origin
@property
def caching_rules(self):
"""
Gets the caching_rules of this WafConfig.
A list of caching rules applied to the web application.
:return: The caching_rules of this WafConfig.
:rtype: list[oci.waas.models.CachingRule]
"""
return self._caching_rules
@caching_rules.setter
def caching_rules(self, caching_rules):
"""
Sets the caching_rules of this WafConfig.
A list of caching rules applied to the web application.
:param caching_rules: The caching_rules of this WafConfig.
:type: list[oci.waas.models.CachingRule]
"""
self._caching_rules = caching_rules
@property
def custom_protection_rules(self):
"""
Gets the custom_protection_rules of this WafConfig.
A list of the custom protection rule OCIDs and their actions.
:return: The custom_protection_rules of this WafConfig.
:rtype: list[oci.waas.models.CustomProtectionRuleSetting]
"""
return self._custom_protection_rules
@custom_protection_rules.setter
def custom_protection_rules(self, custom_protection_rules):
"""
Sets the custom_protection_rules of this WafConfig.
A list | |
0.5*m.b557*m.b590 + 0.5*m.b557*m.b615 + 0.5*m.b557*m.b616 +
0.5*m.b557*m.b621 + 0.5*m.b557*m.b652 + 0.5*m.b557*m.b659 + 0.5*m.b557*m.b663 + 0.5*m.b557*m.b704
+ 0.5*m.b557*m.b749 + 0.5*m.b557*m.b759 + 0.5*m.b557*m.b788 + 0.5*m.b557*m.b789 + 0.5*m.b558*
m.b561 + 0.5*m.b558*m.b576 + 0.5*m.b558*m.b587 + 0.5*m.b558*m.b603 + 0.5*m.b558*m.b608 + 0.5*
m.b558*m.b628 + 0.5*m.b558*m.b631 + 0.5*m.b558*m.b638 + 0.5*m.b558*m.b639 + 0.5*m.b558*m.b646 +
0.5*m.b558*m.b651 + 0.5*m.b558*m.b658 + 0.5*m.b558*m.b660 + 0.5*m.b558*m.b662 + 0.5*m.b558*m.b671
+ 0.5*m.b558*m.b678 + m.b558*m.x861 + 0.5*m.b559*m.b561 + 0.5*m.b559*m.b567 + 0.5*m.b559*m.b581
+ 0.5*m.b559*m.b582 + 0.5*m.b559*m.b591 + 0.5*m.b559*m.b595 + 0.5*m.b559*m.b602 + 0.5*m.b559*
m.b611 + 0.5*m.b559*m.b614 + 0.5*m.b559*m.b619 + 0.5*m.b559*m.b626 + 0.5*m.b559*m.b627 + 0.5*
m.b559*m.b632 + 0.5*m.b559*m.b635 + 0.5*m.b559*m.b657 + m.b559*m.b661 + 0.5*m.b559*m.b670 + 0.5*
m.b559*m.b671 + m.b559*m.b672 + 0.5*m.b559*m.b678 + 0.5*m.b559*m.b714 + 0.5*m.b559*m.b761 + 0.5*
m.b559*m.b765 + 0.5*m.b559*m.b790 + 0.5*m.b559*m.b798 + 0.5*m.b559*m.b804 + 0.5*m.b559*m.b809 +
0.5*m.b559*m.b811 + 0.5*m.b559*m.b816 + 0.5*m.b559*m.b823 + 0.5*m.b559*m.b826 + 0.5*m.b560*m.b584
+ 0.5*m.b560*m.b617 + 0.5*m.b560*m.b643 + 0.5*m.b560*m.b644 + 0.5*m.b560*m.b649 + 0.5*m.b560*
m.b679 + 0.5*m.b561*m.b567 + 0.5*m.b561*m.b576 + 0.5*m.b561*m.b581 + 0.5*m.b561*m.b582 + 0.5*
m.b561*m.b595 + 0.5*m.b561*m.b611 + 0.5*m.b561*m.b614 + 0.5*m.b561*m.b619 + 0.5*m.b561*m.b626 +
0.5*m.b561*m.b627 + 0.5*m.b561*m.b631 + 0.5*m.b561*m.b632 + 0.5*m.b561*m.b635 + 0.5*m.b561*m.b651
+ 0.5*m.b561*m.b657 + 0.5*m.b561*m.b658 + 0.5*m.b561*m.b661 + 0.5*m.b561*m.b662 + 0.5*m.b561*
m.b670 + 0.5*m.b561*m.b672 + 0.5*m.b562*m.b566 + 0.5*m.b562*m.b569 + 0.5*m.b562*m.b570 + 0.5*
m.b562*m.b572 + m.b562*m.b574 + 0.5*m.b562*m.b576 + 0.5*m.b562*m.b583 + 0.5*m.b562*m.b586 + 0.5*
m.b562*m.b587 + 0.5*m.b562*m.b591 + 0.5*m.b562*m.b602 + 0.5*m.b562*m.b603 + m.b562*m.b605 + 0.5*
m.b562*m.b608 + 0.5*m.b562*m.b623 + 0.5*m.b562*m.b628 + 0.5*m.b562*m.b641 + 0.5*m.b562*m.b645 +
0.5*m.b562*m.b648 + 0.5*m.b562*m.b650 + 0.5*m.b562*m.b656 + 0.5*m.b562*m.b658 + 0.5*m.b562*m.b662
+ 0.5*m.b562*m.b664 + 0.5*m.b562*m.b666 + 0.5*m.b562*m.b670 + 0.5*m.b562*m.b673 + 0.5*m.b562*
m.b674 + 0.5*m.b562*m.b676 + 0.5*m.b562*m.b681 + 0.5*m.b563*m.b567 + 0.5*m.b563*m.b568 + 0.5*
m.b563*m.b583 + 0.5*m.b563*m.b585 + 0.5*m.b563*m.b588 + 0.5*m.b563*m.b595 + 0.5*m.b563*m.b601 +
0.5*m.b563*m.b606 + 0.5*m.b563*m.b613 + 0.5*m.b563*m.b620 + 0.5*m.b563*m.b624 + 0.5*m.b563*m.b635
+ 0.5*m.b563*m.b636 + 0.5*m.b563*m.b647 + 0.5*m.b564*m.b568 + m.b564*m.b573 + 0.5*m.b564*m.b593
+ 0.5*m.b564*m.b598 + 0.5*m.b564*m.b601 + 0.5*m.b564*m.b613 + 0.5*m.b564*m.b620 + m.b564*m.b622
+ 0.5*m.b564*m.b625 + 0.5*m.b564*m.b643 + 0.5*m.b564*m.b647 + 0.5*m.b564*m.b668 + m.b564*m.x857
+ 0.5*m.b565*m.b575 + 0.5*m.b565*m.b577 + 0.5*m.b565*m.b579 + 0.5*m.b565*m.b580 + 0.5*m.b565*
m.b589 + 0.5*m.b565*m.b592 + m.b565*m.b594 + 0.5*m.b565*m.b597 + 0.5*m.b565*m.b607 + 0.5*m.b565*
m.b642 + 0.5*m.b565*m.b653 + 0.5*m.b565*m.b659 + 0.5*m.b565*m.b663 + 0.5*m.b565*m.b667 + 0.5*
m.b565*m.b680 + 0.5*m.b566*m.b570 + 0.5*m.b566*m.b572 + 0.5*m.b566*m.b574 + 0.5*m.b566*m.b587 +
0.5*m.b566*m.b603 + 0.5*m.b566*m.b605 + 0.5*m.b566*m.b608 + 0.5*m.b566*m.b623 + 0.5*m.b566*m.b628
+ 0.5*m.b566*m.b633 + m.b566*m.b664 + 0.5*m.b566*m.b670 + 0.5*m.b566*m.b673 + 0.5*m.b566*m.b674
+ m.b566*m.b676 + m.b566*m.b681 + m.b566*m.x856 + 0.5*m.b567*m.b581 + 0.5*m.b567*m.b582 + 0.5*
m.b567*m.b583 + 0.5*m.b567*m.b585 + m.b567*m.b595 + 0.5*m.b567*m.b611 + 0.5*m.b567*m.b614 + 0.5*
m.b567*m.b619 + 0.5*m.b567*m.b624 + 0.5*m.b567*m.b626 + 0.5*m.b567*m.b627 + 0.5*m.b567*m.b632 +
m.b567*m.b635 + 0.5*m.b567*m.b636 + 0.5*m.b567*m.b657 + 0.5*m.b567*m.b661 + 0.5*m.b567*m.b670 +
0.5*m.b567*m.b672 + 0.5*m.b568*m.b573 + 0.5*m.b568*m.b588 + 0.5*m.b568*m.b593 + 0.5*m.b568*m.b598
+ m.b568*m.b601 + 0.5*m.b568*m.b606 + m.b568*m.b613 + m.b568*m.b620 + 0.5*m.b568*m.b622 + 0.5*
m.b568*m.b625 + 0.5*m.b568*m.b643 + m.b568*m.b647 + 0.5*m.b569*m.b574 + 0.5*m.b569*m.b576 + 0.5*
m.b569*m.b578 + 0.5*m.b569*m.b581 + 0.5*m.b569*m.b583 + 0.5*m.b569*m.b586 + 0.5*m.b569*m.b591 +
0.5*m.b569*m.b602 + 0.5*m.b569*m.b605 + 0.5*m.b569*m.b611 + 0.5*m.b569*m.b627 + 0.5*m.b569*m.b638
+ 0.5*m.b569*m.b639 + 0.5*m.b569*m.b641 + 0.5*m.b569*m.b645 + 0.5*m.b569*m.b646 + m.b569*m.b648
+ 0.5*m.b569*m.b650 + m.b569*m.b656 + 0.5*m.b569*m.b658 + 0.5*m.b569*m.b662 + m.b569*m.b666 +
0.5*m.b570*m.b572 + 0.5*m.b570*m.b574 + 0.5*m.b570*m.b587 + 0.5*m.b570*m.b603 + 0.5*m.b570*m.b605
+ 0.5*m.b570*m.b608 + 0.5*m.b570*m.b623 + 0.5*m.b570*m.b628 + 0.5*m.b570*m.b664 + 0.5*m.b570*
m.b670 + m.b570*m.b673 + 0.5*m.b570*m.b674 + 0.5*m.b570*m.b676 + 0.5*m.b570*m.b681 + m.b570*
m.x853 + 0.5*m.b571*m.b604 + 0.5*m.b571*m.b610 + m.b571*m.b629 + 0.5*m.b571*m.b637 + 0.5*m.b571*
m.b653 + 0.5*m.b571*m.b655 + 0.5*m.b571*m.b665 + 0.5*m.b571*m.b675 + m.b571*m.x862 + 0.5*m.b572*
m.b574 + 0.5*m.b572*m.b587 + 0.5*m.b572*m.b603 + 0.5*m.b572*m.b605 + 0.5*m.b572*m.b608 + 0.5*
m.b572*m.b623 + 0.5*m.b572*m.b628 + 0.5*m.b572*m.b664 + 0.5*m.b572*m.b670 + 0.5*m.b572*m.b673 +
m.b572*m.b674 + 0.5*m.b572*m.b676 + 0.5*m.b572*m.b681 + m.b572*m.x865 + 0.5*m.b573*m.b593 + 0.5*
m.b573*m.b598 + 0.5*m.b573*m.b601 + 0.5*m.b573*m.b613 + 0.5*m.b573*m.b620 + m.b573*m.b622 + 0.5*
m.b573*m.b625 + 0.5*m.b573*m.b643 + 0.5*m.b573*m.b647 + 0.5*m.b573*m.b668 + m.b573*m.x857 + 0.5*
m.b574*m.b576 + 0.5*m.b574*m.b583 + 0.5*m.b574*m.b586 + 0.5*m.b574*m.b587 + 0.5*m.b574*m.b591 +
0.5*m.b574*m.b602 + 0.5*m.b574*m.b603 + m.b574*m.b605 + 0.5*m.b574*m.b608 + 0.5*m.b574*m.b623 +
0.5*m.b574*m.b628 + 0.5*m.b574*m.b641 + 0.5*m.b574*m.b645 + 0.5*m.b574*m.b648 + 0.5*m.b574*m.b650
+ 0.5*m.b574*m.b656 + 0.5*m.b574*m.b658 + 0.5*m.b574*m.b662 + 0.5*m.b574*m.b664 + 0.5*m.b574*
m.b666 + 0.5*m.b574*m.b670 + 0.5*m.b574*m.b673 + 0.5*m.b574*m.b674 + 0.5*m.b574*m.b676 + 0.5*
m.b574*m.b681 + 0.5*m.b575*m.b577 + 0.5*m.b575*m.b579 + 0.5*m.b575*m.b580 + 0.5*m.b575*m.b594 +
0.5*m.b575*m.b607 + 0.5*m.b575*m.b653 + 0.5*m.b575*m.b667 + 0.5*m.b575*m.b680 + m.b575*m.x863 +
0.5*m.b576*m.b583 + 0.5*m.b576*m.b586 + 0.5*m.b576*m.b591 + 0.5*m.b576*m.b602 + 0.5*m.b576*m.b605
+ 0.5*m.b576*m.b631 + 0.5*m.b576*m.b641 + 0.5*m.b576*m.b645 + 0.5*m.b576*m.b648 + 0.5*m.b576*
m.b650 + 0.5*m.b576*m.b651 + 0.5*m.b576*m.b656 + m.b576*m.b658 + m.b576*m.b662 + 0.5*m.b576*
m.b666 + m.b577*m.b579 + 0.5*m.b577*m.b580 + 0.5*m.b577*m.b594 + 0.5*m.b577*m.b596 + 0.5*m.b577*
m.b607 + 0.5*m.b577*m.b634 + 0.5*m.b577*m.b640 + 0.5*m.b577*m.b649 + 0.5*m.b577*m.b653 + 0.5*
m.b577*m.b654 + 0.5*m.b577*m.b655 + 0.5*m.b577*m.b665 + 0.5*m.b577*m.b667 + 0.5*m.b577*m.b675 +
0.5*m.b577*m.b680 + 0.5*m.b578*m.b581 + 0.5*m.b578*m.b585 + 0.5*m.b578*m.b609 + 0.5*m.b578*m.b611
+ 0.5*m.b578*m.b618 + 0.5*m.b578*m.b624 + 0.5*m.b578*m.b625 + 0.5*m.b578*m.b627 + 0.5*m.b578*
m.b630 + 0.5*m.b578*m.b631 + 0.5*m.b578*m.b636 + 0.5*m.b578*m.b638 + 0.5*m.b578*m.b639 + 0.5*
m.b578*m.b646 + 0.5*m.b578*m.b648 + 0.5*m.b578*m.b651 + 0.5*m.b578*m.b656 + 0.5*m.b578*m.b666 +
0.5*m.b579*m.b580 + 0.5*m.b579*m.b594 + 0.5*m.b579*m.b596 + 0.5*m.b579*m.b607 + 0.5*m.b579*m.b634
+ 0.5*m.b579*m.b640 + 0.5*m.b579*m.b649 + 0.5*m.b579*m.b653 + 0.5*m.b579*m.b654 + 0.5*m.b579*
m.b655 + 0.5*m.b579*m.b665 + 0.5*m.b579*m.b667 + 0.5*m.b579*m.b675 + 0.5*m.b579*m.b680 + 0.5*
m.b580*m.b584 + 0.5*m.b580*m.b594 + 0.5*m.b580*m.b600 + m.b580*m.b607 + 0.5*m.b580*m.b615 + 0.5*
m.b580*m.b621 + 0.5*m.b580*m.b653 + 0.5*m.b580*m.b667 + 0.5*m.b580*m.b669 + m.b580*m.b680 + 0.5*
m.b581*m.b582 + 0.5*m.b581*m.b595 + m.b581*m.b611 + 0.5*m.b581*m.b614 + 0.5*m.b581*m.b619 + 0.5*
m.b581*m.b626 + m.b581*m.b627 + 0.5*m.b581*m.b632 + 0.5*m.b581*m.b635 + 0.5*m.b581*m.b638 + 0.5*
m.b581*m.b639 + 0.5*m.b581*m.b646 + 0.5*m.b581*m.b648 + 0.5*m.b581*m.b656 + 0.5*m.b581*m.b657 +
0.5*m.b581*m.b661 + 0.5*m.b581*m.b666 + 0.5*m.b581*m.b670 + 0.5*m.b581*m.b672 + 0.5*m.b582*m.b595
+ 0.5*m.b582*m.b611 + m.b582*m.b614 + m.b582*m.b619 + 0.5*m.b582*m.b626 + 0.5*m.b582*m.b627 +
m.b582*m.b632 + 0.5*m.b582*m.b633 + 0.5*m.b582*m.b635 + 0.5*m.b582*m.b645 + m.b582*m.b657 + 0.5*
m.b582*m.b660 + 0.5*m.b582*m.b661 + 0.5*m.b582*m.b670 + 0.5*m.b582*m.b672 + 0.5*m.b582*m.b677 +
0.5*m.b583*m.b585 + 0.5*m.b583*m.b586 + 0.5*m.b583*m.b591 + 0.5*m.b583*m.b595 + 0.5*m.b583*m.b602
+ 0.5*m.b583*m.b605 + 0.5*m.b583*m.b624 + 0.5*m.b583*m.b635 + 0.5*m.b583*m.b636 + 0.5*m.b583*
m.b641 + 0.5*m.b583*m.b645 + 0.5*m.b583*m.b648 + 0.5*m.b583*m.b650 + 0.5*m.b583*m.b656 + 0.5*
m.b583*m.b658 + 0.5*m.b583*m.b662 + 0.5*m.b583*m.b666 + 0.5*m.b584*m.b600 + 0.5*m.b584*m.b607 +
0.5*m.b584*m.b615 + 0.5*m.b584*m.b617 + 0.5*m.b584*m.b621 + 0.5*m.b584*m.b649 + 0.5*m.b584*m.b669
+ 0.5*m.b584*m.b680 + 0.5*m.b585*m.b595 + 0.5*m.b585*m.b609 + 0.5*m.b585*m.b618 + m.b585*m.b624
+ 0.5*m.b585*m.b625 + 0.5*m.b585*m.b630 + 0.5*m.b585*m.b631 + 0.5*m.b585*m.b635 + m.b585*m.b636
+ 0.5*m.b585*m.b651 + 0.5*m.b586*m.b591 + 0.5*m.b586*m.b602 + 0.5*m.b586*m.b605 + 0.5*m.b586*
m.b626 + m.b586*m.b641 + 0.5*m.b586*m.b645 + 0.5*m.b586*m.b648 + m.b586*m.b650 + 0.5*m.b586*
m.b656 + 0.5*m.b586*m.b658 + 0.5*m.b586*m.b662 + 0.5*m.b586*m.b666 + m.b587*m.b603 + 0.5*m.b587*
m.b605 + m.b587*m.b608 + 0.5*m.b587*m.b623 + m.b587*m.b628 + 0.5*m.b587*m.b638 + 0.5*m.b587*
m.b639 + 0.5*m.b587*m.b646 + 0.5*m.b587*m.b660 + 0.5*m.b587*m.b664 + 0.5*m.b587*m.b670 + 0.5*
m.b587*m.b671 + 0.5*m.b587*m.b673 + 0.5*m.b587*m.b674 + 0.5*m.b587*m.b676 + 0.5*m.b587*m.b678 +
0.5*m.b587*m.b681 + m.b587*m.x861 + 0.5*m.b588*m.b601 + 0.5*m.b588*m.b606 + 0.5*m.b588*m.b609 +
0.5*m.b588*m.b610 + 0.5*m.b588*m.b612 + 0.5*m.b588*m.b613 + 0.5*m.b588*m.b618 + 0.5*m.b588*m.b620
+ 0.5*m.b588*m.b630 + 0.5*m.b588*m.b644 + 0.5*m.b588*m.b647 + 0.5*m.b588*m.b654 + 0.5*m.b588*
m.b668 + 0.5*m.b588*m.b679 + 0.5*m.b589*m.b590 + 0.5*m.b589*m.b592 + 0.5*m.b589*m.b594 + 0.5*
m.b589*m.b597 + 0.5*m.b589*m.b615 + 0.5*m.b589*m.b621 + 0.5*m.b589*m.b642 + m.b589*m.b659 +
m.b589*m.b663 + 0.5*m.b590*m.b615 + 0.5*m.b590*m.b621 + 0.5*m.b590*m.b659 + 0.5*m.b590*m.b663 +
0.5*m.b590*m.b667 + m.b590*m.x859 + m.b591*m.b602 + 0.5*m.b591*m.b605 + 0.5*m.b591*m.b641 + 0.5*
m.b591*m.b645 + 0.5*m.b591*m.b648 + 0.5*m.b591*m.b650 + 0.5*m.b591*m.b656 + 0.5*m.b591*m.b658 +
0.5*m.b591*m.b661 + 0.5*m.b591*m.b662 + 0.5*m.b591*m.b666 + 0.5*m.b591*m.b671 + 0.5*m.b591*m.b672
+ 0.5*m.b591*m.b678 + 0.5*m.b591*m.b714 + 0.5*m.b591*m.b761 + 0.5*m.b591*m.b765 + 0.5*m.b591*
m.b790 + 0.5*m.b591*m.b798 + 0.5*m.b591*m.b804 + 0.5*m.b591*m.b809 + 0.5*m.b591*m.b811 + 0.5*
m.b591*m.b816 + 0.5*m.b591*m.b823 + 0.5*m.b591*m.b826 + 0.5*m.b592*m.b594 + 0.5*m.b592*m.b596 +
m.b592*m.b597 + 0.5*m.b592*m.b600 + 0.5*m.b592*m.b637 + 0.5*m.b592*m.b642 + 0.5*m.b592*m.b659 +
0.5*m.b592*m.b663 + 0.5*m.b592*m.b669 + m.b593*m.b598 + 0.5*m.b593*m.b599 + 0.5*m.b593*m.b601 +
0.5*m.b593*m.b612 + 0.5*m.b593*m.b613 + 0.5*m.b593*m.b617 + 0.5*m.b593*m.b620 + 0.5*m.b593*m.b622
+ 0.5*m.b593*m.b625 + 0.5*m.b593*m.b643 + 0.5*m.b593*m.b647 + 0.5*m.b594*m.b597 + 0.5*m.b594*
m.b607 + 0.5*m.b594*m.b642 + 0.5*m.b594*m.b653 + 0.5*m.b594*m.b659 + 0.5*m.b594*m.b663 + 0.5*
m.b594*m.b667 + 0.5*m.b594*m.b680 + 0.5*m.b595*m.b611 + 0.5*m.b595*m.b614 + 0.5*m.b595*m.b619 +
0.5*m.b595*m.b624 + 0.5*m.b595*m.b626 + 0.5*m.b595*m.b627 + 0.5*m.b595*m.b632 + m.b595*m.b635 +
0.5*m.b595*m.b636 + 0.5*m.b595*m.b657 + 0.5*m.b595*m.b661 + 0.5*m.b595*m.b670 + 0.5*m.b595*m.b672
+ 0.5*m.b596*m.b597 + 0.5*m.b596*m.b600 + 0.5*m.b596*m.b634 + 0.5*m.b596*m.b637 + 0.5*m.b596*
m.b640 + 0.5*m.b596*m.b649 + 0.5*m.b596*m.b654 + 0.5*m.b596*m.b655 + 0.5*m.b596*m.b665 + 0.5*
m.b596*m.b669 + 0.5*m.b596*m.b675 + 0.5*m.b597*m.b600 + 0.5*m.b597*m.b637 + 0.5*m.b597*m.b642 +
0.5*m.b597*m.b659 + 0.5*m.b597*m.b663 + 0.5*m.b597*m.b669 | |
import torch
from torch import nn
import gym
from gym.spaces import Box, Discrete, Space
from copy import copy, deepcopy
import numpy as np
from typing import Optional, Union, Iterable, List, Dict, Tuple, Any
from numbers import Real, Integral
from .runningstat import RunningStat
from .misc import (
fill_parameters,
get_parameter_vector,
positive_int_or_none,
positive_int,
positive_float,
get_env_spaces,
get_1D_box_length,
get_action_space_length
)
# A flat vector of policy parameters: a list of reals or a numpy array.
ParamVector = Union[List[Real], np.ndarray]
# An action: a Box-style vector (list/array) or a Discrete action index.
Action = Union[List[Real], np.ndarray, Integral]
class Policy:
    """Base class for a policy.

    Wraps a torch module (``self._policy``, assigned by subclasses) and
    runs it against a gym environment, optionally keeping running
    statistics for observation normalization.
    """

    def __init__(self,
                 *,
                 env_name: str,
                 env_config: Optional[dict] = None,
                 observation_normalization: bool = True,
                 seed: Optional[Integral] = None):
        """``__init__(...)``: Initialize the policy object.

        The initializer must be called from the initializer
        of the inheriting classes.

        Args:
            env_name: Expected as a string specifying the gym
                environment ID (e.g. 'Humanoid-v2').
            env_config: Expected as None, or as a dictionary
                containing the keyword arguments to be passed
                to ``gym.make`` when creating the environment.
            observation_normalization: Expected as boolean,
                specifying whether or not the observations
                are to be normalized.
            seed: Expected as None or as an integer.
                Pass here an integer for explicitly setting a
                random seed for the stochastic operations of
                the gym environment.
        """
        # Declared here for typing; subclasses must assign the module.
        self._policy: nn.Module

        if bool(observation_normalization):
            # _main_obs_stats drives normalization; _collected_obs_stats
            # accumulates updates until popped via pop_collected_obs_stats().
            self._main_obs_stats = RunningStat()
            self._collected_obs_stats = RunningStat()
        else:
            self._main_obs_stats = None
            self._collected_obs_stats = None

        if not isinstance(env_name, str):
            raise TypeError(
                "Environment name was expected as an str,"
                + " but it was received as: "
                + repr(env_name)
            )
        self._env_name = env_name
        self._env_config = {} if env_config is None else env_config

        # The gym environment is created lazily (see _get_env()).
        self._env: Optional[gym.Env] = None
        self._observation_space, self._action_space = (
            get_env_spaces(self._env_name, self._env_config)
        )
        self._seed = seed
        self._collect_obs_stats = True
        self.notes: Any = None

    def _get_env(self) -> gym.Env:
        # Create (and seed) the environment on first use only.
        if self._env is None:
            self._env = gym.make(self._env_name, **(self._env_config))
            if self._seed is not None:
                # NOTE(review): Env.seed() is deprecated in newer gym
                # releases — confirm the pinned gym version supports it.
                self._env.seed(self._seed)
        return self._env

    def __getstate__(self):
        # Exclude the live environment from pickling; it is re-created
        # lazily after unpickling.
        state = dict(self.__dict__)
        state["_env"] = None
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)

    def _use_policy(self, observation: Iterable[Real]) -> Action:
        # Forward pass without gradient tracking, then adapt the raw
        # network output to the environment's action space.
        x = torch.as_tensor(observation, dtype=torch.float32)
        with torch.no_grad():
            action = self._policy(x).numpy()
        if isinstance(self._action_space, Box):
            action = np.clip(
                action,
                self._action_space.low,
                self._action_space.high
            )
        elif isinstance(self._action_space, Discrete):
            action = np.argmax(action)
        else:
            raise TypeError(
                "Cannot work with this action space: "
                + repr(self._action_space)
            )
        return action

    def run(self,
            *,
            decrease_rewards_by: Real = 0.0,
            max_episode_length: Optional[Integral] = None) -> Tuple[float, int]:
        """Run an episode.

        Args:
            decrease_rewards_by: The reward at each timestep will be
                decreased by this given amount.
            max_episode_length: The maximum number of interactions
                allowed in an episode.

        Returns:
            A tuple (cumulative_reward, number_of_interactions).
        """
        max_episode_length = positive_int_or_none(max_episode_length)

        def normalized(obs):
            # Update the running statistics (when collection is enabled)
            # and return the normalized observation; otherwise pass through.
            if self._main_obs_stats is None:
                return obs
            if self._collect_obs_stats:
                self._main_obs_stats.update(obs)
                self._collected_obs_stats.update(obs)
            return self._main_obs_stats.normalize(obs)

        t = 0
        cumulative_reward = 0.0
        env = self._get_env()
        observation = normalized(env.reset())
        while True:
            action = self._use_policy(observation)
            observation, reward, done, info = env.step(action)
            observation = normalized(observation)
            t += 1
            reward -= decrease_rewards_by
            cumulative_reward += reward
            # BUG FIX: the previous condition was `t > max_episode_length`,
            # which permitted one interaction beyond the configured maximum.
            if max_episode_length is not None and t >= max_episode_length:
                break
            if done:
                break
        return cumulative_reward, t

    def set_params_and_run(self,
                           policy_parameters: ParamVector,
                           *,
                           decrease_rewards_by: Real = 0.0,
                           max_episode_length: Optional[Integral] = None) -> (
                               Tuple[float, int]):
        """Set the parameters of the policy by copying them
        from the given parameter vector, then run an episode.

        Args:
            policy_parameters: The policy parameters to be used.
            decrease_rewards_by: The reward at each timestep will be
                decreased by this given amount.
            max_episode_length: The maximum number of interactions
                allowed in an episode.

        Returns:
            A tuple (cumulative_reward, number_of_interactions).
        """
        self.set_parameters(policy_parameters)
        return self.run(
            decrease_rewards_by=decrease_rewards_by,
            max_episode_length=max_episode_length
        )

    def _run_from_list(self,
                       policy_param_list: List[ParamVector],
                       *,
                       decrease_rewards_by: Real = 0.0,
                       max_episode_length: Optional[Integral] = None) -> (
                           List[Tuple[float, int]]):
        # Evaluate each parameter vector sequentially on the same env.
        return [
            self.set_params_and_run(
                policy_params,
                decrease_rewards_by=decrease_rewards_by,
                max_episode_length=max_episode_length
            )
            for policy_params in policy_param_list
        ]

    def _run_from_dict(self,
                       policy_param_dict: Dict[Any, ParamVector],
                       *,
                       decrease_rewards_by: Real = 0.0,
                       max_episode_length: Optional[Integral] = None) -> (
                           Dict[Any, Tuple[float, int]]):
        # Same as _run_from_list, but keyed results.
        return {
            policy_key: self.set_params_and_run(
                policy_params,
                decrease_rewards_by=decrease_rewards_by,
                max_episode_length=max_episode_length
            )
            for policy_key, policy_params in policy_param_dict.items()
        }

    def set_params_and_run_all(self,
                               policy_params_all: Union[
                                   List[ParamVector],
                                   Dict[Any, ParamVector]
                               ],
                               *,
                               decrease_rewards_by: Real = 0.0,
                               max_episode_length: Optional[Integral] = None) -> (
                                   Union[
                                       List[Tuple[float, int]],
                                       Dict[Any, Tuple[float, int]]
                                   ]
                               ):
        """For each of the items in the given parameters collection,
        set the parameters of the policy by copying them
        from the given parameter vector, then run an episode.

        Args:
            policy_params_all: A list of parameter vectors, or a
                dictionary mapping a policy identifier to a parameter
                vector (the identifier could e.g. be the index of the
                vector within a batch).
            decrease_rewards_by: The reward at each timestep will be
                decreased by this given amount.
            max_episode_length: The maximum number of interactions
                allowed in an episode.

        Returns:
            A list of (cumulative_reward, number_of_interactions) tuples,
            or a dictionary mapping each identifier to such a tuple,
            mirroring the input container type.
        """
        kwargs = dict(
            decrease_rewards_by=decrease_rewards_by,
            max_episode_length=max_episode_length
        )
        # Duck-typed dict detection, so Mapping-like containers also work.
        received_dict = (
            hasattr(policy_params_all, "keys")
            and hasattr(policy_params_all, "values")
        )
        if received_dict:
            return self._run_from_dict(policy_params_all, **kwargs)
        else:
            return self._run_from_list(policy_params_all, **kwargs)

    def set_parameters(self, parameters: ParamVector):
        """Set the parameters of the policy by copying the values
        from the given parameter vector.

        Args:
            parameters: The parameter vector.
        """
        if isinstance(parameters, np.ndarray):
            # Copy first so torch.as_tensor cannot alias (and later
            # mutate) the caller's array.
            parameters = parameters.copy()
        x = torch.as_tensor(parameters, dtype=torch.float32)
        fill_parameters(self._policy, x)

    def get_parameters(self) -> np.ndarray:
        """Get the parameters of the policy as a 1-D numpy array.

        Returns:
            The parameter vector.
        """
        return get_parameter_vector(self._policy).numpy()

    def pop_collected_obs_stats(self) -> RunningStat:
        """Get the collected observation statistics.

        When this method is called, the contained collected
        statistics are removed (reset to a fresh RunningStat).

        Returns:
            The collected observation statistics.
        """
        if self._collected_obs_stats is None:
            raise ValueError(
                "Observation stats are not configured to be collected,"
                " therefore, they cannot be popped."
            )
        result = self._collected_obs_stats
        self._collected_obs_stats = RunningStat()
        return result

    def set_main_obs_stats(self, obs_stats: RunningStat):
        """Set the observation statistics to be used for
        observation normalization.

        Args:
            obs_stats: A RunningStat object containing the statistics.
        """
        if obs_stats is None:
            raise ValueError(
                "The main observation stats cannot be given as None."
            )
        # Deep-copied so later external mutation cannot affect this policy.
        self._main_obs_stats = deepcopy(obs_stats)

    def get_main_obs_stats(self) -> Optional[RunningStat]:
        """Get the observation statistics used for
        observation normalization.

        Returns:
            A RunningStat object containing the statistics,
            or None when normalization is disabled.
        """
        return self._main_obs_stats

    def update_main_obs_stats(self, obs_stats: Union[RunningStat, np.ndarray]):
        """Update the observation statistics used for
        observation normalization.

        Args:
            obs_stats: A RunningStat object or a numpy array
                (a numpy array representing a single observation vector).
        """
        if self._main_obs_stats is None:
            raise ValueError(
                "There is no observation stats to update."
                + " Was "
                + repr(self)
                + " initialized with observation_normalization=False?"
            )
        self._main_obs_stats.update(obs_stats)

    def get_parameters_count(self) -> int:
        """Get the number of parameters of the policy
        (also corresponds to the length of parameter vector).
        """
        return len(self.get_parameters())

    def get_collect_obs_stats(self) -> bool:
        """Get, as boolean, whether or not the policy is configured
        to collect observation statistics when running episodes.

        Returns:
            A boolean.
        """
        return self._collect_obs_stats

    def set_collect_obs_stats(self, b: bool):
        """Set, as boolean, whether or not the policy is to collect
        observation statistics when running episodes.

        Args:
            b: A boolean.
        """
        self._collect_obs_stats = bool(b)
class LinearPolicy(Policy):
    """A linear policy.

    Maps the (flattened) observation vector to the action vector through
    a single ``nn.Linear`` layer.
    """

    def __init__(self,
                 *,
                 env_name: str,
                 env_config: Optional[dict] = None,
                 observation_normalization: bool = True,
                 seed: Optional[Integral] = None,
                 bias: bool = True):
        """Initialize the linear policy.

        Args:
            env_name: String ID of the gym environment
                (e.g. 'Humanoid-v2').
            env_config: None, or a dictionary of keyword arguments
                forwarded to ``gym.make``.
            observation_normalization: Whether observations are to be
                normalized.
            seed: None, or an integer seed for the stochastic
                operations of the gym environment.
            bias: Whether the linear layer carries bias parameters.
        """
        super().__init__(
            env_name=env_name,
            env_config=env_config,
            observation_normalization=observation_normalization,
            seed=seed,
        )
        # Layer dimensions follow the environment's spaces.
        in_features = get_1D_box_length(self._observation_space)
        out_features = get_action_space_length(self._action_space)
        self._policy = nn.Linear(in_features, out_features, bias=bias)
class MLPPolicy(Policy):
"""A multi-layer perceptron policy."""
ACTIVATION_CLS = {
"tanh": nn.Tanh,
"relu": nn.ReLU
}
def __init__(self,
*,
env_name: str,
env_config: Optional[dict]=None,
observation_normalization: bool=True,
seed: Optional[Integral]=None,
hidden_size: Integral=64,
num_hidden: Integral=1,
hidden_activation: str="tanh",
output_activation: Optional[str]=None):
"""
Args:
env_name: Expected as a string specifying the gym
environment ID (e.g. 'Humanoid-v2').
env_config: Expected as None, or as a dictionary
containing the keyword arguments to be passed
to ``gym.make`` when creating the environment.
observation_normalization: Expected as boolean,
specifying whether or not the observations
are to be normalized.
seed: Expected as None or as an | |
import os
import datetime
import tempfile
import sys
from subprocess import call
import numpy as np
class ansyswrapper:
    # Private (name-mangled) counters used as class-level defaults:
    __matid = 0   # last material id handed out by create*Mat methods
    __csid = 10   # last local coordinate-system id created
    __ls_num = 1  # load-step counter (not referenced in the methods shown here)
    def __init__(self, path_to_and_bin=None, ans_hi_version=250, ans_low_version=50, anslic='ane3fl',
                 infile='input.dat',
                 outfile='output.dat', projdir=tempfile.gettempdir(), isBatch=True, jobname=None):
        """Store run configuration and seed the APDL command buffer.

        Args:
            path_to_and_bin: Explicit path to the ANSYS executable; when
                None it is discovered via ANSYS<ver>_DIR environment
                variables (see findPathVersion).
            ans_hi_version: Highest ANSYS version number to probe for.
            ans_low_version: Lowest ANSYS version number to probe for.
            anslic: License feature name (passed with the -p switch).
            infile: Name of the APDL input file written into projdir.
            outfile: Name of the solver output file.
            projdir: Working directory for the run (defaults to the
                system temp directory).
            isBatch: True for batch (-b) mode, False for interactive (-g).
            jobname: Solver jobname; autogenerated from the current
                date/time when not given.
        """
        self.ans_hi_version = ans_hi_version
        self.ans_low_version = ans_low_version
        self.path_to_ans_bin = path_to_and_bin
        self.anslic = anslic
        self.inputfile = infile
        self.outputfile = outfile
        if not jobname:
            dt = datetime.datetime.now()
            self.jobname = 'jobname{0}-{1}-{2}--{3}-{4}'.format(dt.year, dt.month, dt.day, dt.hour, dt.minute)
        else:
            self.jobname = jobname
        # Every helper method appends APDL commands to this string buffer;
        # run() eventually writes it to the input file.
        self.apdl = ""
        self.apdl += "FINISH\n"
        self.apdl += "/CLEAR,START\n"
        self.apdl += "/prep7\n"
        self.apdl += "it_num = 1\n"
        self.projdir = projdir
        self.__isBatch = isBatch
        # Seed APDL's RAND() from the wall clock so the randomized local
        # coordinate systems differ between runs.
        self.apdl += """
!--------------- randomize ----------------
*GET,DIM,ACTIVE,0,TIME,WALL
DIM=DIM*3600
*DIM,DUMMY,ARRAY,DIM
*VFILL,DUMMY(1),RAND
*DEL,DIM
*DEL,DUMMY
!--------------- randomize ----------------\n"""
        # Dense 3x3 system used by the applyTens* homogenization steps.
        self.apdl += "*DMAT, mat_s, D, Alloc, 3, 3, incore\n"
        self.apdl += "*VEC, vec_b, D, alloc, 3\n"
        self.apdl += "*VEC, vec_x, D, alloc, 3\n"
def saveToFile(self, filename):
f = open(filename, mode='w')
f.write(self.apdl)
f.close()
    def getNP(self):
        """Return the CPU count to request from the solver (-np switch)."""
        return os.cpu_count()
def findPathVersion(self):
if self.path_to_ans_bin:
return self.path_to_ans_bin
else:
__ansversion = -1
path = ""
for i in range(self.ans_hi_version, self.ans_low_version, -1):
if os.environ.get('ANSYS{0}_DIR'.format(i)):
path = os.environ.get('ANSYS{0}_DIR'.format(i))
__ansversion = i
break
if __ansversion == -1:
print("Ansys not found")
exit(1)
return None
path += '\\bin\\' + os.environ['ANSYS_SYSDIR'] + '\\ANSYS{0}'.format(__ansversion)
return path
def defaultArgs(self):
x_drivers = {
"linux": "X11",
"win": "win32"
}
i_file = os.path.abspath(self.projdir + os.sep + self.inputfile)
o_file = os.path.abspath(self.projdir + os.sep + self.outputfile)
p_dir = os.path.abspath(self.projdir)
if self.__isBatch:
return '-b -p {0} -smp -np {1} -dir {2} -j {3} -s noread -i {4} -o {5} -d {6}'.format(
self.anslic, self.getNP(), p_dir, self.jobname, i_file, o_file, x_drivers[sys.platform]
)
else:
return '-g -p {0} -np {1} -dir {2} -j {2} -s read -d win32'.format(
self.anslic, self.getNP(), self.projdir, self.jobname
)
def run(self, apdl=None):
self.apdl += "/EXIT, NOSAVE,\n"
cwd = os.getcwd()
os.chdir(self.projdir)
self.saveToFile(self.projdir + os.sep + self.inputfile)
print(['bash', self.findPathVersion()] + self.defaultArgs().split(" "))
print('bash ' + self.findPathVersion() + self.defaultArgs())
ret_code = 0
if sys.platform == "linux":
ret_code = call(['bash', self.findPathVersion()] + self.defaultArgs().split(" "))
elif sys.platform == "win":
ret_code = call([self.findPathVersion()] + self.defaultArgs().split(" "))
os.chdir(cwd)
exitcodes = dict()
# exitcodes[0] = 'Normal Exit'
exitcodes[1] = 'Stack Error'
exitcodes[2] = 'Stack Error'
exitcodes[3] = 'Stack Error'
exitcodes[4] = 'Stack Error'
exitcodes[5] = 'Command Line Argument Error'
exitcodes[6] = 'Accounting File Error'
exitcodes[7] = 'Auth File Verification Error'
exitcodes[8] = 'Error in ANSYS or End-of-run'
exitcodes[11] = 'User Routine Error'
exitcodes[12] = 'Macro STOP Command'
exitcodes[14] = 'XOX Error'
exitcodes[15] = 'Fatal Error'
exitcodes[16] = 'Possible Full Disk'
exitcodes[17] = 'Possible Corrupted or Missing File'
exitcodes[18] = 'Possible Corrupted DB File'
exitcodes[21] = 'Authorized Code Section Entered'
exitcodes[25] = 'Unable to Open X11 Server'
exitcodes[30] = 'Quit Signal'
exitcodes[31] = 'Failure to Get Signal'
exitcodes[32] = 'System-dependent Error'
if ret_code > 32:
exitcodes[ret_code] = 'Unknown Error. Check for *.lock files in working directory and delete it'
if ret_code in exitcodes:
print('------ANSYS ERROR EXIT CODE-------')
print('Ansys exit code = {0}, with message: {1}'.format(ret_code, exitcodes[ret_code]))
print('----------------------------------')
print('Terminating.......')
exit(ret_code)
return ret_code
def rectangle(self, x1, y1, x2, y2):
self.apdl += "RECTNG,{0},{1},{2},{3},\n".format(x1, x2, y1, y2)
def circle(self, x, y, rad):
self.apdl += "CYL4, {0}, {1}, {2}\n".format(x, y, rad)
def ellipse(self, x, y, r1, r2):
self.apdl += "ASEL, NONE\n"
self.circle(x, y, 1)
self.apdl += "ARSCALE, ALL, , , {0}, {1}, , , 1, 1\n".format(r1, r2)
self.apdl += "ASEL, ALL\n"
def setFEByNum(self, num):
self.apdl += "ET, 1, {0}\n".format(num)
# self.apdl += "KEYOPT, 1, 3, 2\n"
def createIsotropicMat(self, E, nu):
self.__matid += 1
self.apdl += "MPTEMP,, , , , , , ,\n"
self.apdl += "MPTEMP, 1, 0\n"
self.apdl += "MPDATA, EX, {1},, {0}\n".format(E, self.__matid)
self.apdl += "MPDATA, PRXY, {1},, {0}\n".format(nu, self.__matid)
return self.__matid
def overlapAreas(self):
self.apdl += "AOVLAP,ALL\n"
def createOrtotropicMat(self, c11, c12, c13, c22, c23, c33, c44, c55=None, c66=None):
self.__matid += 1
if c55 == None:
c55 = c44
if c66 == None:
c66 = c44
Ex = (c11 * c22 * c33 + 2 * c23 * c12 * c13 - c11 * c23 ** 2 - c22 * c13 ** 2 - c33 * c12 ** 2) / (
c22 * c33 - c23 ** 2)
Ey = (c11 * c22 * c33 + 2 * c23 * c12 * c13 - c11 * c23 ** 2 - c22 * c13 ** 2 - c33 * c12 ** 2) / (
c11 * c33 - c13 ** 2)
Ez = (c11 * c22 * c33 + 2 * c23 * c12 * c13 - c11 * c23 ** 2 - c22 * c13 ** 2 - c33 * c12 ** 2) / (
c11 * c22 - c12 ** 2)
nuxy = (c12 * c33 - c13 * c23) / (c22 * c33 - c23 ** 2)
nuxz = (c22 * c13 - c12 * c23) / (c22 * c33 - c23 ** 2)
nuyz = (c11 * c23 - c12 * c13) / (c11 * c33 - c13 ** 2)
self.apdl += "MPTEMP,, , , , , , ,\n"
self.apdl += "MPTEMP, 1, 0\n"
self.apdl += "MPDATA, EX, {1},, {0}\n".format(Ex, self.__matid)
self.apdl += "MPDATA, EY, {1},, {0}\n".format(Ey, self.__matid)
self.apdl += "MPDATA, EZ, {1},, {0}\n".format(Ez, self.__matid)
self.apdl += "MPDATA, PRXY, {1},, {0}\n".format(nuxy, self.__matid)
self.apdl += "MPDATA, PRYZ, {1},, {0}\n".format(nuyz, self.__matid)
self.apdl += "MPDATA, PRXZ, {1},, {0}\n".format(nuxz, self.__matid)
self.apdl += "MPDATA, GXY, {1},, {0}\n".format(c44, self.__matid)
self.apdl += "MPDATA, GYZ, {1},, {0}\n".format(c55, self.__matid)
self.apdl += "MPDATA, GXZ, {1},, {0}\n".format(c66, self.__matid)
return self.__matid
def setAreaPropByCoord(self, x, y, matId=1, createRandomCS=False):
self.apdl += "ASEL,S,LOC,X,{0},{1}\n".format(0.95 * x, 1.05 * x)
self.apdl += "ASEL,R,LOC,Y,{0},{1}\n".format(0.95 * y, 1.05 * y)
csid = 0
if createRandomCS:
self.__csid += 1
self.apdl += "LOCAL, {2}, 0, {0}, {1}, 0, RAND(0, 360),, , 1, 1,\n".format(x, y, self.__csid)
csid = self.__csid
self.apdl += "AATT, {0}, , 1, {1},\n".format(matId, csid)
self.apdl += "ASEL,S, , ,all \n"
self.apdl += "CSYS,0 \n"
def delOuterArea(self, x1, y1, x2, y2):
self.apdl += "ASEL,S,LOC,X,{0},{1}\n".format(x1, x2)
self.apdl += "ASEL,R,LOC,Y,{0},{1}\n".format(y1, y2)
self.apdl += "ASEL, INVE\n"
self.apdl += "ADELE,ALL, , ,1\n"
self.apdl += "ASEL,S, , ,all\n"
    def setCirlceAreaMatProps(self, rad, matId):
        """Assign material *matId* to every area within radius *rad* of the origin.

        Temporarily switches to the cylindrical coordinate system (CSYS,1)
        so the X selection acts on the radius, then restores CSYS,0.
        """
        self.apdl += """
CSYS,1
ASEL,S,LOC,X,0,{0}
CSYS,0
AATT, {1},
ASEL,S, , ,all
""".format(rad, matId)
    def setAreaProps(self, arealimit, matId=1):
        """Assign *matId* plus a random local CS to every area smaller than *arealimit*.

        Emits an APDL ``*do`` loop that walks all areas, skips any area
        whose size exceeds ``arealimit`` (assumed to be the background/
        matrix area), and attaches a randomly rotated local coordinate
        system to each remaining area.
        """
        self.apdl += """
nextarea = 0
maxarea = {1}
bigareaid = 0
csid=12
*get, numarea, area, 0, count
*do, i, 1, numarea, 1
asum,
*get, nextarea, area, nextarea, NXTH
*get, area_val, area, nextarea, area
*if,area_val,gt,maxarea,then
bigareaid = nextarea
*CYCLE
*endif
ASEL,S, , , nextarea
asum,
*get, cx, area, 0, cent, x
*get, cy, area, 0, cent, y
LOCAL, csid, 0, cx, cy, 0, RAND(0, 360),, , 1, 1,
AATT, {0}, , 1, csid
csid = csid + 1
ASEL,S, , ,all
CSYS, 0
*enddo
\n""".format(matId, arealimit)
def mesh(self, smartsize=1):
self.apdl += "SMRT, {0}\n".format(smartsize)
self.apdl += "AMESH, all\n"
    def applyTensX(self, x1, y1, x2, y2, eps=0.1):
        """Run a uniaxial tension load case in X over the box (x1, y1)-(x2, y2).

        Clamps UX on the x1 edge, prescribes UX = eps * x2 on the x2 edge,
        suppresses UY on both horizontal edges, solves, and stores the
        resulting averaged quantities into row 1 of the mat_s/vec_b system.
        """
        self.prep7()
        self.clearbc()
        self.apdl += "LSEL,S,LOC,X,{0}\n".format(x1)
        self.apdl += "DL, ALL, ,UX,0\n"
        self.apdl += "LSEL,S,LOC,X,{0}\n".format(x2)
        self.apdl += "DL, ALL, ,UX,{0}\n".format(eps * x2)
        self.apdl += "LSEL,S,LOC,Y,{0}\n".format(y1)
        self.apdl += "LSEL,A,LOC,Y,{0}\n".format(y2)
        self.apdl += "DL, ALL, ,UY,0\n"
        self.apdl += "LSEL,S, , ,all\n"
        self.solve()
        self.post()
        self.prep7()
        # NOTE(review): SXX0/SYY0/EXX0 appear to be APDL parameters defined
        # by post() — confirm before relying on them here.
        self.apdl += """
mat_s(1,1) = SXX0
mat_s(1,2) = SYY0
mat_s(1,3) = 0
vec_b(1) = EXX0\n"""
    def applyTensY(self, x1, y1, x2, y2, eps=0.1):
        """Run a uniaxial tension load case in Y over the box (x1, y1)-(x2, y2).

        Clamps UY on the y1 edge, prescribes UY = eps * y2 on the y2 edge,
        suppresses UX on both vertical edges, solves, and stores the
        resulting averaged quantities into row 2 of the mat_s/vec_b system.
        """
        self.prep7()
        self.clearbc()
        self.apdl += "LSEL,S,LOC,Y,{0}\n".format(y1)
        self.apdl += "DL, ALL, ,UY,0\n"
        self.apdl += "LSEL,S,LOC,Y,{0}\n".format(y2)
        self.apdl += "DL, ALL, ,UY,{0}\n".format(eps * y2)
        self.apdl += "LSEL,S,LOC,X,{0}\n".format(x1)
        self.apdl += "LSEL,A,LOC,X,{0}\n".format(x2)
        self.apdl += "DL, ALL, ,UX,0\n"
        self.apdl += "LSEL,S, , ,all\n"
        self.solve()
        self.post()
        self.prep7()
        # NOTE(review): SXX0/SYY0/EYY0 appear to be APDL parameters defined
        # by post() — confirm before relying on them here.
        self.apdl += """
mat_s(2,1) = 0
mat_s(2,2) = SXX0
mat_s(2,3) = SYY0
vec_b(2) = EYY0\n"""
    def applyTensXandY(self, x1, y1, x2, y2, eps=0.1):
        """Run a biaxial tension load case over the box (x1, y1)-(x2, y2).

        Prescribes UX/UY displacements on all four edges (eps-scaled on
        the far edges), solves, and stores the averaged quantities into
        row 3 of the mat_s/vec_b system.

        NOTE(review): this method is immediately re-defined below with a
        (x1, y1, x2, y2, epsx, epsy) signature; Python keeps only the last
        definition, so this variant is dead code — confirm which one is
        intended.
        """
        self.prep7()
        self.clearbc()
        self.apdl += "LSEL,S,LOC,Y,{0}\n".format(y1)
        self.apdl += "DL, ALL, ,UY,0\n"
        self.apdl += "LSEL,S,LOC,Y,{0}\n".format(y2)
        self.apdl += "DL, ALL, ,UY,{0}\n".format(eps * y2)
        self.apdl += "LSEL,S,LOC,X,{0}\n".format(x1)
        self.apdl += "DL, ALL, ,UX,0\n"
        self.apdl += "LSEL,S,LOC,X,{0}\n".format(x2)
        self.apdl += "DL, ALL, ,UX,{0}\n".format(eps * x2)
        self.apdl += "LSEL,S, , ,all\n"
        self.solve()
        self.post()
        self.prep7()
        self.apdl += """
mat_s(3,1) = 0
mat_s(3,2) = SXX0
mat_s(3,3) = SYY0
vec_b(3)=EYY0\n"""
def applyTensXandY(self, x1, y1, x2, y2, epsx, epsy):
self.prep7()
self.clearbc()
self.apdl += | |
import time
import argparse
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from pathlib import Path
import context
from mhealth.utils.plotter_helper import save_figure
from mhealth.utils.commons import create_progress_bar
# Used if command-line option --parameters is not provided.
DEFAULT_PARAMETERS = ["Temperatur", "Herzfrequenz", "Atemfrequenz"]
# Data sources included in HF-AF_25052021.csv.
VALIDATION_DATA_SOURCES = ["WELCHALLYN_MONITOR", "PHILIPS_GATEWAY"]
# Half-ranges relevant for the validation: x +/- delta
DELTAS = {
    "Atemfrequenz": 3,    # ±3bpm
    "Herzfrequenz": 10,   # ±10bpm
    "Temperatur": 0.5     # ±0.5°C
}
# Half-range of the timestamp matching window, in minutes.
DELTA_TS = 2.5  # ±2.5min
# Devices are identified by the bed number they are used with.
# In case of device breakdown (or other problems), some devices
# were replaced by a device of another room. The below lookup
# specifies which the bed ids (devices) must be renamed, as well
# as the time range, between which the lookup applies.
DEVICE_REPLACEMENT_LOOKUP = {
    # Alias        True      From                          To
    "2653F"  : ("2655F",  "2021-05-14 12:00:00+02:00", None),
    "2652F"  : ("2656FL", "2021-05-18 00:00:00+02:00", None),
    "2661TL" : ("2661FL", "2021-05-20 00:00:00+02:00", None),
    "2664T"  : ("2664F",  "2021-05-12 00:00:00+02:00", None),
    "2665T"  : ("2665F",  None, "2021-05-19 10:30:00+02:00"),
}
# Expected value ranges per vital parameter (used for plotting/filtering).
VALUE_RANGES = {
    "Atemfrequenz": [0, 35],
    "Herzfrequenz": [30, 130],
    "Temperatur": [35, 40],
}
# Histogram bin widths for the device data...
BIN_WIDTHS = {
    "Atemfrequenz": 0.5,
    "Herzfrequenz": 1,
    "Temperatur": 0.01,
}
# ...and the (coarser) widths for the validation measurements.
BIN_WIDTHS_VALID = {
    "Atemfrequenz": 1,
    "Herzfrequenz": 2,
    "Temperatur": 0.1,
}
def tic():
    """Return a wall-clock timestamp for later use with toc()."""
    return time.time()
def toc(label, start):
    """Print the elapsed wall-clock seconds since *start*, prefixed by *label*.

    Args:
        label: Text printed in front of the timing.
        start: Timestamp previously obtained from tic().
    """
    elapsed = time.time() - start
    print(f"{label}: {elapsed:.3f}")
def check_dir(path):
    """Raise FileNotFoundError unless *path* is an existing directory."""
    if not path.is_dir():
        raise FileNotFoundError(
            "Requested folder does not exist: %s" % path)
def ensure_dir(path, exist_ok=True):
    """Create directory *path* (with parents) when missing.

    Returns:
        True when the directory exists after the call, False otherwise.
    """
    target = Path(path)
    if not target.is_dir():
        target.mkdir(parents=True, exist_ok=exist_ok)
    return target.is_dir()
def apply_replacement_lookup(df):
    """Rename device (bed) ids in *df* according to DEVICE_REPLACEMENT_LOOKUP.

    For every alias entry, rows whose 'Bettenstellplatz' matches the alias
    and whose 'Timestamp' falls inside the configured replacement window
    get the true bed id written back in place.

    Args:
        df: DataFrame with 'Bettenstellplatz' and 'Timestamp' columns;
            modified in place.
    """
    print("Applying device replacements...")
    def dt_to_str(dt):
        return "--" if dt is None else dt.strftime("%m.%d.%y %H:%M")
    for id_alias, replace_data in DEVICE_REPLACEMENT_LOOKUP.items():
        id_true, repl_start, repl_stop = replace_data
        repl_start = pd.to_datetime(repl_start)
        repl_stop = pd.to_datetime(repl_stop)
        # Build the row mask step by step; the original relied on Python's
        # scalar `or` returning the Series operand, which is fragile.
        mask = df["Bettenstellplatz"] == id_alias
        if repl_start is not None:
            mask &= df["Timestamp"] >= repl_start
        if repl_stop is not None:
            mask &= df["Timestamp"] <= repl_stop
        df.loc[mask, "Bettenstellplatz"] = id_true
        print("%-6s => %-6s: %6d affected values in time range (%s, %s)"
              % (id_alias, id_true, mask.sum(),
                 dt_to_str(repl_start), dt_to_str(repl_stop)))
    print()
def read_validation_data(data_dir):
    """Read and combine the station-monitor and manual validation data.

    Combines the station export (HF-AF_25052021.csv, restricted to
    VALIDATION_DATA_SOURCES) with the manually collected measurements and
    returns one long-format DataFrame with columns Timestamp,
    Bettenstellplatz, Signatur, Vitalparameter, Wert, Bemerkungen and
    Abweichung_Trageort, sorted by bed id and time.

    Args:
        data_dir: Root data directory (pathlib.Path); the source files
            are expected under ``data_dir/original/validation``.
    """
    def no_whitespace(s):
        # Bed ids were typed inconsistently; drop all spaces.
        return s.replace(" ", "")
    def fix_time(s):
        # Some manual times were entered as '12.30' instead of '12:30'.
        return s.replace(".", ":")
    def form_timestamp(df, col_date, col_time):
        # Combine date and time columns into a tz-aware timestamp;
        # rows missing either component become None.
        timestamp = df[col_date] + " " + df[col_time]
        timestamp = pd.to_datetime(timestamp, dayfirst=True)
        timestamp = timestamp.dt.tz_localize("Europe/Zurich").copy()
        timestamp[(df[col_date]=="") | (df[col_time]=="")] = None
        return timestamp
    def format_manual(df, timestamp, parameter):
        # Extract one vital parameter from the wide manual sheet into
        # the long format used downstream; drop missing measurements.
        df_ret = df[["Bettenstellplatz", parameter,
                     "Bemerkungen", "Abweichung_Trageort"]].copy()
        df_ret = df_ret.rename({parameter: "Wert"}, axis=1)
        icol = df_ret.columns.get_loc("Wert")
        df_ret.insert(loc=icol, column="Vitalparameter", value=parameter)
        df_ret.insert(loc=0, column="Timestamp", value=timestamp)
        df_ret = df_ret[~df_ret["Wert"].isna()].copy()
        return df_ret
    def read_station_data(valid_dir):
        file_path = valid_dir/"HF-AF_25052021.csv"
        df = pd.read_csv(file_path,
                         converters={"Signatur": str.strip,
                                     "Bettenstellplatz": str.strip})
        # Keep only the relevant monitoring sources.
        df = df[df["Signatur"].isin(VALIDATION_DATA_SOURCES)]
        timestamp = form_timestamp(df=df, col_date="Datum", col_time="Zeit")
        df.insert(loc=0, column="Timestamp", value=timestamp)
        df = df.drop(["Datum", "Zeit"], axis=1)
        # Transform to long format.
        df = df.melt(id_vars=["Timestamp", "Bettenstellplatz", "Signatur"],
                     value_vars=["Herzfrequenz", "Atemfrequenz", "Temperatur"],
                     var_name="Vitalparameter", value_name="Wert")
        df = df[~df["Wert"].isna()].copy()
        # Station data carries no free-text annotations.
        df["Bemerkungen"] = ""
        df["Abweichung_Trageort"] = ""
        return df
    def read_manual_data(valid_dir):
        file_path = valid_dir/"Validierung_Daten_manuell_Mai2021_alle.csv"
        df = pd.read_csv(file_path,
                         converters={"Bettenstellplatz": no_whitespace,
                                     "Zeit_AF": fix_time,
                                     "Zeit_HF": fix_time,
                                     "Bemerkungen": str.strip,
                                     "Abweichung_Trageort": str.strip})
        # Atemfrequenz
        ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_AF")
        df_a = format_manual(df=df, timestamp=ts, parameter="Atemfrequenz")
        # Herzfrequenz
        ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_HF")
        df_h = format_manual(df=df, timestamp=ts, parameter="Herzfrequenz")
        # Temperatur (Zeit_Temp, use Zeit_HF is missing!)
        # NOTE(review): the HF time column is used for Temperatur
        # unconditionally — confirm a Zeit_Temp column really is absent.
        ts = form_timestamp(df=df, col_date="Datum", col_time="Zeit_HF")
        df_t = format_manual(df=df, timestamp=ts, parameter="Temperatur")
        df = pd.concat((df_a, df_h, df_t), axis=0)
        df["Signatur"] = "MANUELL"
        return df
    print("Reading Validation data...")
    valid_dir = data_dir/"original"/"validation"
    check_dir(valid_dir)
    df_station = read_station_data(valid_dir=valid_dir)
    df_manual = read_manual_data(valid_dir=valid_dir)
    df_valid = pd.concat((df_station, df_manual), axis=0)
    df_valid = df_valid.sort_values(["Bettenstellplatz", "Timestamp"])
    return df_valid
def read_baslerband_data(data_dir, n_max=None):
    """Read all Basler band activity files under data_dir/original/basler_band.

    Args:
        data_dir: Root data directory (pathlib.Path).
        n_max: Optional cap on the number of files to read; None reads all.

    Returns:
        Long-format DataFrame with columns Timestamp, Bettenstellplatz,
        Signatur, DeviceID, Vitalparameter and Wert, with device
        replacements applied and sorted by bed id and time.
    """
    def read_bb_file(path):
        # Sample path:
        # ../2021-05-25/2617_FL/basler_band_DB_B4_2C_E5_CC_45_activity_file.csv
        bed_id = path.parent.name.replace("_", "")
        if bed_id == "2668":
            bed_id = "2668E"
        device_id = path.stem
        device_id = device_id.replace("basler_band_", "")
        device_id = device_id.replace("_activity_file", "")
        df = pd.read_csv(path, index_col=[0], parse_dates=[0], sep=";")
        df.index.name = "Timestamp"
        # Filter by quality as specified
        df = df[df["wearing"]==4]
        df = df[["resp_filtered", "hrm_filtered",]]
        df = df.rename({"resp_filtered": "Atemfrequenz",
                        "hrm_filtered": "Herzfrequenz"}, axis=1)
        df["Bettenstellplatz"] = bed_id
        df["DeviceID"] = device_id
        df["Signatur"] = "BASLER_BAND"
        df = df.reset_index(drop=False)
        return df
    print("Reading Basler Band data...")
    bb_dir = data_dir/"original"/"basler_band"
    check_dir(bb_dir)
    files = bb_dir.glob("**/basler_band*activity_file.csv")
    files = sorted(files)
    dfs = []
    progress = create_progress_bar(size=len(files),
                                   label="Processing...")
    for i, path in enumerate(files):
        # BUG FIX: the original compared `i >= n_max` unconditionally,
        # which raises TypeError for the default n_max=None.
        if n_max is not None and i >= n_max:
            break
        progress.update(i)
        df = read_bb_file(path=path)
        dfs.append(df)
    progress.finish()
    df = pd.concat(dfs, axis=0)
    df = df.melt(id_vars=["Timestamp", "Bettenstellplatz", "Signatur", "DeviceID"],
                 value_vars=["Herzfrequenz", "Atemfrequenz"],
                 var_name="Vitalparameter", value_name="Wert")
    apply_replacement_lookup(df)
    df = df.sort_values(["Bettenstellplatz", "Timestamp"])
    return df
def read_core_data(data_dir, n_max=None):
    """Read all Core body-temperature CSVs under <data_dir>/original/core.

    Parameters
    ----------
    data_dir : pathlib.Path
        Root data directory.
    n_max : int, optional
        If given, read at most this many files (useful for quick test runs).
        None means "no limit".

    Returns
    -------
    pandas.DataFrame
        Long-format frame with columns Timestamp, Bettenstellplatz, Signatur,
        DeviceID, Vitalparameter ("Temperatur"), Wert (in °C), sorted by
        bed and time.
    """
    def read_core_file(path, columns):
        # Sample path:
        # ../2021-05-17/2617_FL/core_D6_BE_C5_06_B3_48_storage-cbta_d.csv
        bed_id = path.parent.name.replace("_", "")
        if bed_id == "2668":
            bed_id = "2668E"
        device_id = path.stem
        device_id = device_id.replace("core_", "")
        device_id = device_id.replace("_storage-cbta_d", "")
        df = pd.read_csv(path, index_col=[0], parse_dates=[0], sep=";")
        df.index.name = "Timestamp"
        # Map numeric column ids to readable names from the lookup table.
        df = df.rename(columns.to_dict(), axis=1)
        # Filter by quality as specified (quality == 4 only).
        df = df[df["quality (core only)"] == 4]
        df = df[["cbt [mC]",]]
        df = df.rename({"cbt [mC]": "Temperatur"}, axis=1)
        df["Temperatur"] /= 1000  # from °mC to °C
        df["Bettenstellplatz"] = bed_id
        df["DeviceID"] = device_id
        df["Signatur"] = "CORE"
        df = df.reset_index(drop=False)
        return df

    print("Reading Core data...")
    core_dir = data_dir/"original"/"core"
    check_dir(core_dir)
    # COMPAT FIX: read_csv(squeeze=True) was deprecated in pandas 1.4 and
    # removed in 2.0; .squeeze("columns") is the equivalent replacement.
    columns = pd.read_csv(core_dir/"0_storage-cbta_d_columns.csv",
                          skipinitialspace=True,
                          index_col=[0], header=None).squeeze("columns")
    columns.index = columns.index.astype(str)
    files = core_dir.glob("**/core_*storage-cbta_d.csv")
    files = sorted(files)
    progress = create_progress_bar(size=len(files),
                                   label="Processing...")
    dfs = []
    for i, path in enumerate(files):
        # BUG FIX: `i >= n_max` raised TypeError when n_max was None (the
        # documented default); treat None as "no limit".
        if n_max is not None and i >= n_max:
            break
        progress.update(i)
        df = read_core_file(path=path, columns=columns)
        dfs.append(df)
    progress.finish()
    df = pd.concat(dfs, axis=0)
    df = df.melt(id_vars=["Timestamp", "Bettenstellplatz", "Signatur", "DeviceID"],
                 value_vars=["Temperatur"],
                 var_name="Vitalparameter", value_name="Wert")
    apply_replacement_lookup(df)
    df = df.sort_values(["Bettenstellplatz", "Timestamp"])
    return df
def read_data(data_dir, out_dir, force_read, n_files_max):
    """Load validation and sensor data, using <out_dir>/store.h5 as a cache.

    Parameters
    ----------
    data_dir : pathlib.Path
        Root directory containing the "original" data tree.
    out_dir : pathlib.Path
        Output directory where the HDF5 cache ("store.h5") lives.
    force_read : bool
        If True, ignore the cache and re-read everything from the CSVs.
    n_files_max : int or None
        Maximum number of sensor files to read per source (None = no limit).

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        df_valid (reference measurements) and df_sensor (Basler Band + Core
        concatenated), both re-indexed with a fresh RangeIndex.
    """
    df_bb = None
    df_core = None
    df_valid = None
    n_files_max = np.inf if n_files_max is None else n_files_max
    path_store = out_dir/"store.h5"
    if not force_read and path_store.is_file():
        # BUG FIX: use the store as a context manager so the HDF5 file handle
        # is closed even when one of the reads raises (the original leaked
        # the open handle on error).
        with pd.HDFStore(path_store, mode="r") as store:
            if "valid" in store:
                print("Reading validation data lazily...")
                df_valid = store["valid"]
            if "bb" in store:
                print("Reading Basler Band data lazily...")
                df_bb = store["bb"]
            if "core" in store:
                print("Reading Core data lazily...")
                df_core = store["core"]
    # Re-read (and cache) whatever was missing from the store.
    if df_valid is None:
        df_valid = read_validation_data(data_dir=data_dir)
        df_valid.to_hdf(path_store, key="valid")
    if df_bb is None:
        df_bb = read_baslerband_data(data_dir=data_dir, n_max=n_files_max)
        df_bb.to_hdf(path_store, key="bb")
    if df_core is None:
        df_core = read_core_data(data_dir=data_dir, n_max=n_files_max)
        df_core.to_hdf(path_store, key="core")
    df_sensor = pd.concat([df_bb, df_core], axis=0)
    # Reset index so that it can be used for index operations.
    df_sensor = df_sensor.reset_index(drop=True)
    df_valid = df_valid.reset_index(drop=True)
    return df_valid, df_sensor
def validate_data(df_sensor, df_valid, parameters, skip_zeros,
visualize, out_dir=None, interactive=False):
if parameters is None:
parameters = DEFAULT_PARAMETERS.copy()
iloc = df_valid.columns.get_loc("Wert")
df_valid.insert(loc=iloc+1, column="Messüberlappung", value=None)
df_valid.insert(loc=iloc+2, column="Sensor (mean)", value=np.nan)
df_valid.insert(loc=iloc+3, column="Sensor (std)", value=np.nan)
df_valid.insert(loc=iloc+4, column="Sensor (median)", value=np.nan)
df_valid.insert(loc=iloc+5, column="Sensor (samples)", value=None)
df_valid.insert(loc=iloc+6, column="Sensor (zeros)", value=None)
df_valid.insert(loc=iloc+7, column="Fehler (in range)", value=None)
def handle_key(event):
if event.key in "eEqQ":
nonlocal visualize
visualize = False
def handle_close(evt):
nonlocal visualize
visualize = False
def plot_signal_hist(ax, data, parameter, bed_id, tsv):
tsv_str = tsv.strftime("%H:%M (%d.%m.%y)")
ax.clear()
bw = BIN_WIDTHS[parameter]
bin_left = np.round_(data.min(), 2)
bin_right = data.max()+bw
bins = np.arange(bin_left, bin_right, bw)
if len(bins)<=2:
bins = [bins[0]-bw] + list(bins) + [bins[-1]+bw]
sns.histplot(x=data, kde=True, alpha=0.4, ax=ax, bins=bins)
ax.set_title("%s: n=%d, t=%s" % (bed_id, len(data), tsv_str))
ax.set_xlabel(parameter)
ax.set_ylabel("Counts")
ax.grid(axis="y")
ax.set_axisbelow(True)
#ax.set_xlim(VALUE_RANGES[parameter])
ax.set_xticks(bins)
def plot_signals(ax, data, ts, parameter, bed_id, tsv, valid):
tsv_str = tsv.strftime("%H:%M (%d.%m.%y)")
ax.clear()
sns.lineplot(x=ts, y=data, ax=ax, color=[0.6]*3)
ax.plot([tsv], [valid], "o", color="red")
ylim = ax.get_ylim()
ax.plot([tsv, tsv], ylim, color="red")
ax.grid(axis="y")
ax.set_axisbelow(True)
ax.set_title("%s: n=%d, t=%s" % (bed_id, len(data), tsv_str))
ax.set_xlabel(parameter)
ax.set_ylabel("Counts")
fig1 = fig2 = None
if visualize:
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
fig1.canvas.mpl_connect("key_release_event", handle_key)
fig2.canvas.mpl_connect("key_release_event", handle_key)
fig1.canvas.mpl_connect("close_event", handle_close)
fig2.canvas.mpl_connect("close_event", handle_close)
if interactive:
fig1.show()
fig2.show()
for parameter in parameters:
print("Aggregating data for parameter '%s'..." % parameter)
dfs_all = df_sensor[df_sensor["Vitalparameter"]==parameter]
dfv_all = df_valid[df_valid["Vitalparameter"]==parameter]
gs = dfs_all.groupby("Bettenstellplatz")
gv = dfv_all.groupby("Bettenstellplatz")
bed_ids = set(gs.groups) & set(gv.groups)
for bed_id in bed_ids:
dfs = gs.get_group(bed_id)
dfv = gv.get_group(bed_id)
assert(dfs["Timestamp"].is_monotonic_increasing)
assert(dfv["Timestamp"].is_monotonic_increasing)
assert(not dfs["Timestamp"].isna().any())
assert(not dfv["Timestamp"].isna().any())
ts_start = dfv["Timestamp"]-pd.Timedelta(minutes=DELTA_TS)
ts_stop = dfv["Timestamp"]+pd.Timedelta(minutes=DELTA_TS)
i_start = dfs["Timestamp"].searchsorted(ts_start, side="left")
i_stop = dfs["Timestamp"].searchsorted(ts_stop, side="right")
assert(len(i_start)==len(i_stop)==len(dfv))
for j, (i0, i1, tsv, valid) in enumerate(zip(i_start, i_stop,
dfv["Timestamp"],
dfv["Wert"])):
data = dfs.iloc[i0:i1]["Wert"]
ts = dfs.iloc[i0:i1]["Timestamp"]
if skip_zeros:
data = data[data!=0]
valid = dfv.iloc[j]["Wert"]
mean = data.mean()
std = data.std()
median = data.median()
samples = len(data)
zeros = (data==0).sum()
is_in_range = abs(valid-mean) <= DELTAS[parameter]
is_in_range = None if pd.isna(mean) else is_in_range
assert(dfv.loc[dfv.index[j], "Wert"]==dfv.iloc[j]["Wert"])
# Check if the conventional measurement overlaps with the
# available sensor data. i0==i1==0 or i0==i1==len(dfs)
# is true if the conventional measurement j is taken outside
# the range [dfs.Timestamp.min(), dfs.Timestamp.max()].
overlapping = not (i0==i1==0 or i0==i1==len(dfs))
df_valid.loc[dfv.index[j], "Messüberlappung"] = overlapping
df_valid.loc[dfv.index[j], "Sensor (mean)"] = mean
df_valid.loc[dfv.index[j], | |
ans
x = ad.Variable(0.3, label="x")
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sinh(0.3), grad(adnp.sinh)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.sinh(0.6), grad(lambda x: adnp.sinh(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.sinh(x)
ans_val, ans_der = y.val, [y.der["x"], y.der["y"]]
sol_val, sol_der = (
adnp.sinh(0.7),
[
grad(lambda x, y: adnp.sinh(x + y), 0)(0.3, 0.4),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.3, 0.4),
],
)
assert ans_val == sol_val
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sinh(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sinh(0.3), adnp.sinh(0.4), adnp.sinh(0.5)],
[
grad(lambda x: adnp.sinh(x))(0.3),
grad(lambda x: adnp.sinh(x))(0.4),
grad(lambda x: adnp.sinh(x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.3, 0.4, 0.5], label="x")
y = ad.sinh(x + x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = (
[adnp.sinh(0.3 * 2), adnp.sinh(0.4 * 2), adnp.sinh(0.5 * 2)],
[
grad(lambda x: adnp.sinh(x + x))(0.3),
grad(lambda x: adnp.sinh(x + x))(0.4),
grad(lambda x: adnp.sinh(x + x))(0.5),
],
)
assert check_list(ans_val, sol_val)
assert check_list(ans_der, sol_der)
x = ad.Variable([0.03, 0.04, 0.05], label="x")
y = ad.Variable([0.06, 0.06, 0.06], label="y")
y = ad.sinh(x + y)
ans_val, ans_der_x, ans_der_y = y.val, y.der["x"], y.der["y"]
sol_val, sol_der_x, sol_der_y = (
[adnp.sinh(0.09), adnp.sinh(0.10), adnp.sinh(0.11)],
[
grad(lambda x, y: adnp.sinh(x + y), 0)(0.030, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 0)(0.040, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 0)(0.050, 0.060),
],
[
grad(lambda x, y: adnp.sinh(x + y), 1)(0.030, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.040, 0.060),
grad(lambda x, y: adnp.sinh(x + y), 1)(0.050, 0.060),
],
)
assert check_list(ans_val, sol_val)
assert check_list(sol_der_x, ans_der_x) & check_list(sol_der_y, ans_der_y)
def test_cos():
    """Check ad.cos values and derivatives against autograd's numpy."""
    # Plain scalar input: no Variable involved, plain numeric output.
    assert ad.cos(0.3) == adnp.cos(0.3)

    # Single variable.
    out = ad.cos(ad.Variable(0.3, label="x"))
    assert out.val == adnp.cos(0.3)
    assert math.isclose(out.der["x"], grad(adnp.cos)(0.3))

    # Variable shifted by a constant.
    out = ad.cos(ad.Variable(0.3, label="x") + 0.3)
    assert out.val == adnp.cos(0.6)
    assert math.isclose(out.der["x"], grad(lambda t: adnp.cos(t + 0.3))(0.3))

    # Sum of two scalar variables: partial derivatives for both labels.
    out = ad.cos(ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y"))
    assert out.val == adnp.cos(0.7)
    want = [
        grad(lambda a, b: adnp.cos(a + b), 0)(0.3, 0.4),
        grad(lambda a, b: adnp.cos(a + b), 1)(0.3, 0.4),
    ]
    assert check_list([out.der["x"], out.der["y"]], want)

    # Vector-valued variable.
    points = [0.3, 0.4, 0.5]
    out = ad.cos(ad.Variable(points, label="x"))
    assert check_list(out.val, [adnp.cos(p) for p in points])
    assert check_list(out.der["x"], [grad(lambda t: adnp.cos(t))(p) for p in points])

    # Same variable appearing twice in the argument (x + x).
    vec = ad.Variable(points, label="x")
    out = ad.cos(vec + vec)
    assert check_list(out.val, [adnp.cos(p * 2) for p in points])
    assert check_list(out.der["x"], [grad(lambda t: adnp.cos(t + t))(p) for p in points])

    # Two vector variables added element-wise.
    xs = ad.Variable([0.03, 0.04, 0.05], label="x")
    ys = ad.Variable([0.06, 0.06, 0.06], label="y")
    out = ad.cos(xs + ys)
    assert check_list(out.val, [adnp.cos(s) for s in (0.09, 0.10, 0.11)])
    want_x = [grad(lambda a, b: adnp.cos(a + b), 0)(p, 0.06)
              for p in (0.03, 0.04, 0.05)]
    want_y = [grad(lambda a, b: adnp.cos(a + b), 1)(p, 0.06)
              for p in (0.03, 0.04, 0.05)]
    assert check_list(want_x, out.der["x"]) and check_list(want_y, out.der["y"])
def test_arccos():
    """Check ad.arccos values and derivatives against autograd's numpy."""
    # Plain scalar input: no Variable involved, plain numeric output.
    assert ad.arccos(0.3) == adnp.arccos(0.3)

    # Single variable.
    out = ad.arccos(ad.Variable(0.3, label="x"))
    assert out.val == adnp.arccos(0.3)
    assert math.isclose(out.der["x"], grad(adnp.arccos)(0.3))

    # Variable shifted by a constant.
    out = ad.arccos(ad.Variable(0.3, label="x") + 0.3)
    assert out.val == adnp.arccos(0.6)
    assert math.isclose(out.der["x"], grad(lambda t: adnp.arccos(t + 0.3))(0.3))

    # Sum of two scalar variables: partial derivatives for both labels.
    out = ad.arccos(ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y"))
    assert out.val == adnp.arccos(0.7)
    want = [
        grad(lambda a, b: adnp.arccos(a + b), 0)(0.3, 0.4),
        grad(lambda a, b: adnp.arccos(a + b), 1)(0.3, 0.4),
    ]
    assert check_list([out.der["x"], out.der["y"]], want)

    # Vector-valued variable.
    points = [0.3, 0.4, 0.5]
    out = ad.arccos(ad.Variable(points, label="x"))
    assert check_list(out.val, [adnp.arccos(p) for p in points])
    assert check_list(out.der["x"], [grad(lambda t: adnp.arccos(t))(p) for p in points])

    # Same variable appearing twice in the argument (x + x).
    vec = ad.Variable(points, label="x")
    out = ad.arccos(vec + vec)
    assert check_list(out.val, [adnp.arccos(p * 2) for p in points])
    assert check_list(out.der["x"], [grad(lambda t: adnp.arccos(t + t))(p) for p in points])

    # Two vector variables added element-wise.
    xs = ad.Variable([0.03, 0.04, 0.05], label="x")
    ys = ad.Variable([0.06, 0.06, 0.06], label="y")
    out = ad.arccos(xs + ys)
    assert check_list(out.val, [adnp.arccos(s) for s in (0.09, 0.10, 0.11)])
    want_x = [grad(lambda a, b: adnp.arccos(a + b), 0)(p, 0.06)
              for p in (0.03, 0.04, 0.05)]
    want_y = [grad(lambda a, b: adnp.arccos(a + b), 1)(p, 0.06)
              for p in (0.03, 0.04, 0.05)]
    assert check_list(want_x, out.der["x"]) and check_list(want_y, out.der["y"])

    # Inputs outside the domain [-1, 1] must be rejected.
    with pytest.raises(Exception):
        ad.arccos(ad.Variable(2, label="x"))
def test_cosh():
    """Check ad.cosh values and derivatives against autograd's numpy."""
    # Plain scalar input: no Variable involved, plain numeric output.
    assert ad.cosh(0.3) == adnp.cosh(0.3)

    # Single variable.
    out = ad.cosh(ad.Variable(0.3, label="x"))
    assert out.val == adnp.cosh(0.3)
    assert math.isclose(out.der["x"], grad(adnp.cosh)(0.3))

    # Variable shifted by a constant.
    out = ad.cosh(ad.Variable(0.3, label="x") + 0.3)
    assert out.val == adnp.cosh(0.6)
    assert math.isclose(out.der["x"], grad(lambda t: adnp.cosh(t + 0.3))(0.3))

    # Sum of two scalar variables: partial derivatives for both labels.
    out = ad.cosh(ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y"))
    assert out.val == adnp.cosh(0.7)
    want = [
        grad(lambda a, b: adnp.cosh(a + b), 0)(0.3, 0.4),
        grad(lambda a, b: adnp.cosh(a + b), 1)(0.3, 0.4),
    ]
    assert check_list([out.der["x"], out.der["y"]], want)

    # Vector-valued variable.
    points = [0.3, 0.4, 0.5]
    out = ad.cosh(ad.Variable(points, label="x"))
    assert check_list(out.val, [adnp.cosh(p) for p in points])
    assert check_list(out.der["x"], [grad(lambda t: adnp.cosh(t))(p) for p in points])

    # Same variable appearing twice in the argument (x + x).
    vec = ad.Variable(points, label="x")
    out = ad.cosh(vec + vec)
    assert check_list(out.val, [adnp.cosh(p * 2) for p in points])
    assert check_list(out.der["x"], [grad(lambda t: adnp.cosh(t + t))(p) for p in points])

    # Two vector variables added element-wise.
    xs = ad.Variable([0.03, 0.04, 0.05], label="x")
    ys = ad.Variable([0.06, 0.06, 0.06], label="y")
    out = ad.cosh(xs + ys)
    assert check_list(out.val, [adnp.cosh(s) for s in (0.09, 0.10, 0.11)])
    want_x = [grad(lambda a, b: adnp.cosh(a + b), 0)(p, 0.06)
              for p in (0.03, 0.04, 0.05)]
    want_y = [grad(lambda a, b: adnp.cosh(a + b), 1)(p, 0.06)
              for p in (0.03, 0.04, 0.05)]
    assert check_list(want_x, out.der["x"]) and check_list(want_y, out.der["y"])
def test_tan():
x = 0.3
ans = ad.tan(x)
sol = adnp.tan(x)
assert sol == ans
x = ad.Variable(0.3, label="x")
y = ad.tan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tan(0.3), grad(adnp.tan)(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + 0.3
y = ad.tan(x)
ans_val, ans_der = y.val, y.der["x"]
sol_val, sol_der = adnp.tan(0.6), grad(lambda x: adnp.tan(x + 0.3))(0.3)
assert ans_val == sol_val
assert math.isclose(ans_der, sol_der)
x = ad.Variable(0.3, label="x") + ad.Variable(0.4, label="y")
y = ad.tan(x)
ans_val, ans_der | |
global
'http://www.openstat.ru/',
# Why: #8116 in Alexa global
'http://www.adlandpro.com/',
# Why: #8117 in Alexa global
'http://www.trivago.de/',
# Why: #8118 in Alexa global
'http://feiren.com/',
# Why: #8119 in Alexa global
'http://www.lespac.com/',
# Why: #8120 in Alexa global
'http://www.icook.tw/',
# Why: #8121 in Alexa global
'http://www.iceporn.com/',
# Why: #8122 in Alexa global
'http://www.animehere.com/',
# Why: #8123 in Alexa global
'http://www.klix.ba/',
# Why: #8124 in Alexa global
'http://www.elitepvpers.com/',
# Why: #8125 in Alexa global
'http://www.mrconservative.com/',
# Why: #8126 in Alexa global
'http://www.tamu.edu/',
# Why: #8127 in Alexa global
'http://www.startv.com.tr/',
# Why: #8128 in Alexa global
'http://www.haber1903.com/',
# Why: #8129 in Alexa global
'http://www.apa.tv/',
# Why: #8130 in Alexa global
'http://uc.cn/',
# Why: #8131 in Alexa global
'http://www.idbi.com/',
# Why: #8132 in Alexa global
'http://www.golfchannel.com/',
# Why: #8133 in Alexa global
'http://www.pep.ph/',
# Why: #8134 in Alexa global
'http://www.toukoucity.to/',
# Why: #8135 in Alexa global
'http://www.empiremoney.com/',
# Why: #8136 in Alexa global
'http://www.androidauthority.com/',
# Why: #8137 in Alexa global
'http://www.ref4bux.com/',
# Why: #8138 in Alexa global
'http://www.digitaljournal.com/',
# Why: #8139 in Alexa global
'http://www.sporcle.com/',
# Why: #8141 in Alexa global
'http://www.183.com.cn/',
# Why: #8142 in Alexa global
'http://www.bzwbk.pl/',
# Why: #8143 in Alexa global
'http://lalamao.com/',
# Why: #8144 in Alexa global
'http://www.ziare.com/',
# Why: #8145 in Alexa global
'http://www.cliti.com/',
# Why: #8146 in Alexa global
'http://www.thatguywiththeglasses.com/',
# Why: #8147 in Alexa global
'http://www.vodu.ch/',
# Why: #8148 in Alexa global
'http://www.ycwb.com/',
# Why: #8149 in Alexa global
'http://www.bls.gov/',
# Why: #8150 in Alexa global
'http://www.matsui.co.jp/',
# Why: #8151 in Alexa global
'http://xmrc.com.cn/',
# Why: #8152 in Alexa global
'http://1tubenews.com/',
# Why: #8153 in Alexa global
'http://www.cl.ly/',
# Why: #8154 in Alexa global
'http://www.ing.be/',
# Why: #8155 in Alexa global
'http://www.bitterstrawberry.com/',
# Why: #8156 in Alexa global
'http://www.fubar.com/',
# Why: #8157 in Alexa global
'http://www.arabic-keyboard.org/',
# Why: #8158 in Alexa global
'http://www.mejortorrent.com/',
# Why: #8159 in Alexa global
'http://www.trendmicro.com/',
# Why: #8160 in Alexa global
'http://www.ap7am.com/',
# Why: #8161 in Alexa global
'http://www.windowsazure.com/',
# Why: #8162 in Alexa global
'http://www.q8yat.com/',
# Why: #8163 in Alexa global
'http://www.yyv.co/',
# Why: #8164 in Alexa global
'http://www.tvoy-start.com/',
# Why: #8165 in Alexa global
'http://www.creativetoolbars.com/',
# Why: #8166 in Alexa global
'http://www.forrent.com/',
# Why: #8167 in Alexa global
'http://www.mlstatic.com/',
# Why: #8168 in Alexa global
'http://www.like4like.org/',
# Why: #8169 in Alexa global
'http://www.alpha.gr/',
# Why: #8170 in Alexa global
'http://www.amkey.net/',
# Why: #8172 in Alexa global
'http://www.iwiw.hu/',
# Why: #8173 in Alexa global
'http://www.routard.com/',
# Why: #8174 in Alexa global
'http://www.teacherspayteachers.com/',
# Why: #8175 in Alexa global
'http://www.ahashare.com/',
# Why: #8176 in Alexa global
'http://www.ultoo.com/',
# Why: #8177 in Alexa global
'http://www.oakley.com/',
# Why: #8178 in Alexa global
'http://www.upforit.com/',
# Why: #8179 in Alexa global
'http://www.trafficbee.com/',
# Why: #8180 in Alexa global
'http://www.monster.co.uk/',
# Why: #8181 in Alexa global
'http://www.boulanger.fr/',
# Why: #8182 in Alexa global
'http://www.bloglines.com/',
# Why: #8183 in Alexa global
'http://www.wdc.com/',
# Why: #8184 in Alexa global
'http://www.backpackers.com.tw/',
# Why: #8185 in Alexa global
'http://www.el-nacional.com/',
# Why: #8186 in Alexa global
'http://www.bloggertipstricks.com/',
# Why: #8187 in Alexa global
'http://www.oreillyauto.com/',
# Why: #8188 in Alexa global
'http://www.hotpads.com/',
# Why: #8189 in Alexa global
'http://www.tubexvideo.com/',
# Why: #8190 in Alexa global
'http://www.mudainodocument.com/',
# Why: #8191 in Alexa global
'http://www.17car.com.cn/',
# Why: #8192 in Alexa global
'http://www.discoverpedia.info/',
# Why: #8193 in Alexa global
'http://www.noobteens.com/',
# Why: #8194 in Alexa global
'http://www.shockmansion.com/',
# Why: #8195 in Alexa global
'http://www.qudsonline.ir/',
# Why: #8196 in Alexa global
'http://www.mec.es/',
# Why: #8197 in Alexa global
'http://www.vt.edu/',
# Why: #8198 in Alexa global
'http://www.akelite.com/',
# Why: #8199 in Alexa global
'http://www.travelandleisure.com/',
# Why: #8200 in Alexa global
'http://www.sunnewsonline.com/',
# Why: #8201 in Alexa global
'http://www.tok2.com/',
# Why: #8202 in Alexa global
'http://www.truste.org/',
# Why: #8203 in Alexa global
'http://www.2dehands.be/',
# Why: #8204 in Alexa global
'http://www.hf365.com/',
# Why: #8205 in Alexa global
'http://www.westelm.com/',
# Why: #8206 in Alexa global
'http://www.radiko.jp/',
# Why: #8207 in Alexa global
'http://www.real.gr/',
# Why: #8208 in Alexa global
'http://www.blogcms.jp/',
# Why: #8209 in Alexa global
'http://www.downloadming.me/',
# Why: #8210 in Alexa global
'http://www.citromail.hu/',
# Why: #8211 in Alexa global
'http://www.fotocommunity.de/',
# Why: #8212 in Alexa global
'http://www.zapjuegos.com/',
# Why: #8213 in Alexa global
'http://www.aastocks.com/',
# Why: #8214 in Alexa global
'http://www.unb.br/',
# Why: #8215 in Alexa global
'http://www.adchakra.net/',
# Why: #8216 in Alexa global
'http://www.check24.de/',
# Why: #8217 in Alexa global
'http://www.vidto.me/',
# Why: #8218 in Alexa global
'http://www.peekyou.com/',
# Why: #8219 in Alexa global
'http://www.urssaf.fr/',
# Why: #8220 in Alexa global
'http://www.alixixi.com/',
# Why: #8221 in Alexa global
'http://www.winamp.com/',
# Why: #8222 in Alexa global
'http://www.xianguo.com/',
# Why: #8223 in Alexa global
'http://www.indiasextube.net/',
# Why: #8224 in Alexa global
'http://www.fitnea.com/',
# Why: #8225 in Alexa global
'http://www.telemundo.com/',
# Why: #8226 in Alexa global
'http://www.webnode.cz/',
# Why: #8227 in Alexa global
'http://www.kliksaya.com/',
# Why: #8228 in Alexa global
'http://www.wikileaks.org/',
# Why: #8229 in Alexa global
'http://www.myblog.it/',
# Why: #8231 in Alexa global
'http://www.99wed.com/',
# Why: #8232 in Alexa global
'http://www.adorika.com/',
# Why: #8233 in Alexa global
'http://www.siliconrus.com/',
# Why: #8235 in Alexa global
'http://www.dealmoon.com/',
# Why: #8236 in Alexa global
'http://www.ricanadfunds.com/',
# Why: #8237 in Alexa global
'http://www.vietcombank.com.vn/',
# Why: #8238 in Alexa global
'http://www.chemistry.com/',
# Why: #8239 in Alexa global
'http://www.reisen.de/',
# Why: #8240 in Alexa global
'http://www.torlock.com/',
# Why: #8241 in Alexa global
'http://www.wsop.com/',
# Why: #8242 in Alexa global
'http://www.travian.co.id/',
# Why: #8243 in Alexa global
'http://www.ipoll.com/',
# Why: #8244 in Alexa global
'http://www.bpiexpressonline.com/',
# Why: #8245 in Alexa global
'http://www.neeu.com/',
# Why: #8246 in Alexa global
'http://www.atmarkit.co.jp/',
# Why: #8247 in Alexa global
'http://www.beyondtherack.com/',
# Why: #8248 in Alexa global
'http://blueidea.com/',
# Why: #8249 in Alexa global
'http://www.tedata.net/',
# Why: #8250 in Alexa global
'http://www.gamesradar.com/',
# Why: #8251 in Alexa global
'http://www.big.az/',
# Why: #8252 in Alexa global
'http://www.h-douga.net/',
# Why: #8253 in Alexa global
'http://www.runnersworld.com/',
# Why: #8254 in Alexa global
'http://www.lumfile.com/',
# Why: #8255 in Alexa global
'http://ccoo.cn/',
# Why: #8256 in Alexa global
'http://www.u17.com/',
# Why: #8257 in Alexa global
'http://www.badjojo.com/',
# Why: #8259 in Alexa global
'http://eplus.jp/',
# Why: #8260 in Alexa global
'http://www.nginx.org/',
# Why: #8261 in Alexa global
'http://www.filmfanatic.com/',
# Why: #8262 in Alexa global
'http://www.filmey.com/',
# Why: #8263 in Alexa global
'http://www.mousebreaker.com/',
# Why: #8264 in Alexa global
'http://www.mihanstore.net/',
# Why: #8265 in Alexa global
'http://www.sharebuilder.com/',
# Why: #8266 in Alexa global
'http://cnhan.com/',
# Why: #8267 in Alexa global
'http://www.partnerwithtom.com/',
# Why: #8268 in Alexa global
'http://www.synonym.com/',
# Why: #8269 in Alexa global
'http://www.areaconnect.com/',
# Why: #8271 in Alexa global
'http://www.one.lt/',
# Why: #8272 in Alexa global
'http://www.mp3quran.net/',
# Why: #8273 in Alexa global
'http://www.anz.co.nz/',
# Why: #8274 in Alexa global
'http://www.buyincoins.com/',
# Why: #8275 in Alexa global
'http://www.surfline.com/',
# Why: #8276 in Alexa global
'http://www.packtpub.com/',
# Why: #8277 in Alexa global
'http://www.informe21.com/',
# Why: #8278 in Alexa global
'http://www.d4000.com/',
# Why: #8279 in Alexa global
'http://www.blog.cz/',
# Why: #8280 in Alexa global
'http://www.myredbook.com/',
# Why: #8281 in Alexa global
'http://www.seslisozluk.net/',
# Why: #8282 in Alexa global
'http://www.simple2advertise.com/',
# Why: #8283 in Alexa global
'http://www.bookit.com/',
# Why: #8284 in Alexa global
'http://www.eranico.com/',
# Why: #8285 in Alexa global
'http://www.pakwheels.com/',
# Why: #8286 in Alexa global
'http://www.x-rates.com/',
# Why: #8287 in Alexa global
'http://www.ilmatieteenlaitos.fi/',
# Why: #8288 in Alexa global
'http://www.vozforums.com/',
# Why: #8289 in Alexa global
'http://www.galerieslafayette.com/',
# Why: #8290 in Alexa global
'http://www.trafficswirl.com/',
# Why: #8291 in Alexa global
'http://www.mql4.com/',
# Why: #8292 in Alexa global
'http://www.torontosun.com/',
# Why: #8293 in Alexa global
'http://www.channel.or.jp/',
# Why: #8295 in Alexa global
'http://www.lebuteur.com/',
# Why: #8296 in Alexa global
'http://www.cruisecritic.com/',
# Why: #8297 in Alexa global
'http://www.rateyourmusic.com/',
# Why: #8298 in Alexa global
'http://www.binsearch.info/',
# Why: #8299 in Alexa global
'http://www.nrj.fr/',
# Why: #8300 in Alexa global
'http://www.megaflix.net/',
# Why: #8301 in Alexa global
'http://www.dosug.cz/',
# Why: #8302 in Alexa global
'http://www.spdb.com.cn/',
# Why: #8303 in Alexa global
'http://www.stop55.com/',
# Why: #8304 in Alexa global
'http://www.qqnz.com/',
# Why: #8305 in | |
from django.utils import timezone
from .models import *
from .constants import *
# Reading material for double entry accounting:
# http://en.wikipedia.org/wiki/Double-entry_bookkeeping_system
# http://www.find-uk-accountant.co.uk/articles/fua/16
# http://www.accountingcoach.com/accounts-receivable-and-bad-debts-expense/explanation
# http://www.ledger-cli.org/3.0/doc/ledger3.html
# Completed Contract Accounting Method
# https://en.wikipedia.org/wiki/Completed-contract_method
# Revenue Recognition
# https://en.wikipedia.org/wiki/Revenue_recognition
def debit_jobs(debits, transacted_on=None, recognize_revenue=False, debug=False):
    """
    Debit the customer accounts with any new work completed or just flat amount.
    Credit the promised payments account with the same amount to balance the transaction.

    debits: iterable of (job, debit_amount, entry_type) triples; debit_amount
        must have a non-negative .gross and entry_type must be
        Entry.WORK_DEBIT or Entry.FLAT_DEBIT.
    transacted_on: timestamp for the transaction; defaults to timezone.now().
    recognize_revenue: True when this is a final invoice -- revenue is then
        recognized for every job in `debits` (completed-contract method).
    debug: passed through to Transaction.save().

    Returns the saved Transaction.

    NOTE(review): validation relies on `assert`, which is stripped under
    `python -O`; consider raising explicit exceptions instead.
    """
    transacted_on = transacted_on or timezone.now()
    transaction = Transaction(
        transacted_on=transacted_on,
        transaction_type=Transaction.INVOICE,
        is_revenue_recognized=recognize_revenue,
    )
    for job, debit_amount, entry_type in debits:
        assert debit_amount.gross >= 0
        assert entry_type in (Entry.WORK_DEBIT, Entry.FLAT_DEBIT)
        if recognize_revenue or job.is_revenue_recognized:
            # we're creating a final invoice or this job was already on a final invoice before
            if not job.is_revenue_recognized:
                # this job wasn't on any final invoices before, so we're switching
                # from delayed revenue recognition to recognized revenue and
                # this means we need to move all of the built-up partial payments and promised payments
                # into the revenue account
                # Moving Partial Payments
                partial_payments_account = Account.objects.get(
                    code=SKR03_PARTIAL_PAYMENTS_CODE
                )
                prior_income = partial_payments_account.entries.filter(job=job).sum
                assert (
                    prior_income.net >= 0
                )  # only way this fails is if total refunds > total payments
                assert (
                    prior_income.tax == 0
                )  # income should not include any tax entries
                if prior_income.net > 0:
                    # debit the partial payments account (liability), decreasing the liability
                    # (-) "good thing", product or service has been completed and delivered
                    transaction.debit(
                        SKR03_PARTIAL_PAYMENTS_CODE,
                        prior_income.net,
                        job=job,
                        value_type=Entry.NET,
                    )
                    # credit the income account (income), this increases the balance
                    # (+) "good thing", income is good
                    transaction.credit(
                        SKR03_INCOME_CODE,
                        prior_income.net,
                        job=job,
                        value_type=Entry.NET,
                    )
                # Moving Promised Payments
                # okay, not quite moving, more like clearing the promised payments
                # we'll add them into the income account a little bit later
                account_balance = job.account.balance
                if account_balance.net > 0:
                    # reset balance, by paying it in full
                    transaction.debit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        account_balance.net,
                        job=job,
                        value_type=Entry.NET,
                    )
                    transaction.credit(
                        job.account,
                        account_balance.net,
                        entry_type=Entry.ADJUSTMENT,
                        job=job,
                        value_type=Entry.NET,
                    )
                elif account_balance.net < 0:
                    # we can work with a negative balance but only if there is a current debit pending
                    # that will get the account back to zero or positive (since we can't have a negative invoice)
                    assert debit_amount.net + account_balance.net >= 0
                    # reset balance, by refunding it in full
                    transaction.credit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        account_balance.negate.net,
                        job=job,
                        value_type=Entry.NET,
                    )
                    transaction.debit(
                        job.account,
                        account_balance.negate.net,
                        entry_type=Entry.ADJUSTMENT,
                        job=job,
                        value_type=Entry.NET,
                    )
                # Same clearing as above, but for the tax component of the balance.
                if account_balance.tax > 0:
                    # reset balance, by paying it in full
                    transaction.debit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        account_balance.tax,
                        job=job,
                        value_type=Entry.TAX,
                    )
                    transaction.credit(
                        job.account,
                        account_balance.tax,
                        entry_type=Entry.ADJUSTMENT,
                        job=job,
                        value_type=Entry.TAX,
                    )
                elif account_balance.tax < 0:
                    # we can work with a negative balance but only if there is a current debit pending
                    # that will get the account back to zero or positive (since we can't have a negative invoice)
                    assert debit_amount.tax + account_balance.tax >= 0
                    # reset balance, by refunding it in full
                    transaction.credit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        account_balance.negate.tax,
                        job=job,
                        value_type=Entry.TAX,
                    )
                    transaction.debit(
                        job.account,
                        account_balance.negate.tax,
                        entry_type=Entry.ADJUSTMENT,
                        job=job,
                        value_type=Entry.TAX,
                    )
                # update the new debit increasing or decreasing it depending on the balance
                debit_amount += account_balance
                # Now we can mark ourselves as revenue recognized for a job well done! Pun intended.
                job.is_revenue_recognized = True
                job.save()
            # debit the customer account (asset), this increases their balance
            # (+) "good thing", customer owes us more money
            if debit_amount.net > 0:
                transaction.debit(
                    job.account,
                    debit_amount.net,
                    entry_type=entry_type,
                    job=job,
                    value_type=Entry.NET,
                )
            if debit_amount.tax > 0:
                transaction.debit(
                    job.account,
                    debit_amount.tax,
                    entry_type=entry_type,
                    job=job,
                    value_type=Entry.TAX,
                )
            # credit the income account (income), this increases the balance
            # (+) "good thing", income is good
            if debit_amount.net > 0:
                transaction.credit(
                    SKR03_INCOME_CODE, debit_amount.net, job=job, value_type=Entry.NET
                )
            # credit the tax payments account (liability), increasing the liability
            # (+) "bad thing", will have to be paid in taxes eventually
            if (
                debit_amount.tax > 0
            ):  # when the refund is very small (like 0.01) there is no tax
                transaction.credit(
                    SKR03_TAX_PAYMENTS_CODE,
                    debit_amount.tax,
                    job=job,
                    value_type=Entry.TAX,
                )
        else:
            # Still not recognizing revenue, tracking debits in a promised payments account instead..
            if debit_amount.gross > 0:
                # debit the customer account (asset), this increases their balance
                # (+) "good thing", customer owes us more money
                if debit_amount.net > 0:
                    transaction.debit(
                        job.account,
                        debit_amount.net,
                        entry_type=entry_type,
                        job=job,
                        value_type=Entry.NET,
                    )
                if debit_amount.tax > 0:
                    transaction.debit(
                        job.account,
                        debit_amount.tax,
                        entry_type=entry_type,
                        job=job,
                        value_type=Entry.TAX,
                    )
                # credit the promised payments account (liability), increasing the liability
                # (+) "bad thing", customer owing us money is a liability
                if debit_amount.net > 0:
                    transaction.credit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        debit_amount.net,
                        job=job,
                        value_type=Entry.NET,
                    )
                if debit_amount.tax > 0:
                    transaction.credit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        debit_amount.tax,
                        job=job,
                        value_type=Entry.TAX,
                    )
    transaction.save(debug=debug)
    return transaction
def credit_jobs(splits, payment, transacted_on=None, bank=None, debug=False):
    """ Applies a payment (and optional discounts/adjustments) across jobs.

    Each split books the paid amount against the customer's account; for
    jobs whose revenue is not yet recognized the counterweight is moved from
    promised payments into partial payments and tax liabilities. Discounts
    and adjustments are credited off the customer account with matching
    income/liability debits.

    Args:
        splits: iterable of (job, amount, discount, adjustment) tuples; the
            money values expose .gross/.net/.tax components.
        payment (Decimal): total gross received; must equal the sum of the
            split gross amounts.
        transacted_on: booking timestamp; defaults to now.
        bank: bank asset Account; defaults to the SKR03 bank account.
        debug (bool): forwarded to Transaction.save().

    Returns:
        The saved Transaction.
    """
    assert isinstance(payment, Decimal)
    # the split amounts must exactly account for the whole payment
    assert payment == sum([p[1].gross for p in splits])
    bank = bank or Account.objects.get(code=SKR03_BANK_CODE)
    transacted_on = transacted_on or timezone.now()
    transaction = Transaction(
        transacted_on=transacted_on, transaction_type=Transaction.PAYMENT
    )
    # debit the bank account (asset)
    # (+) "good thing", money in the bank is always good
    if payment > 0:
        transaction.debit(bank, payment, value_type=Entry.GROSS)
    for (job, amount, discount, adjustment) in splits:
        if amount.gross > 0:
            # credit the customer account (asset), decreasing their balance
            # (-) "bad thing", customer owes us less money
            if amount.net > 0:
                transaction.credit(
                    job.account,
                    amount.net,
                    entry_type=Entry.PAYMENT,
                    job=job,
                    value_type=Entry.NET,
                )
            if amount.tax > 0:
                transaction.credit(
                    job.account,
                    amount.tax,
                    entry_type=Entry.PAYMENT,
                    job=job,
                    value_type=Entry.TAX,
                )
            if not job.is_revenue_recognized:
                # debit the promised payments account (liability), decreasing the liability
                # (-) "good thing", customer paying debt reduces liability
                if amount.net > 0:
                    transaction.debit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        amount.net,
                        job=job,
                        value_type=Entry.NET,
                    )
                if amount.tax > 0:
                    transaction.debit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        amount.tax,
                        job=job,
                        value_type=Entry.TAX,
                    )
                # credit the partial payments account (liability), increasing the liability
                # (+) "bad thing", we are on the hook to finish and deliver the service or product
                if amount.net > 0:
                    transaction.credit(
                        SKR03_PARTIAL_PAYMENTS_CODE,
                        amount.net,
                        job=job,
                        value_type=Entry.NET,
                    )
                # credit the tax payments account (liability), increasing the liability
                # (+) "bad thing", tax have to be paid eventually
                if amount.tax > 0:
                    transaction.credit(
                        SKR03_TAX_PAYMENTS_CODE,
                        amount.tax,
                        job=job,
                        value_type=Entry.TAX,
                    )
        # discounts and adjustments reduce the customer balance the same way;
        # only the counter account differs per reduction type below
        for reduction_type, reduction in [
            (Entry.DISCOUNT, discount),
            (Entry.ADJUSTMENT, adjustment),
        ]:
            if reduction.gross > 0:
                # credit the customer account (asset), decreasing their balance
                # (-) "bad thing", customer owes us less money
                transaction.credit(
                    job.account,
                    reduction.net,
                    entry_type=reduction_type,
                    job=job,
                    value_type=Entry.NET,
                )
                if reduction.tax > 0:
                    transaction.credit(
                        job.account,
                        reduction.tax,
                        entry_type=reduction_type,
                        job=job,
                        value_type=Entry.TAX,
                    )
                if job.is_revenue_recognized:
                    # Reduction after final invoice has a few more steps involved.
                    if reduction_type == Entry.DISCOUNT:
                        # debit the cash discount account (income), indirectly subtracts from the income
                        # (-) "bad thing", less income :-(
                        transaction.debit(
                            SKR03_CASH_DISCOUNT_CODE,
                            reduction.net,
                            job=job,
                            value_type=Entry.NET,
                        )
                    elif reduction_type == Entry.ADJUSTMENT:
                        # debit the income account (income), this decreases the balance
                        # (+) "bad thing", loss in income :-(
                        transaction.debit(
                            SKR03_INCOME_CODE,
                            reduction.net,
                            job=job,
                            value_type=Entry.NET,
                        )
                    # debit the tax payments account (liability), decreasing the liability
                    # (-) "good thing", less taxes to pay
                    if reduction.tax > 0:
                        transaction.debit(
                            SKR03_TAX_PAYMENTS_CODE,
                            reduction.tax,
                            job=job,
                            value_type=Entry.TAX,
                        )
                else:
                    # Reduction prior to final invoice is simpler.
                    # debit the promised payments account (liability), decreasing the liability
                    # (-) "good thing", customer paying debt reduces liability
                    transaction.debit(
                        SKR03_PROMISED_PAYMENTS_CODE,
                        reduction.net,
                        job=job,
                        value_type=Entry.NET,
                    )
                    if reduction.tax > 0:
                        transaction.debit(
                            SKR03_PROMISED_PAYMENTS_CODE,
                            reduction.tax,
                            job=job,
                            value_type=Entry.TAX,
                        )
    transaction.save(debug=debug)
    return transaction
def adjust_jobs(jobs, transacted_on=None, debug=False):
    """Book signed adjustment entries for each (job, adjustment) pair.

    Every non-zero net/tax component produces a signed adjustment entry on
    the customer account, mirrored by a counter entry on the income/tax
    account (revenue recognized) or on promised payments (not recognized).

    Returns:
        The saved Transaction.
    """
    transaction = Transaction(
        transacted_on=transacted_on or timezone.now(),
        transaction_type=Transaction.ADJUSTMENT,
    )
    for job, adjustment in jobs:
        # (value, value_type, counter account used when revenue is recognized)
        components = (
            (adjustment.net, Entry.NET, SKR03_INCOME_CODE),
            (adjustment.tax, Entry.TAX, SKR03_TAX_PAYMENTS_CODE),
        )
        for value, value_type, recognized_code in components:
            if value == 0:
                continue
            transaction.signed(
                job.account,
                value,
                value_type=value_type,
                entry_type=Entry.ADJUSTMENT,
                job=job,
            )
            if job.is_revenue_recognized:
                transaction.signed(
                    recognized_code, value, value_type=value_type, job=job
                )
            else:
                transaction.signed(
                    SKR03_PROMISED_PAYMENTS_CODE,
                    value,
                    value_type=value_type,
                    job=job,
                )
    transaction.save(debug=debug)
    return transaction
def refund_jobs(jobs, transacted_on=None, bank=None, debug=False):
bank = bank or Account.objects.get(code=SKR03_BANK_CODE)
transacted_on = transacted_on or timezone.now()
transaction = Transaction(
transacted_on=transacted_on, transaction_type=Transaction.REFUND
)
bank_refund = Decimal("0.00")
for job, refund, refund_credit in jobs:
if refund.gross > 0:
bank_refund += refund.gross
# debit the customer account (asset), this increases their balance
# (+) "good thing", customer owes us money again
if refund.net > 0:
transaction.debit(
job.account,
refund.net,
entry_type=Entry.REFUND,
job=job,
value_type=Entry.NET,
)
if refund.tax > 0:
transaction.debit(
| |
10*(0/5)),
(10*(1/6), 10*(0/5)),
(10*(1/6), 10*(4/5)),
(10*(-3/6), 10*(4/5))]
)
    def test_crop_bounding_boxes_by_fixed_ints_without_keep_size(self):
        """Pixel crop (top, right, bottom, left)=(1, 0, 4, 4) shifts BBs in place."""
        aug = iaa.Crop((1, 0, 4, 4), keep_size=False)
        bbs = [ia.BoundingBox(x1=0, y1=0, x2=10, y2=10),
               ia.BoundingBox(x1=1, y1=2, x2=9, y2=10)]
        bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(10, 10, 3))
        bbsoi_aug = aug.augment_bounding_boxes([bbsoi, bbsoi])
        assert len(bbsoi_aug) == 2
        for bbsoi_aug_i in bbsoi_aug:
            # canvas shrinks to (10-1-4, 10-0-4) = (5, 6)
            assert bbsoi_aug_i.shape == (5, 6, 3)
            assert len(bbsoi_aug_i.bounding_boxes) == 2
            # coordinates shift by (-left, -top) = (-4, -1)
            assert bbsoi_aug_i.bounding_boxes[0].coords_almost_equals(
                [(0-4, 0-1), (10-4, 10-1)]
            )
            assert bbsoi_aug_i.bounding_boxes[1].coords_almost_equals(
                [(1-4, 2-1), (9-4, 10-1)]
            )
    def test_crop_bounding_boxes_by_fixed_ints_with_keep_size(self):
        """Pixel crop (1, 0, 4, 4) with keep_size=True: coords are projected
        back onto the original 10x10 canvas."""
        aug = iaa.Crop((1, 0, 4, 4), keep_size=True)
        bbs = [ia.BoundingBox(x1=0, y1=0, x2=10, y2=10),
               ia.BoundingBox(x1=1, y1=2, x2=9, y2=10)]
        bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(10, 10, 3))
        bbsoi_aug = aug.augment_bounding_boxes([bbsoi, bbsoi])
        assert len(bbsoi_aug) == 2
        for bbsoi_aug_i in bbsoi_aug:
            assert bbsoi_aug_i.shape == (10, 10, 3)
            assert len(bbsoi_aug_i.bounding_boxes) == 2
            # shifted coords from the (5, 6) crop, scaled by 10/6 in x, 10/5 in y
            assert bbsoi_aug_i.bounding_boxes[0].coords_almost_equals(
                [(10*(-4/6), 10*(-1/5)),
                 (10*(6/6), 10*(9/5))]
            )
            assert bbsoi_aug_i.bounding_boxes[1].coords_almost_equals(
                [(10*(-3/6), 10*(1/5)),
                 (10*(5/6), 10*(9/5))]
            )
def test_crop_by_one_fixed_float_without_keep_size(self):
aug = iaa.Crop(percent=0.1, keep_size=False)
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.shape == (40, 40)
assert np.all(observed == image[5:-5, 5:-5])
def test_crop_by_stochastic_parameter_without_keep_size(self):
aug = iaa.Crop(percent=iap.Deterministic(0.1), keep_size=False)
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
observed = aug.augment_image(image)
assert observed.shape == (40, 40)
assert np.all(observed == image[5:-5, 5:-5])
def test_crop_by_tuple_of_two_floats_without_keep_size(self):
aug = iaa.Crop(percent=(0.1, 0.2), keep_size=False)
image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
observed = aug.augment_image(image)
assert 30 <= observed.shape[0] <= 40
assert 30 <= observed.shape[1] <= 40
def test_invalid_datatype_for_percent_parameter_fails(self):
got_exception = False
try:
_ = iaa.Crop(percent="test", keep_size=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
    def test_crop_by_fixed_float_on_each_side_on_its_own(self):
        """Crop 10% from exactly one side at a time and compare pixel data."""
        image = np.random.randint(0, 255, size=(50, 50), dtype=np.uint8)
        height, width = image.shape[0:2]
        # one entry per side: (top, right, bottom, left)
        crops = [
            (0.1, 0, 0, 0),
            (0, 0.1, 0, 0),
            (0, 0, 0.1, 0),
            (0, 0, 0, 0.1),
        ]
        for crop in crops:
            with self.subTest(percent=crop):
                aug = iaa.Crop(percent=crop, keep_size=False)
                top, right, bottom, left = crop
                top_px = int(round(top * height))
                right_px = int(round(right * width))
                bottom_px = int(round(bottom * height))
                left_px = int(round(left * width))
                # dont use :-bottom_px and :-right_px here, because these
                # values can be 0
                image_cropped = image[top_px:50-bottom_px, left_px:50-right_px]
                observed = aug.augment_image(image)
                assert np.array_equal(observed, image_cropped)
    def _test_crop_cba_by_fixed_float_on_each_side_on_its_own(
            self, augf_name, cbaoi):
        """Helper: crop 10% from one side at a time and verify the
        coordinate-based augmentable shifts by (-left_px, -top_px) while its
        canvas shape shrinks by the cropped amounts."""
        height, width = cbaoi.shape[0:2]
        # one entry per side: (top, right, bottom, left)
        crops = [
            (0.1, 0, 0, 0),
            (0, 0.1, 0, 0),
            (0, 0, 0.1, 0),
            (0, 0, 0, 0.1),
        ]
        for crop in crops:
            with self.subTest(augf_name=augf_name, percent=crop):
                aug = iaa.Crop(percent=crop, keep_size=False)
                top, right, bottom, left = crop
                top_px = int(round(top * height))
                right_px = int(round(right * width))
                left_px = int(round(left * width))
                bottom_px = int(round(bottom * height))
                observed = getattr(aug, augf_name)(cbaoi)
                # only top/left crops shift coordinates; right/bottom do not
                expected = cbaoi.shift(x=-left_px, y=-top_px)
                expected.shape = tuple(
                    [expected.shape[0] - top_px - bottom_px,
                     expected.shape[1] - left_px - right_px]
                    + list(expected.shape[2:])
                )
                assert_cbaois_equal(observed, expected)
def test_crop_keypoints_by_fixed_float_on_each_side_on_its_own(self):
height, width = (50, 50)
kps = [ia.Keypoint(x=10, y=11), ia.Keypoint(x=20, y=21),
ia.Keypoint(x=30, y=31)]
kpsoi = ia.KeypointsOnImage(kps, shape=(height, width))
self._test_crop_cba_by_fixed_float_on_each_side_on_its_own(
"augment_keypoints", kpsoi)
def test_crop_polygons_by_fixed_float_on_each_side_on_its_own(self):
height, width = (50, 50)
polygons = [ia.Polygon([(0, 0), (40, 0), (40, 40), (0, 40)]),
ia.Polygon([(10, 10), (50, 10), (50, 50), (10, 50)])]
psoi = ia.PolygonsOnImage(polygons, shape=(height, width, 3))
self._test_crop_cba_by_fixed_float_on_each_side_on_its_own(
"augment_polygons", psoi)
def test_crop_line_strings_by_fixed_float_on_each_side_on_its_own(self):
height, width = (50, 50)
lss = [ia.LineString([(0, 0), (40, 0), (40, 40), (0, 40)]),
ia.LineString([(10, 10), (50, 10), (50, 50), (10, 50)])]
lsoi = ia.LineStringsOnImage(lss, shape=(height, width, 3))
self._test_crop_cba_by_fixed_float_on_each_side_on_its_own(
"augment_line_strings", lsoi)
def test_crop_bounding_boxes_by_fixed_float_on_each_side_on_its_own(self):
height, width = (50, 50)
bbs = [ia.BoundingBox(x1=0, y1=0, x2=40, y2=40),
ia.BoundingBox(x1=10, y1=10, x2=30, y2=40)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(height, width, 3))
self._test_crop_cba_by_fixed_float_on_each_side_on_its_own(
"augment_bounding_boxes", bbsoi)
    def test_crop_heatmaps_smaller_than_img_by_fixed_floats_without_ks(self):
        """Percent-crops scale down to the smaller heatmap resolution."""
        # crop smaller heatmaps
        # heatmap is (8, 12), image is (16, 32)
        # image is cropped by (0.25, 0.25, 0.25, 0.25)
        # expected image size: (8, 16)
        # expected heatmap size: (4, 6)
        aug = iaa.Crop(percent=(0.25, 0.25, 0.25, 0.25), keep_size=False)
        heatmaps_arr_small = np.zeros((8, 12), dtype=np.float32)
        heatmaps_arr_small[2:-2, 4:-4] = 1.0
        heatmaps = ia.HeatmapsOnImage(heatmaps_arr_small, shape=(16, 32))
        # 25% of the heatmap's own (8, 12) resolution: 2px rows, 3px cols
        top, bottom, left, right = 2, 2, 3, 3
        heatmaps_arr_small_cropped = \
            heatmaps_arr_small[top:-bottom, left:-right]
        observed = aug.augment_heatmaps([heatmaps])[0]
        assert observed.shape == (8, 16)
        assert observed.arr_0to1.shape == (4, 6, 1)
        assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
        assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
        assert np.allclose(observed.arr_0to1[..., 0], heatmaps_arr_small_cropped)
    def test_crop_segmaps_smaller_than_img_by_fixed_floats_without_ks(self):
        """Percent-crops scale down to the smaller segmentation-map resolution."""
        aug = iaa.Crop(percent=(0.25, 0.25, 0.25, 0.25), keep_size=False)
        segmaps_arr_small = np.zeros((8, 12), dtype=np.int32)
        segmaps_arr_small[2:-2, 4:-4] = 1
        segmaps = SegmentationMapsOnImage(segmaps_arr_small, shape=(16, 32))
        # 25% of the map's own (8, 12) resolution: 2px rows, 3px cols
        top, bottom, left, right = 2, 2, 3, 3
        segmaps_arr_small_cropped = segmaps_arr_small[top:-bottom, left:-right]
        observed = aug.augment_segmentation_maps([segmaps])[0]
        assert observed.shape == (8, 16)
        assert observed.arr.shape == (4, 6, 1)
        assert np.array_equal(observed.arr[..., 0], segmaps_arr_small_cropped)
    def test_crop_heatmaps_smaller_than_img_by_fixed_floats_with_ks(self):
        """With keep_size=True the cropped heatmap is resized back to (8, 12)."""
        # crop smaller heatmaps, with keep_size=True
        # heatmap is (8, 12), image is (16, 32)
        # image is cropped by (0.25, 0.25, 0.25, 0.25)
        # expected image size: (8, 16) -> (16, 32) after resize
        # expected heatmap size: (4, 6) -> (8, 12) after resize
        aug = iaa.Crop(percent=(0.25, 0.25, 0.25, 0.25), keep_size=True)
        heatmaps_arr_small = np.zeros((8, 12), dtype=np.float32)
        heatmaps_arr_small[2:-2, 4:-4] = 1.0
        heatmaps = ia.HeatmapsOnImage(heatmaps_arr_small, shape=(16, 32))
        top, bottom, left, right = 2, 2, 3, 3
        heatmaps_arr_small_cropped = \
            heatmaps_arr_small[top:-bottom, left:-right]
        observed = aug.augment_heatmaps([heatmaps])[0]
        assert observed.shape == (16, 32)
        assert observed.arr_0to1.shape == (8, 12, 1)
        assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
        assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
        # cubic resize can overshoot [0, 1], hence the clip on the expectation
        assert np.allclose(
            observed.arr_0to1[..., 0],
            np.clip(
                ia.imresize_single_image(
                    heatmaps_arr_small_cropped,
                    (8, 12),
                    interpolation="cubic"),
                0,
                1.0
            )
        )
    def test_crop_segmaps_smaller_than_img_by_fixed_floats_with_ks(self):
        """With keep_size=True the cropped segmap is resized back to (8, 12)."""
        aug = iaa.Crop(percent=(0.25, 0.25, 0.25, 0.25), keep_size=True)
        segmaps_arr_small = np.zeros((8, 12), dtype=np.int32)
        segmaps_arr_small[2:-2, 4:-4] = 1
        segmaps = SegmentationMapsOnImage(segmaps_arr_small, shape=(16, 32))
        top, bottom, left, right = 2, 2, 3, 3
        segmaps_arr_small_cropped = segmaps_arr_small[top:-bottom, left:-right]
        observed = aug.augment_segmentation_maps([segmaps])[0]
        assert observed.shape == (16, 32)
        assert observed.arr.shape == (8, 12, 1)
        # segmentation maps must be resized with nearest-neighbor interpolation
        assert np.allclose(
            observed.arr[..., 0],
            ia.imresize_single_image(
                segmaps_arr_small_cropped,
                (8, 12),
                interpolation="nearest")
        )
    def test_crop_keypoints_by_fixed_floats_without_keep_size(self):
        """percent=(t, r, b, l)=(0.25, 0, 0.5, 0.1) of (16, 20) -> (4, 0, 8, 2) px."""
        aug = iaa.Crop(percent=(0.25, 0, 0.5, 0.1), keep_size=False)
        kps = [ia.Keypoint(x=12, y=10), ia.Keypoint(x=8, y=12)]
        kpsoi = ia.KeypointsOnImage(kps, shape=(16, 20, 3))
        kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
        # new canvas: (16-4-8, 20-0-2) = (4, 18)
        assert kpsoi_aug.shape == (4, 18, 3)
        assert len(kpsoi_aug.keypoints) == 2
        # keypoints shift by (-left_px, -top_px) = (-2, -4)
        assert np.allclose(kpsoi_aug.keypoints[0].x, 12-2)
        assert np.allclose(kpsoi_aug.keypoints[0].y, 10-4)
        assert np.allclose(kpsoi_aug.keypoints[1].x, 8-2)
        assert np.allclose(kpsoi_aug.keypoints[1].y, 12-4)
    def test_crop_keypoints_by_fixed_floats_with_keep_size(self):
        """Same percent crop, but coords are projected back onto (16, 20)."""
        aug = iaa.Crop(percent=(0.25, 0, 0.5, 0.1), keep_size=True)
        kps = [ia.Keypoint(x=12, y=10), ia.Keypoint(x=8, y=12)]
        kpsoi = ia.KeypointsOnImage(kps, shape=(16, 20, 3))
        kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
        assert kpsoi_aug.shape == (16, 20, 3)
        assert len(kpsoi_aug.keypoints) == 2
        # shifted coords scaled by 20/18 in x and 16/4 in y
        assert np.allclose(kpsoi_aug.keypoints[0].x, ((12-2)/18)*20)
        assert np.allclose(kpsoi_aug.keypoints[0].y, ((10-4)/4)*16)
        assert np.allclose(kpsoi_aug.keypoints[1].x, ((8-2)/18)*20)
        assert np.allclose(kpsoi_aug.keypoints[1].y, ((12-4)/4)*16)
    def test_crop_polygons_by_fixed_floats_without_keep_size(self):
        """percent=(0.2, 0, 0.5, 0.1) of (10, 10) -> pixel crop (2, 0, 5, 1)."""
        aug = iaa.Crop(percent=(0.2, 0, 0.5, 0.1), keep_size=False)
        polygons = [ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
                    ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])]
        cbaoi = ia.PolygonsOnImage(polygons, shape=(10, 10, 3))
        cbaoi_aug = aug.augment_polygons([cbaoi, cbaoi])
        assert len(cbaoi_aug) == 2
        for cbaoi_aug_i in cbaoi_aug:
            # new canvas: (10-2-5, 10-0-1) = (3, 9)
            assert cbaoi_aug_i.shape == (3, 9, 3)
            assert len(cbaoi_aug_i.items) == 2
            # polygons shift by (-left, -top) = (-1, -2)
            assert cbaoi_aug_i.items[0].coords_almost_equals(
                [(0-1, 0-2), (4-1, 0-2), (4-1, 4-2), (0-1, 4-2)]
            )
            assert cbaoi_aug_i.items[1].coords_almost_equals(
                [(1-1, 1-2), (5-1, 1-2), (5-1, 5-2), (1-1, 5-2)]
            )
    def test_crop_polygons_by_fixed_floats_with_keep_size(self):
        """Same percent crop, with coords projected back onto (10, 10)."""
        aug = iaa.Crop(percent=(0.2, 0, 0.5, 0.1), keep_size=True)
        polygons = [ia.Polygon([(0, 0), (4, 0), (4, 4), (0, 4)]),
                    ia.Polygon([(1, 1), (5, 1), (5, 5), (1, 5)])]
        cbaoi = ia.PolygonsOnImage(polygons, shape=(10, 10, 3))
        cbaoi_aug = aug.augment_polygons([cbaoi, cbaoi])
        assert len(cbaoi_aug) == 2
        for cbaoi_aug_i in cbaoi_aug:
            assert cbaoi_aug_i.shape == (10, 10, 3)
            assert len(cbaoi_aug_i.items) == 2
            # shifted coords scaled by 10/9 in x and 10/3 in y
            assert cbaoi_aug_i.items[0].coords_almost_equals(
                [(10*(-1/9), 10*(-2/3)),
                 (10*(3/9), 10*(-2/3)),
                 (10*(3/9), 10*(2/3)),
                 (10*(-1/9), 10*(2/3))]
            )
            assert cbaoi_aug_i.items[1].coords_almost_equals(
                [(10*(0/9), 10*(-1/3)),
                 (10*(4/9), 10*(-1/3)),
                 (10*(4/9), 10*(3/3)),
                 (10*(0/9), 10*(3/3))]
            )
    def test_crop_line_strings_by_fixed_floats_without_keep_size(self):
        """percent=(0.2, 0, 0.5, 0.1) of (10, 10) -> pixel crop (2, 0, 5, 1)."""
        aug = iaa.Crop(percent=(0.2, 0, 0.5, 0.1), keep_size=False)
        lss = [ia.LineString([(0, 0), (4, 0), (4, 4), (0, 4)]),
               ia.LineString([(1, 1), (5, 1), (5, 5), (1, 5)])]
        cbaoi = ia.LineStringsOnImage(lss, shape=(10, 10, 3))
        cbaoi_aug = aug.augment_line_strings([cbaoi, cbaoi])
        assert len(cbaoi_aug) == 2
        for cbaoi_aug_i in cbaoi_aug:
            # new canvas: (10-2-5, 10-0-1) = (3, 9)
            assert cbaoi_aug_i.shape == (3, 9, 3)
            assert len(cbaoi_aug_i.items) == 2
            # line strings shift by (-left, -top) = (-1, -2)
            assert cbaoi_aug_i.items[0].coords_almost_equals(
                [(0-1, 0-2), (4-1, 0-2), (4-1, 4-2), (0-1, 4-2)]
            )
            assert cbaoi_aug_i.items[1].coords_almost_equals(
                [(1-1, 1-2), (5-1, 1-2), (5-1, 5-2), (1-1, 5-2)]
            )
def test_crop_line_strings_by_fixed_floats_with_keep_size(self):
aug = iaa.Crop(percent=(0.2, 0, 0.5, 0.1), keep_size=True)
lss = [ia.LineString([(0, 0), (4, 0), (4, 4), (0, 4)]),
ia.LineString([(1, 1), (5, 1), (5, 5), (1, 5)])]
cbaoi = ia.LineStringsOnImage(lss, shape=(10, 10, 3))
cbaoi_aug = aug.augment_line_strings([cbaoi, cbaoi])
assert len(cbaoi_aug) == 2
for cbaoi_aug_i in cbaoi_aug:
assert cbaoi_aug_i.shape == (10, 10, 3)
assert len(cbaoi_aug_i.items) == 2
assert cbaoi_aug_i.items[0].coords_almost_equals(
[(10*(-1/9), 10*(-2/3)),
(10*(3/9), 10*(-2/3)),
(10*(3/9), 10*(2/3)),
(10*(-1/9), 10*(2/3))]
)
| |
# -*- coding: utf-8 -*-
"""Train a CNN to detect presence of red and blue line on an RDT and also give the normalized y-axis location.
Example:
$ python train_blue_red.py --transfer_learning True
"""
import os
import numpy as np
import cv2
import imgaug as ia
import keras.backend as K
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
import imgaug.augmenters as iaa
from imgaug.augmentables.kps import KeypointsOnImage
from sklearn.utils import class_weight
from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, Input, Conv2D, multiply, LocallyConnected2D, Lambda, AvgPool2D
from keras.models import Sequential,Model
from keras.layers import ReLU, Dense, Conv2D, Flatten,Dropout, MaxPooling2D, GlobalAveragePooling3D, LeakyReLU, Activation, BatchNormalization, Input, merge, Softmax
import matplotlib.pyplot as plt
from keras.utils import to_categorical
import tensorflow as tf
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
import random
from keras.constraints import max_norm
from keras.optimizers import Adam
from keras.applications.inception_v3 import InceptionV3,preprocess_input
import argparse
from mixup_generator import MixupGenerator
from tensorflow.keras import layers
import keras
def normalize(data):
    """Return ``data`` standardized to zero mean and unit variance."""
    centered = data - data.mean()
    return centered / data.std()
def loadData(noBatchSamples,batchIdx,duplicativeFactor=1,rareData="faint.txt",badData="baddata.txt",rootPathCentreLabel="./obj/labels",rootPathCroppedImages = "./obj/images"):
    """This function loads data from the directory of labels, it works with the yolo data format.

    Args:
        noBatchSamples (int) : Number of samples per batch
        batchIdx (int) : Batch number
        duplicativeFactor (int) : Number of times to oversample rare data (currently unused)
        rareData (str) : File listing names of rare ("faint") data samples
        badData (str) : File listing names of samples to exclude entirely
        rootPathCentreLabel (str) : Directory with labels in yolo format
        rootPathCroppedImages (str) : Directory with images, image name and label name should be same eg: 1.jpg 1.txt

    Returns:
        tuple: (train images, train targets, train file names, rare file
        names, rare images, rare targets). Names of entries outside the
        batch are additionally written to test_data.txt.
    """
    y_train=[]
    X_train=[]
    x_faint = []
    y_faint = []
    name=[]
    name_faint=[]
    test_data=[]
    # names of rare ("faint") samples, one per line
    f = open(rareData)
    lines = f.readlines()
    lines = [x.strip() for x in lines]
    f.close()
    # names of known-bad samples to skip
    f = open(badData)
    lines_bad = f.readlines()
    lines_bad = [x.strip() for x in lines_bad]
    f.close()
    blue=0
    faintInd=0
    for ind,element in enumerate(os.listdir(rootPathCentreLabel)):
        # BUGFIX: the upper bound was previously `<=`, which made every batch
        # contain noBatchSamples+1 entries and share its boundary sample with
        # the next batch. Use a half-open interval [start, end).
        if ind >= noBatchSamples*batchIdx and ind < noBatchSamples*(batchIdx+1) and element.replace(".txt","") not in lines_bad:
            with open(os.path.join(rootPathCentreLabel,element)) as fin:
                y = [(0,0),(0,0)]
                img = cv2.imread(os.path.join(rootPathCroppedImages,element.replace(".txt",".jpg")),cv2.IMREAD_COLOR)
                #img = gaussBlur(img)
                #img = enhanceImage(img)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                # keep only the central 60-pixel-wide strip of the image
                img = img[:,20:80,:]
                for line in fin:
                    # yolo format: class x_center y_center width height
                    line=line.split(" ")
                    if line[0]=="0" :
                        blue+=1
                        pixel_y_pos =int(float(line[2]))
                        # scale normalized coords to (x*100, y*2000) pixel targets
                        y[0]=(int(float(line[1])*100),int(float(line[2])*2000))
                    elif line[0]=="1" :
                        pixel_y_pos =int(float(line[2]))
                        if float(line[2])<1:
                            y[1]=(int(float(line[1])*100),int(float(line[2])*2000))
                    elif line[0]=="2" :
                        pixel_y_pos =int(float(line[2]))
                        if float(line[2])<1:
                            y[1]=(int(float(line[1])*100),int(float(line[2])*2000))
                if element.replace(".txt","") in lines:
                    # rare ("faint") samples are collected in their own lists
                    x_faint.append(img)
                    y_faint.append(y)
                    name_faint.append(element)
                else:
                    y_train.append(y)
                    X_train.append(img)
                    name.append(element)
        else:
            test_data.append(element)
    with open("test_data.txt","w") as fout:
        fout.write("\n".join(test_data))
    # X_train=np.array(X_train,dtype=np.uint8)
    # x_faint =np.array(x_faint,dtype=np.uint8)
    print("Number of blue",blue)
    return X_train,y_train,name,name_faint,x_faint,y_faint #np.zeros((128, 32, 32, 3), dtype=np.uint8) + (batch_idx % 255)
def renormalize(n, range1, range2):
    """Linearly map ``n`` from the interval ``range1`` onto ``range2``."""
    src_lo, src_hi = range1
    dst_lo, dst_hi = range2
    src_span = src_hi - src_lo
    dst_span = dst_hi - dst_lo
    # scale first, then offset (same arithmetic order as a textbook lerp)
    return (dst_span * (n - src_lo) / src_span) + dst_lo
def returnLOGker():
    """Build a row-subsampled, zero-mean Laplacian-of-Gaussian kernel.

    A full 200x200 LoG (sigma=48) is computed, every 4th row is kept
    (giving a 50x200 "oriented" kernel), the mean is removed and the
    result is scaled by 29000.
    """
    sigma = 48
    scale1 = 1 / (np.pi * (sigma ** 4))
    scale2 = 1.0 / (2 * sigma ** 2)
    size = 200
    half = int(size / 2)
    # centered coordinates: i over rows, j over columns
    i = np.arange(size) - half
    j = np.arange(size) - half
    sq = i[:, None] ** 2 + j[None, :] ** 2
    log_full = scale1 * (1 - sq * scale2) * np.exp(-(sq * scale2))
    # keep every 4th row -> shape (50, 200)
    oriented = log_full[::4, :]
    oriented = oriented - np.mean(oriented)
    return oriented * 29000
def LOG(im, kernel):
    """Convolve the YUV version of ``im`` with ``kernel``; return chroma planes.

    The RGB image is scaled to [0, 1], converted to YUV, the chroma planes
    are centered around zero, and the whole image is filtered. Only the
    (rescaled) U/V responses are returned.
    """
    scaled = np.array(im / 255.0, dtype=np.float32)
    yuv = cv2.cvtColor(scaled, cv2.COLOR_RGB2YUV)
    yuv[:, :, 1:] = yuv[:, :, 1:] - 0.5
    response = cv2.filter2D(yuv, cv2.CV_32F, kernel) * 255
    return response[:, :, 1:]
def gaborFilt(im):
    """Gabor-filter ``im`` in YUV space and build a 2-channel chroma output.

    Returns:
        The two renormalized chroma planes of ``newIMG`` (shape HxWx2),
        each rescaled to [0, 255].
    """
    # horizontal-line-sensitive Gabor kernel (9x51, theta=pi/2)
    g_kernel = cv2.getGaborKernel((9, 51), 6, np.pi/2, 0.2, 0.1, np.pi, ktype=cv2.CV_32F)
    img = im/255.0
    img = np.array(img,dtype=np.float32)
    imgYUV=cv2.cvtColor(img,cv2.COLOR_RGB2YUV)
    filtered_img_GB = cv2.filter2D(imgYUV*255, cv2.CV_32F, g_kernel)
    renormalized=np.zeros((imgYUV.shape))
    # renormalized=renormalized[:,5:95,:]
    # stretch Y to [0,255] and the chroma planes to signed [-128,127]
    renormalized[:,:,0]=renormalize(filtered_img_GB[:,:,0],(np.min(filtered_img_GB[:,:,0]),np.max(filtered_img_GB[:,:,0])),(0,255))
    renormalized[:,:,1]=renormalize(filtered_img_GB[:,:,1],(np.min(filtered_img_GB[:,:,1]),np.max(filtered_img_GB[:,:,1])),(-128,127))
    renormalized[:,:,2]=renormalize(filtered_img_GB[:,:,2],(np.min(filtered_img_GB[:,:,2]),np.max(filtered_img_GB[:,:,2])),(-128,127))
    U_p=2.0
    V_p=13.0
    k1=U_p/V_p
    k2=1-k1
    newIMG=np.zeros((imgYUV.shape))
    # newIMG=newIMG[:,5:95,:]
    # both new chroma planes derive from the U-V difference, weighted k1/k2;
    # NOTE(review): the k1/k2 scaling is absorbed by the min-max renormalize
    # below, so it only matters if that normalization changes -- confirm intent
    Final_U = k1*(renormalized[:,:,1]-renormalized[:,:,2])
    Final_V = k2*(renormalized[:,:,1]-renormalized[:,:,2])
    newIMG[:,:,1]=Final_U
    newIMG[:,:,2]=Final_V
    # NOTE(review): the next assignment is immediately overwritten by the
    # renormalized version two lines below (dead store)
    newIMG[:,:,0]=renormalized[:,:,0]
    newIMG[:,:,0]=renormalize(renormalized[:,:,0],(np.min(renormalized[:,:,0]),np.max(renormalized[:,:,0])),(0,255))
    newIMG[:,:,1]=renormalize(Final_U,(np.min(Final_U),np.max(Final_U)),(0,255))
    newIMG[:,:,2]=renormalize(Final_V,(np.min(Final_V),np.max(Final_V)),(0,255))
    im = newIMG[:,:,1:]
    return im
def gaussBlur(img):
    """Smooth ``img`` vertically with a 1x11 Gaussian kernel."""
    return cv2.GaussianBlur(img, (1, 11), 0)
def adjust_gamma(image, gamma=1.0):
    """Apply gamma correction to ``image`` via a 256-entry lookup table.

    Maps each pixel value v to 255 * (v/255) ** (1/gamma), truncated to uint8.
    """
    inv_gamma = 1.0 / gamma
    levels = np.arange(0, 256) / 255.0
    table = ((levels ** inv_gamma) * 255).astype("uint8")
    return cv2.LUT(image, table)
def enhanceImage(img):
    """Contrast-enhance ``img``: CLAHE on the HLS L plane plus gamma 0.7.

    NOTE(review): ``newimg[1]`` indexes the second *row* of the image, not
    the L channel, and cv2.normalize is called as (src, 0, 255, NORM_MINMAX)
    which does not match the (src, dst, alpha, beta, norm_type) signature --
    confirm whether this line does what was intended.
    """
    img = np.uint8(img)
    newimg = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # clip limit 10, 5x5 tile grid
    clahe = cv2.createCLAHE(10, (5,5))
    newimg[1]=cv2.normalize(newimg[1], 0, 255, cv2.NORM_MINMAX)
    lab_planes = cv2.split(newimg)
    # equalize the L (lightness) plane only
    lab_planes[1] = clahe.apply(lab_planes[1])
    lab = cv2.merge(lab_planes)
    result=cv2.cvtColor(lab, cv2.COLOR_HLS2RGB)
    # brighten: gamma < 1 lifts mid-tones
    result = adjust_gamma(result,0.7)
    return result
def key2Target(keypoints,name):
    """This function converts keypoints returned after data augmentation to numpy arrays.

    Args:
        keypoints (imgaug.augmentables.kps.KeypointsOnImage) : Keypoints on the image
        name (list) : File names

    Returns:
        numpy.ndarray: Regression targets, shape (N, 2) -- normalized line positions
        numpy.ndarray: Classification targets, shape (N, 2) -- line presence flags
    """
    numred=0
    numblue=0
    y_test_regression=[]
    y_test_categorical = []
    for i,k in enumerate(keypoints):
        y=np.zeros((2))
        y_class=np.zeros((2))
        # k[0]/k[1] are the two line keypoints; the y coordinate lives on the
        # 0..2000 scale produced by loadData's *2000 scaling.
        # NOTE(review): positions <= 700 (or >= 1800/1900) are treated as
        # "line absent" -- confirm these thresholds against the labeling tool.
        if k[0][1]<=700 and (k[1][1]<=700 or k[1][1]>=1800): # Red line:False && Blue line:False
            y[0]=0
            y[1]=0
            y_class[0]=0
            y_class[1]=0
            print(name[i],k)
        elif k[0][1]>=700 and (k[1][1]<=700 or k[1][1]>=1800): # Red line:False && Blue line:True
            numblue+=1
            # shift/scale the pixel position: 1000px -> 0, 500px per unit
            y[0]=(k[0][1]-1000)/500
            y[1]=0
            y_class[0]=1
            y_class[1]=0
        elif (k[1][1]>=700 and k[1][1]<=1900) and k[0][1]>=700: # Red line:True && Blue line:True
            numblue+=1
            numred+=1
            y[0]=(k[0][1]-1000)/500
            y[1]=(k[1][1]-1000)/500
            y_class[0]=1
            y_class[1]=1
        else:
            # unmatched combination (e.g. red without blue): log and keep zeros
            print(name[i])
        y_test_regression.append(np.array(y))
        y_test_categorical.append(np.array(y_class))
    print("number of blue",numblue,"number of red",numred)
    return np.array(y_test_regression),np.array(y_test_categorical)
def returnAugmentationObj(percentageOfChance=0.9):
    """This function returns an augementation pipeline which can be used to augment training data.

    The pipeline always applies a small additive brightness jitter, a 50%
    horizontal flip and a hue/saturation jitter, then at most two of the
    heavier blur/contrast augmenters in random order.

    Args:
        percentageOfChance (float) : Percentage of chance , eg: if it is 0.5, 50% of the images will go through the pipeline

    Returns:
        :class:`imgaug.augmenters.meta.Sequential` : Image augmentor
    """
    sometimes = lambda aug: iaa.Sometimes(percentageOfChance, aug)
    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    seq = iaa.Sequential(
        [
            # sometimes(iaa.Affine(
            #     translate_percent={"x": (-0.06, 0.06)}, # translate by -x to +x percent (per axis)
            #     rotate=(-5, 5) # rotate by -x to +x degrees
            # )),
            # execute 0 to 2 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            # iaa.Dropout(0.02, name="Dropout"),
            iaa.Add((-5,5),per_channel=0.5),
            iaa.Fliplr(0.5), # horizontally flip 50% of the images
            iaa.AddToHueAndSaturation((-5, 5),per_channel=0.5), # change hue and saturation
            iaa.SomeOf((0, 2),
                [
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 4.0)), # blur images with a sigma between 0 and 3.0
                        iaa.AverageBlur(k=(2, 8)), # blur image using local means with kernel sizes between 2 and 7
                        iaa.MedianBlur(k=(3, 13)), # blur image using local medians with kernel sizes between 2 and 7
                    ]),
                    iaa.GammaContrast((0.8,1.2),per_channel=True),
                    #iaa.OneOf([
                    #     sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.016))), # Add perscpective transform
                    #     iaa.Affine(rotate=(-2, 2),scale=(0.75,1.2)) # rotate by -x to +x degrees
                    #]),
                ],
                random_order=True
            )
        ])
    return seq
def lossReg(y_true,y_pred):
    """Custom loss function to penalize A type virus versus B type written for keras.

    Squared error with a per-column weight: column 0 is weighted by 1,
    column 1 by ``penalty`` (10) plus 1 whenever the predicted ordering of
    the two targets is inverted relative to the ground truth, else plus 0.
    """
    mask=K.ones_like(y_true)
    # element-wise squared error
    l=K.square(y_pred-y_true)
    penalty = tf.constant([10.0])
    # indicator is 1.0 when (true0 > true1 but pred0 < pred1) or vice versa
    mask =tf.add(penalty,tf.to_float (tf.math.logical_or(tf.math.logical_and(tf.math.greater(y_true[:,0],y_true[:,1]),tf.math.less(y_pred[:,0],y_pred[:,1])),tf.math.logical_and(tf.math.less(y_true[:,0],y_true[:,1]),tf.math.greater(y_pred[:,0],y_pred[:,1])))))
    # column 0 keeps weight 1; column 1 gets the (10 or 11) penalty weight
    mask = tf.stack([K.ones_like(y_true[:,0]),mask],axis=1)
    return K.mean(tf.math.multiply(l,mask),axis=-1)
def returnModel(loadWeights,weightsFile="./red_blue.hdf5"):
    """This function returns a keras model.

    A small VGG-style CNN over a (500, 100, 3) input with three heads:
    a 2-unit sigmoid head "cat_kash" and two 1-unit sigmoid heads
    "reg_blue" / "reg_red", all branching off a shared dense trunk.

    Args:
        loadWeights (bool) : Load weights specified in the weightsFile param
        weightsFile (str) : Path to weights

    Returns:
        :class:`keras.model.Model` : Neural Network
    """
    x = Input(shape=(500, 100,3))
    # convolutional trunk: two conv-BN-ReLU pairs, each followed by pooling
    conv1=Conv2D(8, (3,3), padding='valid')(x)
    batchnorm1 = BatchNormalization()(conv1)
    act1 = ReLU()(batchnorm1)
    conv2=Conv2D(8, (3,3), padding='valid')(act1)
    batchnorm2 = BatchNormalization()(conv2)
    act2 = ReLU()(batchnorm2)
    maxpool2 = MaxPooling2D((2,2))(act2)
    conv3=Conv2D(16, (3,3), padding='valid')(maxpool2)
    batchnorm3 = BatchNormalization()(conv3)
    act3 = ReLU()(batchnorm3)
    conv4=Conv2D(16, (3,3), padding='valid')(act3)
    batchnorm4 = BatchNormalization()(conv4)
    act4 = ReLU()(batchnorm4)
    maxpool3 = MaxPooling2D((2,2))(act4)
    # shared dense features feeding all three heads
    flat1 = Flatten()(maxpool3)
    D1 = Dense(256)(flat1)
    batchnorm5 = BatchNormalization()(D1)
    act5 = ReLU()(batchnorm5)
    D2 = Dense(128,kernel_constraint=max_norm(2))(act5)
    batchnorm6 = BatchNormalization()(D2)
    act6 = ReLU()(batchnorm6)
    # classification head (2 sigmoid units)
    D_soft = Dense(2)(act6)
    batchnorm7 = BatchNormalization()(D_soft)
    out1 = Activation('sigmoid',name="cat_kash")(batchnorm7)
    # regression heads (one sigmoid unit each)
    D_sigmoid_blue = Dense(1)(act6)
    batchnorm8 = BatchNormalization()(D_sigmoid_blue)
    out_blue = Activation('sigmoid',name="reg_blue")(batchnorm8)
    D_sigmoid_red = Dense(1)(act6)
    batchnorm9 = BatchNormalization()(D_sigmoid_red)
    out_red = Activation('sigmoid',name="reg_red")(batchnorm9)
    model = Model(inputs=x, outputs=[out1,out_blue,out_red])
    if (loadWeights):
        model.load_weights(weightsFile,by_name=True)
    return model
def res_net_block(input_data, filters, conv_size):
    """A ResNet identity block: conv-BN-conv-BN plus a skip connection."""
    branch = layers.Conv2D(
        filters, conv_size, activation='relu', padding='same',
        kernel_initializer=keras.initializers.lecun_uniform(seed=None),
        kernel_regularizer=keras.regularizers.l2(l=0.00001))(input_data)
    branch = layers.BatchNormalization()(branch)
    branch = layers.Conv2D(
        filters, conv_size, activation=None, padding='same',
        kernel_initializer=keras.initializers.lecun_uniform(seed=None),
        kernel_regularizer=keras.regularizers.l2(l=0.00001))(branch)
    branch = layers.BatchNormalization()(branch)
    # identity shortcut: add the block input back onto the conv branch
    merged = layers.Add()([branch, input_data])
    return layers.Activation('relu')(merged)
def modelShredding(loadWeights,weightsFile="./red_blue_shred.hdf5"):
    """This function returns a keras model.

    A VGG-style conv stack over a (20, 60, 2) input (doubled conv layers per
    stage, L2-regularized, lecun-uniform init) ending in global average
    pooling and a 3-way softmax head named "classification".

    Args:
        loadWeights (bool) : Load weights specified in the weightsFile param
        weightsFile (str) : Path to weights

    Returns:
        :class:`keras.model.Model` : Neural Network
    """
    x = Input(shape=(20,60,2))
    conv1=Conv2D(64, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None),kernel_regularizer=keras.regularizers.l2(l=0.0001))(x)
    conv1=Conv2D(64, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None),kernel_regularizer=keras.regularizers.l2(l=0.0001))(conv1)
    batchnorm1 = BatchNormalization()(conv1)
    act1 = ReLU()(batchnorm1)
    conv2=Conv2D(128, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None),kernel_regularizer=keras.regularizers.l2(l=0.001))(act1)
    conv2=Conv2D(128, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None),kernel_regularizer=keras.regularizers.l2(l=0.001))(conv2)
    batchnorm2 = BatchNormalization()(conv2)
    act2 = ReLU()(batchnorm2)
    #num_res_net_blocks = 4
    #for i in range(num_res_net_blocks):
    #    act2 = res_net_block(act2, 32, 3)
    conv3=Conv2D(256, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None),kernel_regularizer=keras.regularizers.l2(l=0.001))(act2)
    conv3=Conv2D(256, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None),kernel_regularizer=keras.regularizers.l2(l=0.001))(conv3)
    batchnorm3 = BatchNormalization()(conv3)
    act3 = ReLU()(batchnorm3)
    conv4=Conv2D(512, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None),kernel_regularizer=keras.regularizers.l2(l=0.001))(act3)
    conv4=Conv2D(512, (3,3), padding='same',kernel_initializer=keras.initializers.lecun_uniform(seed=None),kernel_regularizer=keras.regularizers.l2(l=0.001))(conv4)
    batchnorm4 = BatchNormalization()(conv4)
    act4 = ReLU()(batchnorm4)
    #num_res_net_blocks = 4
    #for i in range(num_res_net_blocks):
    #    act4 = res_net_block(act4, 64, 3)
    globalAveragePooling=GlobalAveragePooling2D()(act4)
    # NOTE: batchnorm3/act3 variable names are reused below for the dense
    # stage; harmless (the conv-stage tensors are already wired) but confusing
    D1 = Dense(256)(globalAveragePooling)
    batchnorm3 = BatchNormalization()(D1)
    act3 = ReLU()(batchnorm3)
    predictor = Dense(3)(act3)
    batchnorm9 = BatchNormalization()(predictor)
    output = Activation('softmax',name="classification")(batchnorm9)
    model = Model(inputs=x, outputs=output)
    if (loadWeights):
        model.load_weights(weightsFile,by_name=True)
    return model
def modelTransferLearning(loadWeights,weightsFile="./red_blue_transf.hdf5"):
"""This function returns a keras model with a pretrained Inception v3 | |
one is internally converted to type two. For 2 models with four features, this looks like: [0.1,0.9] -> [[0.1,0.9],[0.1,0.9],[0.1,0.9],[0.1,0.9]]
default: uniform weights (None)
output_file: filepath of output phrase table. If it ends with .gz, file is automatically zipped.
output_lexical: If defined, also writes combined lexical tables. Writes to output_lexical.e2f and output_lexical.f2e, or output_lexical.counts.e2f in mode 'counts'.
mode: declares the basic mixture-model algorithm. there are currently three options:
'counts': weighted counts (requires some statistics that Moses doesn't produce. Repeat step 4 of Moses training with the option -write-lexical-counts to obtain them.)
Only the standard Moses features are recomputed from weighted counts; additional features are linearly interpolated
(see number_of_features to allow more features, and i_e2f etc. if the standard features are in a non-standard position)
'interpolate': linear interpolation
'loglinear': loglinear interpolation (careful: this creates the intersection of phrase tables and is often of little use)
number_of_features: could be used to interpolate models with non-default Moses features. 4 features is currently still hardcoded in various places
(e.g. cross_entropy calculations, mode 'counts')
i_e2f,i_e2f_lex,i_f2e,i_f2e_lex: Index of the (Moses) phrase table features p(s|t), lex(s|t), p(t|s) and lex(t|s).
Relevant for mode 'counts', and if 'recompute_lexweights' is True in mode 'interpolate'. In mode 'counts', any additional features are combined through linear interpolation.
model_interface: class that handles reading phrase tables and lexical tables, and writing phrase tables. Currently only Moses is implemented.
default: Moses
reference_interface: class that deals with reading in reference phrase pairs for cross-entropy computation
Moses_Alignment: Word/phrase pairs as computed by Giza++ and extracted through Moses heuristics. This corresponds to the file model/extract.gz if you train a Moses model on your tuning set.
TigerXML: TigerXML data format
default: Moses_Alignment
reference_file: path to reference file. Required for every operation except combination of models with given weights.
lang_src: source language. Only required if reference_interface is TigerXML. Identifies which language in XML file we should treat as source language.
lang_target: target language. Only required if reference_interface is TigerXML. Identifies which language in XML file we should treat as target language.
intersected_cross-entropies: compute cross-entropies of intersection of phrase pairs, ignoring phrase pairs that do not occur in all models.
If False, algorithm operates on union of phrase pairs
default: False
add_origin_features: For each model that is being combined, add a binary feature to the final phrase table, with values of 1 (phrase pair doesn't occur in model) and 2.718 (it does).
This indicates which model(s) a phrase pair comes from and can be used during MERT to additionally reward/penalize translation models
lowmem: low memory mode: instead of loading target phrase counts / probability (when required), process the original table and its inversion (source and target swapped) incrementally, then merge the two halves.
tempdir: temporary directory (for low memory mode).
there are a number of further configuration options that you can define, which modify the algorithm for linear interpolation. They have no effect in mode 'counts'
recompute_lexweights: don't directly interpolate lexical weights, but interpolate word translation probabilities instead and recompute the lexical weights.
default: False
normalized: for interpolation of p(x|y): if True, models with p(y)=0 will be ignored, and probability mass will be distributed among models with p(y)>0.
If False, missing entries (x,y) are always interpreted as p(x|y)=0.
default: False
normalize_s_given_t: How to we normalize p(s|t) if 'normalized' is True? Three options:
None: don't normalize p(s|t) and lex(s|t) (only p(t|s) and lex(t|s))
t: check if p(t)==0 : advantage: theoretically sound; disadvantage: slower (we need to know if t occurs in model); favours rare target phrases (relative to default choice)
s: check if p(s)==0 : advantage: relevant for task; disadvantage: no true probability distributions
default: None
normalize-lexical_weights: also normalize lex(s|t) and lex(t|s) if 'normalized' is True:
reason why you might want to disable this: lexical weights suffer less from data sparseness than probabilities.
default: True
"""
self.mode = mode
self.output_file = output_file
self.lang_src = lang_src
self.lang_target = lang_target
self.loaded = defaultdict(int)
self.output_lexical = output_lexical
self.flags = copy.copy(self.flags)
self.flags.update(flags)
self.flags['i_e2f'] = int(self.flags['i_e2f'])
self.flags['i_e2f_lex'] = int(self.flags['i_e2f_lex'])
self.flags['i_f2e'] = int(self.flags['i_f2e'])
self.flags['i_f2e_lex'] = int(self.flags['i_f2e_lex'])
if reference_interface:
self.reference_interface = reference_interface(reference_file)
if mode not in ['interpolate','loglinear','counts']:
sys.stderr.write('Error: mode must be either "interpolate", "loglinear" or "counts"\n')
sys.exit(1)
models,number_of_features,weights = self._sanity_checks(models,number_of_features,weights)
self.weights = weights
self.models = models
self.model_interface = model_interface(models,number_of_features)
if mode == 'interpolate':
self.score = score_interpolate
elif mode == 'loglinear':
self.score = score_loglinear
elif mode == 'counts':
self.score = score_counts
def _sanity_checks(self,models,number_of_features,weights):
"""check if input arguments make sense (correct number of weights, valid model priorities etc.)
is only called on initialization. If you change weights afterwards, better know what you're doing.
"""
number_of_features = int(number_of_features)
for (model,priority) in models:
assert(priority in self._priorities)
models = [(model,self._priorities[p]) for (model,p) in models]
# accept two types of weight declarations: one weight per model, and one weight per model and feature
# type one is internally converted to type two: [0.1,0.9] -> [[0.1,0.9],[0.1,0.9],[0.1,0.9],[0.1,0.9]]
if weights:
if type(weights[0]) == list:
assert(len(weights)==number_of_features)
for sublist in weights:
assert(len(sublist)==len(models))
else:
assert(len(models) == len(weights))
weights = [weights for i in range(number_of_features)]
else:
if self.mode == 'loglinear' or self.mode == 'interpolate':
weights = [[1/len(models)]*len(models) for i in range(number_of_features)]
elif self.mode == 'counts':
weights = [[1]*len(models) for i in range(number_of_features)]
sys.stderr.write('Warning: No weights defined: initializing with uniform weights\n')
new_weights = normalize_weights(weights,self.mode,self.flags)
if weights != new_weights:
if self.mode == 'interpolate' or self.mode == 'loglinear':
sys.stderr.write('Warning: weights should sum to 1 - ')
elif self.mode == 'counts':
sys.stderr.write('Warning: normalizing weights so that first model has weight 1 (for features that are recomputed from counts) - ')
sys.stderr.write('normalizing to: '+ str(new_weights) +'\n')
weights = new_weights
return models,number_of_features,weights
    def _ensure_loaded(self,data):
        """Load data (lexical tables; reference alignment; phrase table) if it isn't already in memory.

        data is a collection of resource names; recognized keys are
        'reference', 'lexical', 'pt-filtered', 'lexical-filtered' and
        'pt-target'.  Each resource is loaded at most once; self.loaded
        records what has already been read.
        """
        # lexical tables need word-alignment info from the phrase tables
        if 'lexical' in data:
            self.model_interface.require_alignment = True
        if 'reference' in data and not self.loaded['reference']:
            sys.stderr.write('Loading word pairs from reference set...')
            self.reference_interface.load_word_pairs(self.lang_src,self.lang_target)
            sys.stderr.write('done\n')
            self.loaded['reference'] = 1
        if 'lexical' in data and not self.loaded['lexical']:
            sys.stderr.write('Loading lexical tables...')
            self.model_interface.load_lexical_tables(self.models,self.mode)
            sys.stderr.write('done\n')
            self.loaded['lexical'] = 1
        if 'pt-filtered' in data and not self.loaded['pt-filtered']:
            # open all phrase tables in priority order; store only entries
            # relevant to the reference set (filter_by* arguments below)
            models_prioritized = [(self.model_interface.open_table(model,'phrase-table'),priority,i) for (model,priority,i) in priority_sort_models(self.models)]
            for model,priority,i in models_prioritized:
                sys.stderr.write('Loading phrase table ' + str(i) + ' (only data relevant for reference set)')
                j = 0
                for line in model:
                    # progress indicator every million lines
                    if not j % 1000000:
                        sys.stderr.write('...'+str(j))
                    j += 1
                    line = line.rstrip().split(b' ||| ')
                    # a trailing ' |||' marks an empty final field that split()
                    # leaves attached; strip the marker and restore the field
                    # NOTE(review): the restored field is a str while the other
                    # fields are bytes -- presumably harmless downstream; confirm
                    if line[-1].endswith(b' |||'):
                        line[-1] = line[-1][:-4]
                        line.append('')
                    self.model_interface.load_phrase_features(line,priority,i,store='all',mode=self.mode,filter_by=self.reference_interface.word_pairs,filter_by_src=self.reference_interface.word_source,filter_by_target=self.reference_interface.word_target,flags=self.flags)
                sys.stderr.write(' done\n')
            self.loaded['pt-filtered'] = 1
        if 'lexical-filtered' in data and not self.loaded['lexical-filtered']:
            # restrict lexical tables to word pairs occurring in the reference
            e2f_filter, f2e_filter = _get_lexical_filter(self.reference_interface,self.model_interface)
            sys.stderr.write('Loading lexical tables (only data relevant for reference set)...')
            self.model_interface.load_lexical_tables(self.models,self.mode,e2f_filter=e2f_filter,f2e_filter=f2e_filter)
            sys.stderr.write('done\n')
            self.loaded['lexical-filtered'] = 1
        if 'pt-target' in data and not self.loaded['pt-target']:
            models_prioritized = [(self.model_interface.open_table(model,'phrase-table'),priority,i) for (model,priority,i) in priority_sort_models(self.models)]
            for model,priority,i in models_prioritized:
                sys.stderr.write('Loading target information from phrase table ' + str(i))
                j = 0
                for line in model:
                    if not j % 1000000:
                        sys.stderr.write('...'+str(j))
                    j += 1
                    line = line.rstrip().split(b' ||| ')
                    if line[-1].endswith(b' |||'):
                        line[-1] = line[-1][:-4]
                        line.append('')
                    self.model_interface.load_phrase_features(line,priority,i,mode=self.mode,store='target',flags=self.flags)
                sys.stderr.write(' done\n')
            self.loaded['pt-target'] = 1
    def _inverse_wrapper(self,weights,tempdir=None):
        """If we want to invert the phrase table to better calculate p(s|t) and lex(s|t),
        manage creation, sorting and merging of inverted phrase tables.

        Steps:
          1. write the first half of the combined table from the original models
          2. invert each model (source and target swapped) and write the second half
          3. sort the inverted half, then merge both halves into self.output_file

        :param weights: weight structure passed through to _write_phrasetable
        :param tempdir: directory for the temporary half-tables (default: system temp)
        """
        sys.stderr.write('Processing first table half\n')
        models = [(self.model_interface.open_table(model,'phrase-table'),priority,i) for (model,priority,i) in priority_sort_models(self.model_interface.models)]
        # delete=False: the files must survive independent close/reopen below
        pt_half1 = NamedTemporaryFile(prefix='half1',delete=False,dir=tempdir)
        self._write_phrasetable(models,pt_half1,weights)
        pt_half1.seek(0)
        sys.stderr.write('Inverting tables\n')
        models = [(self.model_interface.create_inverse(self.model_interface.open_table(model,'phrase-table'),tempdir=tempdir),priority,i) for (model,priority,i) in priority_sort_models(self.model_interface.models)]
        sys.stderr.write('Processing second table half\n')
        pt_half2_inverted = NamedTemporaryFile(prefix='half2',delete=False,dir=tempdir)
        self._write_phrasetable(models,pt_half2_inverted,weights,inverted=True)
        pt_half2_inverted.close()
        # the per-model inverted temp tables are no longer needed once written
        for model,priority,i in models:
            model.close()
            os.remove(model.name)
        # sort the second half so it can be merged with the first
        pt_half2 = sort_file(pt_half2_inverted.name,tempdir=tempdir)
        os.remove(pt_half2_inverted.name)
        sys.stderr.write('Merging tables: first half: {0} ; second half: {1} ; final table: {2}\n'.format(pt_half1.name,pt_half2.name,self.output_file))
        output_object = handle_file(self.output_file,'open',mode='w')
        self.model_interface.merge(pt_half1,pt_half2,output_object,self.mode)
        os.remove(pt_half1.name)
        os.remove(pt_half2.name)
        handle_file(self.output_file,'close',output_object,mode='w')
def _write_phrasetable(self,models,output_object,weights,inverted=False):
"""Incrementally load phrase tables, calculate score for increment and write it to output_object"""
# define which information we need to store from the phrase table
# possible flags: 'all', 'target', 'source' and 'pairs'
# interpolated models without re-normalization | |
<filename>module_particle_systems.py<gh_stars>0
from header_particle_systems import *
####################################################################################################################
# Each particle system contains the following fields:
#
# 1) Particle system id (string): Used for referencing particle systems in other files.
# The prefix psys_ is automatically added before each particle system id.
# 2) Particle system flags (int). See header_particle_systems.py for a list of available flags
# 3) Mesh name (string).
####
# 4) Num particles per second: Number of particles emitted per second.
# 5) Particle life: Each particle lives this long (in seconds).
# 6) Damping: How much of a particle's speed is lost due to friction.
# 7) Gravity strength: Effect of gravity. (Negative values make the particles float upwards.)
# 8) Turbulence size: Size of random turbulence (in meters)
# 9) Turbulence strength: How much a particle is affected by turbulence.
####
# 10,11) Alpha keys: Each attribute is controlled by two keys and
# 12,13) Red keys: each key has two fields: (time, magnitude)
# 14,15) Green keys: For example scale key (0.3,0.6) means
# 16,17) Blue keys: scale of each particle will be 0.6 at the
# 18,19) Scale keys: time 0.3 (where time=0 means creation and time=1 means end of the particle)
#
# The magnitudes are interpolated in between the two keys and remain constant beyond the keys.
# Except the alpha always starts from 0 at time 0.
####
# 20) Emit box size: The dimension of the box particles are emitted from.
# 21) Emit velocity: Particles are initially shot with this velocity.
# 22) Emit dir randomness
# 23) Particle rotation speed: Particles start to rotate with this (angular) speed (degrees per second).
# 24) Particle rotation damping: How quickly particles stop their rotation
####################################################################################################################
def psys(identifier, flags, mesh,
         number, life, damping=0.0, gravity=0.0, turbulence_size=0.0, turbulence_strength=0.0,
         alpha=((1.0, 1.0), (1.0, 1.0)),
         red=((1.0, 1.0), (1.0, 1.0)),
         green=((1.0, 1.0), (1.0, 1.0)),
         blue=((1.0, 1.0), (1.0, 1.0)),
         scale=((1.0, 1.0), (1.0, 1.0)),
         emit_box=(1.0, 1.0, 1.0),
         emit_velocity=(0.0, 0.0, 0.0),
         emit_direction_randomness=0.0,
         rotation_speed=0.0,
         rotation_damping=0.0):
    """Build a particle-system record tuple in the flat layout documented above.

    Each of alpha/red/green/blue/scale is a pair of (time, magnitude) keys;
    the two keys are flattened into consecutive slots of the result.

    Defaults are tuples rather than lists so that a caller mutating a
    default value cannot corrupt later calls (mutable-default pitfall);
    the returned key elements are unchanged.

    Returns a 24-element tuple: identifier, flags, mesh, number, life,
    damping, gravity, turbulence_size, turbulence_strength, the ten
    flattened keys, emit_box, emit_velocity, emit_direction_randomness,
    rotation_speed, rotation_damping.
    """
    return (identifier, flags, mesh, number, life, damping, gravity, turbulence_size, turbulence_strength,
            alpha[0], alpha[1], red[0], red[1], green[0], green[1], blue[0], blue[1], scale[0], scale[1],
            emit_box, emit_velocity, emit_direction_randomness, rotation_speed, rotation_damping)
particle_systems = [
# PN START *********************************************************************************************
("pistol_smoke", psf_billboard_3d|psf_randomize_size|psf_randomize_rotation, "prtcl_dust_a", # psf_global_emit_dir psf_billboard_3d | psf_randomize_size psf_always_emit
40, 13, 0.9, -0.00003, 40, 1, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.0, 0.75), (1, 0), #alpha keys
(0.0, 0.7), (1, 0.4), #red keys
(0.0, 0.7),(1, 0.4), #green keys
(0.0, 0.7), (1, 0.4), #blue keys
(0, 4.1), (0.5, 12.0), #scale keys
(0.2, 0.45, 0.2), #emit box size
(0.0, 0.0, 0.0), #emit velocity
0.0, #emit dir randomness
100, #rotation speed
0.5, #rotation damping
),
("cannon_ball_hit", psf_billboard_3d|psf_always_emit|psf_randomize_size, "prtcl_dust_c",
2000, 2, 15, -0.65, 12.0, 0.4, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.1, 0.5), (1, 0), #alpha keys
(0.1, 0.8), (1, 0.8), #red keys
(0.1, 0.7),(1, 0.7), #green keys
(0.1, 0.6), (1, 0.7), #blue keys
(6.0, 6.7), (7, 8.2), #scale keys
(0.45, 0.46, 3.2), #emit box size
(0, 0, 0.1), #emit velocity
4, #emit dir randomness
15, #rotation speed
0.1, #rotation damping
),
("cannon_blood", psf_billboard_3d |psf_randomize_size|psf_randomize_rotation, "prt_mesh_blood_1",
2000, 1.05, 3, 0.5, 0, 0, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.0, 0.7), (0.7, 0.7), #alpha keys
(0.1, 0.7), (1, 0.7), #red keys
(0.1, 0.7), (1, 0.7), #green keys
(0.1, 0.7), (1, 0.7), #blue keys
(0.1, 0.11), (1.1, 0.028), #scale keys
(0.10, 0.10, 1), #emit box size
(0, 1.0, 0.3), #emit velocity
0.9, #emit dir randomness
0, #rotation speed
0, #rotation damping
),
("cannon_blood_2", psf_billboard_3d | psf_randomize_size|psf_randomize_rotation , "prt_mesh_blood_3",
2000, 1, 3, 0.3, 0, 0, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.0, 0.25), (0.7, 0.1), #alpha keys
(0.1, 0.7), (1, 0.7), #red keys
(0.1, 0.7), (1, 0.7), #green keys
(0.1, 0.7), (1, 0.7), #blue keys
(0.3, 0.75), (1.2, 0.65), #scale keys
(0.11, 0.3, 0.11), #emit box size
(0.2, 0.3, 0), #emit velocity
0.3, #emit dir randomness
150, #rotation speed
0, #rotation damping
),
("cannon_smoke", psf_billboard_3d, "prtcl_dust_a",
1900, 11, 1.14, -0.006, 40, 1.75, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.0, 0.90), (0.85, 0), #alpha keys
(0.0, 0.99), (1, 0.99), #red keys
(0.0, 0.99),(1, 0.99), #green keys
(0.0, 0.99), (1, 0.99), #blue keys
(-0.1, 6), (1.0, 16.0), #scale keys
(0.1, 0.1, 0.1), #emit box size
(6.4, 0.2, 0), #emit velocity
1.8, #emit dir randomness
90, #rotation speed
0.4, #rotation damping
),
("cannon_flash", psf_billboard_3d | psf_randomize_size , "prt_sparks_mesh_1",
3000, 0.4, 0.1, -0.00003, 50.0, 1.0, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.1, 0.8), (1, 0), #alpha keys
(0.5, 1.0), (1, 0.9), #red keys
(0.5, 0.6), (1, 0.1), #green keys
(0.5, 0.2), (1, 0.0), #blue keys
(-0.1, 3.1), (0.9, 2.2), #scale keys
(0.1, 0.1, 0.1), #emit box size
(2.2, 0.26, 0), #emit velocity #patch1115 fix 21/1
0.05, #emit dir randomness
100.0, #rotation speed
0.5, #rotation damping
),
("musket_flash", psf_billboard_3d | psf_randomize_size , "prt_sparks_mesh_1",
3000, 0.8, 0.1, -0.00003, 50.0, 1.0, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.1, 0.8), (1, 0), #alpha keys
(0.5, 1.0), (1, 0.9), #red keys
(0.5, 0.6), (1, 0.1), #green keys
(0.5, 0.2), (1, 0.0), #blue keys
(0.0, 0.9), (0.75, 0.1), #scale keys
(0.1, 0.2, 0.1), #emit box size
(0.6, 3.2, 0.6), #emit velocity
0.05, #emit dir randomness
100.0, #rotation speed
0.5, #rotation damping
),
("musket_smoke", psf_billboard_3d|psf_randomize_size|psf_randomize_rotation, "prtcl_dust_a", # psf_global_emit_dir psf_billboard_3d | psf_randomize_size psf_always_emit
75, 22, 2, -0.006040, 90, 3.3, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.0, 0.45), (0.449, 0), #alpha keys
(0.0, 0.99), (1, 0.99), #red keys
(0.0, 0.99),(1, 0.99), #green keys
(0.0, 0.99), (1, 0.99), #blue keys
(-0.01, 3.45), (0.5, 12.7), #scale keys
(0.05, 0.05, 0.05), #emit box size
(0.0, 5.6, 0.0), #emit velocity
0.85, #emit dir randomness
90, #rotation speed
0.25, #rotation damping
),
("pan_flash", psf_billboard_3d | psf_randomize_size , "prt_sparks_mesh_1",
3000, 0.6, 0.1, -0.00003, 50.0, 1.0, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.1, 0.8), (1, 0), #alpha keys
(0.5, 1.0), (1, 0.9), #red keys
(0.5, 0.6), (1, 0.1), #green keys
(0.5, 0.2), (1, 0.0), #blue keys
(0.0, 1.0), (0.6, 0.7), #scale keys
(0.1, 0.1, 0.6), #emit box size
(0, 0, 0.6), #emit velocity
0.05, #emit dir randomness
100.0, #rotation speed
0.5, #rotation damping
),
("pan_smoke", psf_billboard_3d|psf_randomize_size|psf_randomize_rotation, "prtcl_dust_a", # psf_global_emit_dir psf_billboard_3d | psf_randomize_size psf_always_emit
60, 10.6, 1.8, -0.0003, 90, 4, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.0, 0.55), (0.549, 0), #alpha keys
(0.0, 0.99), (1, 0.99), #red keys
(0.0, 0.99),(1, 0.99), #green keys
(0.0, 0.99), (1, 0.99), #blue keys
(-0.07, 1.5), (0.5, 3.75), #scale keys
(0.02, 0.02, 0.02), #emit box size
(-1.6, 0.6, 0.0), #emit velocity
0.0, #emit dir randomness
),
("musket_sparks", psf_billboard_3d|psf_global_emit_dir|psf_always_emit|psf_randomize_size, "prt_sparks_mesh_1",
10, 0.7, 0.2, 0, 10.0, 0.02, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.1, 0.8), (1, 0), #alpha keys
(0.5, 1.0), (1, 0.9), #red keys
(0.5, 0.6), (1, 0.1), #green keys
(0.5, 0.2), (1, 0.0), #blue keys
(0.1, 0.05), (1, 0.05), #scale keys
(0.1, 0.1, 0.1), #emit box size
(0, 0, 0.9), #emit velocity
0.0, #emit dir randomness
0,
0,
),
("cannon_frizzle_smoke", psf_billboard_3d|psf_randomize_size|psf_randomize_rotation, "prtcl_dust_a", # psf_global_emit_dir psf_billboard_3d | psf_randomize_size psf_always_emit
40, 10, 1.8, -0.0003, 20, 1.75, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.0, 0.75), (0.85, 0), #alpha keys
(0.0, 0.7), (1, 0.4), #red keys
(0.0, 0.7),(1, 0.4), #green keys
(0.0, 0.7), (1, 0.4), #blue keys
(-0.04, 1.5), (0.5, 5.75), #scale keys
(0.02, 0.02, 0.02), #emit box size
(-1.6, 0.6, 0.0), #emit velocity
0.0, #emit dir randomness
),
("bullet_hit_smoke", psf_billboard_3d|psf_randomize_size, "prt_mesh_dust_1",
2000, 4, 15, -0.05, 10.0, 0.2, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.1, 0.5), (1, 0), #alpha keys
(0.1, 0.8), (1, 0.8), #red keys
(0.1, 0.7),(1, 0.7), #green keys
(0.1, 0.6), (1, 0.7), #blue keys
(0.0, 1.1), (2, 4.2), #scale keys
(0.2, 0.2, 2.2), #emit box size
(-1.2, 0, 0.05), #emit velocity
0.2, #emit dir randomness
10, #rotation speed
0.1, #rotation damping
),
("bottle_break", psf_billboard_3d|psf_randomize_size|psf_randomize_rotation|psf_always_emit, "prtcl_dust_g",
850, 8, 0.1, 1.0, 10, 2, #num_particles, life, damping, gravity_strength, turbulance_size, turbulance_strength
(0.1, 0.5), (1, 0), #alpha keys
(0.1, 0.8), (1, 0.8), #red keys
(0.1, 0.7),(1, 0.7), #green keys
(0.1, 0.6), (1, 0.7), #blue keys
(0.0, 1), (1.5, 1.5), #scale keys
(0.1, 0.1, 0.1), #emit box size
(0, 0, 0), #emit velocity
2.3, #emit dir randomness
50, #rotation speed
0.5, #rotation damping
),
("bird_blood", psf_billboard_3d | psf_randomize_size|psf_randomize_rotation , "prt_mesh_blood_3",
| |
Federal creditor agencies generally are required
to refer debts at no later than 120 days delinquent to the Cross-Servicing Program and TOP. Before
referring a debt to Fiscal Service for collection, federal creditor agencies must provide debtors
with notice and an opportunity to enter into a repayment agreement based on the debtor's financial
circumstances, dispute the debt, or object to the intended collection action. While federal creditor
agencies are responsible for providing this required due process, Fiscal Service also provides
debtors with additional opportunities to resolve their debts prior to the initiation of adverse
collection action. For example, prior to initiating a collection action, the Cross-Servicing
Program sends a demand letter to each debtor, and TOP sends a warning letter to payees before offsetting
recurring payments. Collections are not always mutually exclusive. The amount and count of collections
are recorded for each tool or technique that is used to collect funds.
### Parameters
----
fields : List[str] (optional, Default=None)
The fields parameter allows you to select which field(s) should be
included in the response. If desired fields are not specified, all
fields will be returned.
sort : List[str] (optional, Default=None)
The sort parameter allows a user to sort a field in ascending (least
to greatest) or descending (greatest to least) order. When no sort parameter
is specified, the default is to sort by the first column listed. Most API
endpoints are thus sorted by date in ascending order (historical to most
current).
filters : List[str] (optional, Default=None)
Filters are used to view a subset of the data based on specific
criteria. For example, you may want to find data that falls within
a certain date range, or only show records which contain a value
larger than a certain threshold. When no filters are provided,
the default response will return all fields and all data.
page_number : int (optional, Default=1)
The page number will set the index for the pagination, starting
at 1. This allows the user to paginate through the records
returned from an API request
page_size : int (optional, Default=100)
The page size will set the number of rows that are returned
on a request.
### Returns
----
Dict
A collection of `Records` resources.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> reports_on_receivables_service = treasury_client.treasury_reports_on_receivables()
>>> reports_on_receivables_service.collections_delinquent_debt()
"""
if fields:
fields = ','.join(fields)
if filters:
filters = ','.join(filters)
if sort:
sort = ','.join(sort)
content = self.treasury_session.make_request(
method='get',
endpoint='/v2/debt/tror/collections_delinquent_debt',
params={
'format': 'json',
'page[number]': page_number,
'page[size]': page_size,
'fields': fields,
'sort': sort,
'filters': filters
}
)
return content
def data_act_compliance(
self,
fields: List[str] = None,
sort: List[str] = None,
filters: List[str] = None,
page_number: int = 1,
page_size: int = 100
) -> Dict:
"""Queries Data Act Compliance from TROR.
### Overview
----
The 120 Day Delinquent Debt Referral Compliance Report provides access to tracking
and benchmarking compliance with the requirements of a key provision of the Digital
Accountability and Transparency Act of 2014 (the DATA Act). This table provides quick
insights into federal agency compliance rates, as well as information on the number
of eligible debts and debts referred or not referred each quarter, beginning in
Fiscal Year 2016.
### Parameters
----
fields : List[str] (optional, Default=None)
The fields parameter allows you to select which field(s) should be
included in the response. If desired fields are not specified, all
fields will be returned.
sort : List[str] (optional, Default=None)
The sort parameter allows a user to sort a field in ascending (least
to greatest) or descending (greatest to least) order. When no sort parameter
is specified, the default is to sort by the first column listed. Most API
endpoints are thus sorted by date in ascending order (historical to most
current).
filters : List[str] (optional, Default=None)
Filters are used to view a subset of the data based on specific
criteria. For example, you may want to find data that falls within
a certain date range, or only show records which contain a value
larger than a certain threshold. When no filters are provided,
the default response will return all fields and all data.
page_number : int (optional, Default=1)
The page number will set the index for the pagination, starting
at 1. This allows the user to paginate through the records
returned from an API request
page_size : int (optional, Default=100)
The page size will set the number of rows that are returned
on a request.
### Returns
----
Dict
A collection of `Records` resources.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> reports_on_receivables_service = treasury_client.treasury_reports_on_receivables()
>>> reports_on_receivables_service.data_act_compliance()
"""
if fields:
fields = ','.join(fields)
if filters:
filters = ','.join(filters)
if sort:
sort = ','.join(sort)
content = self.treasury_session.make_request(
method='get',
endpoint='/v2/debt/tror/data_act_compliance',
params={
'format': 'json',
'page[number]': page_number,
'page[size]': page_size,
'fields': fields,
'sort': sort,
'filters': filters
}
)
return content
def delinquent_debt(
self,
fields: List[str] = None,
sort: List[str] = None,
filters: List[str] = None,
page_number: int = 1,
page_size: int = 100
) -> Dict:
"""Queries Delinquent Debt from TROR.
### Overview
----
Delinquent Debt provides delinquent debt amounts owed to the federal government by
an individual, organization, or entity other than another federal agency during the
reporting period. A non-tax debt is considered delinquent if it has not been paid by
the date specified in an agency's initial written demand for payment or applicable
agreement. A non-tax debt may become delinquent during the same fiscal year that it
was recorded as a receivable or during a subsequent fiscal year. The total outstanding
delinquent debt balance at the end of a fiscal year is the net of debt that remained
delinquent from previous fiscal years and debt that became delinquent during that fiscal
year, less collections, adjustments, and amounts written off. The calculation of the
amount that became delinquent during the fiscal year is based on debt that was between
1 and 365 days delinquent as of September 30 of the Fiscal Year. Delinquent debts are
categorized by age: 1-180 days, 181 days-2 years, 2-6 years, 6-10 years, and >10 years.
Delinquent debts are also categorized by creditor agency category: Commercial, Consumer,
Foreign and Sovereign Government, and State and Local Government. Beginning in first
quarter of FY2016, the dataset includes additional detailed breakdowns of the U.S.
Government's delinquent debt by age, as required by the DATA Act.
### Parameters
----
fields : List[str] (optional, Default=None)
The fields parameter allows you to select which field(s) should be
included in the response. If desired fields are not specified, all
fields will be returned.
sort : List[str] (optional, Default=None)
The sort parameter allows a user to sort a field in ascending (least
to greatest) or descending (greatest to least) order. When no sort parameter
is specified, the default is to sort by the first column listed. Most API
endpoints are thus sorted by date in ascending order (historical to most
current).
filters : List[str] (optional, Default=None)
Filters are used to view a subset of the data based on specific
criteria. For example, you may want to find data that falls within
a certain date range, or only show records which contain a value
larger than a certain threshold. When no filters are provided,
the default response will return all fields and all data.
page_number : int (optional, Default=1)
The page number will set the index for the pagination, starting
at 1. This allows the user to paginate through the records
returned from an API request
page_size : int (optional, Default=100)
The page size will set the number of rows that are returned
on a request.
### Returns
----
Dict
A collection of `Records` resources.
### Usage
----
>>> treasury_client = FederalTreasuryClient()
>>> reports_on_receivables_service = treasury_client.treasury_reports_on_receivables()
>>> reports_on_receivables_service.delinquent_debt()
"""
if fields:
fields = ','.join(fields)
if filters:
filters = ','.join(filters)
if sort:
| |
<url>
Play IRC art files from web links.
"""
if not channel:
channel = msg.args[0]
if channel != msg.args[0] and not ircdb.checkCapability(msg.prefix, "admin"):
irc.errorNoCapability("admin")
return
if not irc.isChannel(channel):
channel = msg.nick
optlist = dict(optlist)
self.stopped[channel] = False
if "delay" in optlist:
delay = optlist.get("delay")
else:
delay = self.registryValue("delay", msg.args[0])
if url.startswith("https://paste.ee/p/"):
url = url.replace("https://paste.ee/p/", "https://paste.ee/r/")
elif url.startswith("https://pastebin.com/") and "/raw/" not in url:
url = url.replace("https://pastebin.com/", "https://pastebin.com/raw/")
ua = random.choice(self.agents)
header = {"User-Agent": ua}
try:
r = requests.get(url, headers=header, stream=True, timeout=10)
r.raise_for_status()
except (
requests.exceptions.RequestException,
requests.exceptions.HTTPError,
) as e:
log.debug("TextArt: error retrieving data for scroll: {0}".format(e))
return
if "text/plain" in r.headers["content-type"]:
try:
file = r.content.decode().replace("\r\n", "\n")
except:
file = r.text.replace("\r\n", "\n")
else:
irc.reply("Invalid file type.", private=False, notice=False)
return
output = file.splitlines()
asyncio.run(self.reply(irc, output, channel, delay))
scroll = wrap(scroll, [optional("channel"), getopts({"delay": "float"}), "text"])
def a2m(self, irc, msg, args, channel, optlist, url):
"""[<channel>] [--delay] [--l] [--r] [--n] [--p] [--t] [--w] <url>
Convert ANSI files to IRC formatted text. https://github.com/tat3r/a2m
"""
if not channel:
channel = msg.args[0]
if channel != msg.args[0] and not ircdb.checkCapability(msg.prefix, "admin"):
irc.errorNoCapability("admin")
return
if not irc.isChannel(channel):
channel = msg.nick
optlist = dict(optlist)
opts = ""
if "l" in optlist:
l = optlist.get("l")
opts += "-l {0} ".format(l)
if "r" in optlist:
r = optlist.get("r")
opts += "-r {0} ".format(r)
if "n" in optlist:
n = optlist.get("n")
opts += "-n {0}".format(n)
if "p" in optlist:
opts += "-p "
if "t" in optlist:
t = optlist.get("t")
opts += "-t {0} ".format(t)
if "w" in optlist:
w = optlist.get("w")
opts += "-w {0} ".format(w)
else:
opts += "-w 80 "
if "delay" in optlist:
delay = optlist.get("delay")
else:
delay = self.registryValue("delay", msg.args[0])
ua = random.choice(self.agents)
header = {"User-Agent": ua}
try:
r = requests.get(url, stream=True, headers=header, timeout=10)
r.raise_for_status()
except (
requests.exceptions.RequestException,
requests.exceptions.HTTPError,
) as e:
log.debug("TextArt: error retrieving data for a2m: {0}".format(e))
return
try:
if (
"text/plain" in r.headers["content-type"]
or "application/octet-stream" in r.headers["content-type"]
and int(r.headers["content-length"]) < 1000000
):
path = os.path.dirname(os.path.abspath(__file__))
filepath = "{0}/tmp".format(path)
filename = "{0}/{1}".format(filepath, url.split("/")[-1])
open(filename, "wb").write(r.content.replace(b";5;", b";"))
try:
output = pexpect.run(
"a2m {0} {1}".format(opts.strip(), str(filename))
)
try:
os.remove(filename)
except:
pass
except:
irc.reply(
"Error. Have you installed A2M? https://github.com/tat3r/a2m",
private=False,
notice=False,
)
return
else:
irc.reply("Invalid file type.")
return
except:
irc.reply("Invalid file type.")
return
self.stopped[channel] = False
output = re.sub("(\x03(\d+).*)\x03,", "\g<1>\x03\g<2>,", output.decode())
output = output.splitlines()
asyncio.run(self.reply(irc, output, channel, delay))
if self.registryValue("pasteEnable", msg.args[0]):
paste = ""
for line in output:
if not line.strip():
line = "\xa0"
paste += line + "\n"
if self.registryValue("pasteEnable", msg.args[0]):
irc.reply(self.doPaste(url, paste), private=False, notice=False, to=channel)
a2m = wrap(
a2m,
[
optional("channel"),
getopts(
{
"l": "int",
"r": "int",
"t": "int",
"w": "int",
"p": "",
"delay": "float",
}
),
"text",
],
)
    def p2u(self, irc, msg, args, channel, optlist, url):
        """[<channel>] [--b] [--f] [--p] [--s] [--t] [--w] [--delay] <url>
        Picture to Unicode. https://git.trollforge.org/p2u/about/
        """
        # Resolve target channel; only admins may redirect output elsewhere,
        # and non-channel targets fall back to the caller's nick.
        if not channel:
            channel = msg.args[0]
        if channel != msg.args[0] and not ircdb.checkCapability(msg.prefix, "admin"):
            irc.errorNoCapability("admin")
            return
        if not irc.isChannel(channel):
            channel = msg.nick
        optlist = dict(optlist)
        # Build the p2u command-line options; defaults are -f m, -p x, and a
        # width taken from the blockWidth registry value.
        opts = ""
        if "b" in optlist:
            b = optlist.get("b")
            opts += "-b {0} ".format(b)
        if "f" in optlist:
            f = optlist.get("f")
            opts += "-f {0} ".format(f)
        else:
            opts += "-f m "
        if "p" in optlist:
            p = optlist.get("p")
            opts += "-p {0} ".format(p)
        else:
            opts += "-p x "
        if "s" in optlist:
            s = optlist.get("s")
            opts += "-s {0} ".format(s)
        if "t" in optlist:
            t = optlist.get("t")
            opts += "-t {0} ".format(t)
        if "w" in optlist:
            w = optlist.get("w")
            opts += "-w {0} ".format(w)
        else:
            w = self.registryValue("blockWidth", msg.args[0])
            opts += "-w {0} ".format(w)
        if "delay" in optlist:
            delay = optlist.get("delay")
        else:
            delay = self.registryValue("delay", msg.args[0])
        # Temp file lives in <plugin dir>/tmp, named after the URL's last segment.
        path = os.path.dirname(os.path.abspath(__file__))
        filepath = "{0}/tmp".format(path)
        filename = "{0}/{1}".format(filepath, url.split("/")[-1])
        ua = random.choice(self.agents)
        header = {"User-Agent": ua}
        image_formats = ("image/png", "image/jpeg", "image/jpg", "image/gif")
        try:
            r = requests.get(url, stream=True, headers=header, timeout=10)
            r.raise_for_status()
        except (
            requests.exceptions.RequestException,
            requests.exceptions.HTTPError,
        ) as e:
            log.debug("TextArt: error retrieving data for p2u: {0}".format(e))
            return
        if r.headers["content-type"] in image_formats and r.status_code == 200:
            with open("{0}".format(filename), "wb") as f:
                f.write(r.content)
            try:
                # NOTE(review): "-f m" is hardcoded here although opts already
                # contains a -f flag (the user's --f or the "-f m" default), so
                # the font option is passed twice — confirm which occurrence
                # the p2u binary honors.
                output = pexpect.run(
                    "p2u -f m {0} {1}".format(opts.strip(), str(filename))
                )
                try:
                    os.remove(filename)
                except:
                    pass
            except:
                irc.reply(
                    "Error. Have you installed p2u? https://git.trollforge.org/p2u",
                    private=False,
                    notice=False,
                )
                return
        else:
            irc.reply("Invalid file type.", private=False, notice=False)
            return
        self.stopped[channel] = False
        output = output.decode().splitlines()
        # Replace a leading bare color control code with a space to keep alignment.
        output = [re.sub("^\x03 ", " ", line) for line in output]
        asyncio.run(self.reply(irc, output, channel, delay))
        if self.registryValue("pasteEnable", msg.args[0]):
            paste = ""
            for line in output:
                if not line.strip():
                    line = "\xa0"  # NBSP keeps blank lines visible in the paste
                paste += line + "\n"
        if self.registryValue("pasteEnable", msg.args[0]):
            irc.reply(self.doPaste(url, paste), private=False, notice=False, to=channel)
        else:
            # NOTE(review): with pasteEnable off, this replies "Unexpected file
            # type or link format" even after a successful conversion — looks
            # unintended; confirm against upstream before changing.
            irc.reply(
                "Unexpected file type or link format", private=False, notice=False
            )
    p2u = wrap(
        p2u,
        [
            optional("channel"),
            getopts(
                {
                    "b": "int",
                    "f": "text",
                    "p": "text",
                    "s": "int",
                    "t": "int",
                    "w": "int",
                    "delay": "float",
                }
            ),
            "text",
        ],
    )
    def tdf(self, irc, msg, args, channel, optlist, text):
        """[<channel>] [--f] [--j] [--w] [--e] [--r] [--i][--delay] <text>
        Text to TheDraw ANSI Fonts. http://www.roysac.com/thedrawfonts-tdf.html
        --f [font] Specify font file used.
        --j l|r|c Justify left, right, or center. Default is left.
        --w n Set screen width. Default is 80.
        --c a|m Color format ANSI or mirc. Default is ANSI.
        --i Print font details.
        --r Use random font.
        """
        # Resolve target channel; only admins may redirect output elsewhere,
        # and non-channel targets fall back to the caller's nick.
        if not channel:
            channel = msg.args[0]
        if channel != msg.args[0] and not ircdb.checkCapability(msg.prefix, "admin"):
            irc.errorNoCapability("admin")
            return
        if not irc.isChannel(channel):
            channel = msg.nick
        # Enforce configured length/word caps silently (no error reply).
        if len(text) > self.registryValue("maxLength", msg.channel):
            return
        if len(text.split(" ")) > self.registryValue("maxWords", msg.channel):
            return
        optlist = dict(optlist)
        # Assemble tdfiglet options; a random font (-r) is used unless a
        # specific font (--f) was requested.
        opts = ""
        if "f" in optlist:
            f = optlist.get("f")
            opts += "-f {0} ".format(f.lower())
        else:
            opts += "-r "
        if "j" in optlist:
            j = optlist.get("j")
            opts += "-j {0} ".format(j)
        if "w" in optlist:
            w = optlist.get("w")
            opts += "-w {0} ".format(w)
        else:
            opts += "-w 80 "
        if "e" in optlist:
            e = optlist.get("e")
            opts += "-e {0} ".format(e)
        if "r" in optlist:
            opts += "-r "
        if "delay" in optlist:
            delay = optlist.get("delay")
        else:
            delay = self.registryValue("delay", msg.args[0])
        if "i" in optlist:
            opts += "-i "
        # NOTE(review): the docstring documents --c, but color is hardcoded to
        # mirc ("-c m") below and "c" is absent from the getopts spec.
        try:
            output = pexpect.run(
                "tdfiglet -c m {0} {1}".format(opts.strip(), r"{}".format(text))
            )
        except:
            irc.reply(
                "Error. Have you installed tdfiglet? https://github.com/tat3r/tdfiglet",
                private=False,
                notice=False,
            )
            return
        self.stopped[channel] = False
        # Normalize doubled line endings and strip stray color-reset sequences.
        output = output.decode().replace("\r\r\n", "\r\n")
        output = re.sub("\x03\x03\s*", "\x0F ", output)
        output = re.sub("\x0F\s*\x03$", "", output)
        output = output.splitlines()
        asyncio.run(self.reply(irc, output, channel, delay))
        if self.registryValue("pasteEnable", msg.args[0]):
            paste = ""
            for line in output:
                if not line.strip():
                    line = "\xa0"  # NBSP keeps blank lines visible in the paste
                paste += line + "\n"
        if self.registryValue("pasteEnable", msg.args[0]):
            irc.reply(
                self.doPaste(text, paste), private=False, notice=False, to=channel
            )
    tdf = wrap(
        tdf,
        [
            optional("channel"),
            getopts(
                {
                    "f": "text",
                    "j": "text",
                    "w": "int",
                    "e": "text",
                    "r": "",
                    "i": "",
                    "delay": "float",
                }
            ),
            "text",
        ],
    )
def toilet(self, irc, msg, args, channel, optlist, text):
"""[<channel>] [--f fontname] [--F filter1,filter2,etc.] [--w] [--delay] <text>
Text to toilet figlets. -f to select font. -F to select filters. Separate multiple filters with a comma.
"""
if not channel:
channel = msg.args[0]
if channel != msg.args[0] and not ircdb.checkCapability(msg.prefix, "admin"):
irc.errorNoCapability("admin")
return
if not irc.isChannel(channel):
channel = msg.nick
if len(text) > self.registryValue("maxLength", msg.channel):
return
if len(text.split(" ")) > self.registryValue("maxWords", msg.channel):
return
optlist = dict(optlist)
opts = ""
if "f" in optlist:
f = optlist.get("f")
opts += "-f {0} ".format(f)
if "F" in optlist:
filter = optlist.get("F")
if "," in filter:
filter = filter.split(",")
for i in range(len(filter)):
opts += "-F {0} ".format(filter[i])
else:
opts += "-F {0} ".format(filter)
if "w" in optlist:
w = optlist.get("w")
opts += "-w {0} ".format(w)
elif "W" in optlist:
opts += "-W "
else:
opts += "-w 100 "
if "s" in optlist:
opts += "-s "
elif "k" in optlist:
opts += "-k "
elif "o" in optlist:
opts += "-o "
elif "S" in optlist:
opts += "-S "
if "delay" in optlist:
delay = optlist.get("delay")
else:
delay = self.registryValue("delay", msg.args[0])
try:
output = pexpect.run("toilet --irc {0} {1}".format(opts.strip(), text))
except:
irc.reply("Error. Have you installed toilet?", private=False, notice=False)
return
self.stopped[channel] = False
output = output.decode().replace("\r\r\n", "\r\n")
| |
from __future__ import print_function, division
from sympy.core import Add, S, sympify, oo, pi, Symbol, Dummy, expand_func
from sympy.core.compatibility import range, as_int
from sympy.core.function import Function, ArgumentIndexError
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.logic import fuzzy_and, fuzzy_not
from sympy.functions.special.zeta_functions import zeta
from sympy.functions.special.error_functions import erf, erfc
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.integers import ceiling, floor
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import sin, cos, cot
from sympy.functions.combinatorial.numbers import bernoulli, harmonic
from sympy.functions.combinatorial.factorials import factorial, rf, RisingFactorial
def intlike(n):
    """Return True if ``n`` can be interpreted as an integer (non-strictly)."""
    try:
        as_int(n, strict=False)
    except ValueError:
        return False
    return True
###############################################################################
############################ COMPLETE GAMMA FUNCTION ##########################
###############################################################################
class gamma(Function):
    r"""
    The gamma function

    .. math::
        \Gamma(x) := \int^{\infty}_{0} t^{x-1} e^{-t} \mathrm{d}t.

    The ``gamma`` function implements the function which passes through the
    values of the factorial function, i.e. `\Gamma(n) = (n - 1)!` when n is
    an integer. More general, `\Gamma(z)` is defined in the whole complex
    plane except at the negative integers where there are simple poles.

    Examples
    ========

    >>> from sympy import S, I, pi, oo, gamma
    >>> from sympy.abc import x

    Several special values are known:

    >>> gamma(1)
    1
    >>> gamma(4)
    6
    >>> gamma(S(3)/2)
    sqrt(pi)/2

    The Gamma function obeys the mirror symmetry:

    >>> from sympy import conjugate
    >>> conjugate(gamma(x))
    gamma(conjugate(x))

    Differentiation with respect to x is supported:

    >>> from sympy import diff
    >>> diff(gamma(x), x)
    gamma(x)*polygamma(0, x)

    Series expansion is also supported:

    >>> from sympy import series
    >>> series(gamma(x), x, 0, 3)
    1/x - EulerGamma + x*(EulerGamma**2/2 + pi**2/12) + x**2*(-EulerGamma*pi**2/12 + polygamma(2, 1)/6 - EulerGamma**3/6) + O(x**3)

    We can numerically evaluate the gamma function to arbitrary precision
    on the whole complex plane:

    >>> gamma(pi).evalf(40)
    2.288037795340032417959588909060233922890
    >>> gamma(1+I).evalf(20)
    0.49801566811835604271 - 0.15494982830181068512*I

    See Also
    ========

    lowergamma: Lower incomplete gamma function.
    uppergamma: Upper incomplete gamma function.
    polygamma: Polygamma function.
    loggamma: Log Gamma function.
    digamma: Digamma function.
    trigamma: Trigamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Gamma_function
    .. [2] http://dlmf.nist.gov/5
    .. [3] http://mathworld.wolfram.com/GammaFunction.html
    .. [4] http://functions.wolfram.com/GammaBetaErf/Gamma/
    """

    # gamma is single-valued on the complex plane; no branch tracking needed.
    unbranched = True

    def fdiff(self, argindex=1):
        # d/dx Gamma(x) = Gamma(x)*psi(x), where psi = polygamma(0, .).
        if argindex == 1:
            return self.func(self.args[0])*polygamma(0, self.args[0])
        else:
            raise ArgumentIndexError(self, argindex)

    @classmethod
    def eval(cls, arg):
        if arg.is_Number:
            if arg is S.NaN:
                return S.NaN
            elif arg is S.Infinity:
                return S.Infinity
            elif intlike(arg):
                if arg.is_positive:
                    # Gamma(n) = (n-1)! for positive integers.
                    return factorial(arg - 1)
                else:
                    # Simple poles at 0 and the negative integers.
                    return S.ComplexInfinity
            elif arg.is_Rational:
                if arg.q == 2:
                    # Half-integer arguments: closed form via the double
                    # factorial, Gamma(n + 1/2) = (2n-1)!!/2**n * sqrt(pi).
                    n = abs(arg.p) // arg.q
                    if arg.is_positive:
                        k, coeff = n, S.One
                    else:
                        # Negative half-integers: reflection introduces a sign
                        # alternating with the parity of n.
                        n = k = n + 1
                        if n & 1 == 0:
                            coeff = S.One
                        else:
                            coeff = S.NegativeOne
                    # Accumulate the odd double factorial 3*5*...*(2k-1).
                    for i in range(3, 2*k, 2):
                        coeff *= i
                    if arg.is_positive:
                        return coeff*sqrt(S.Pi) / 2**n
                    else:
                        return 2**n*sqrt(S.Pi) / coeff

    def _eval_expand_func(self, **hints):
        arg = self.args[0]
        if arg.is_Rational:
            # Shift the argument into (-1, 1) and rebuild with the recurrence
            # Gamma(x + n) = rf(x, n)*Gamma(x).
            if abs(arg.p) > arg.q:
                x = Dummy('x')
                n = arg.p // arg.q
                p = arg.p - n*arg.q
                return self.func(x + n)._eval_expand_func().subs(x, Rational(p, arg.q))
        if arg.is_Add:
            coeff, tail = arg.as_coeff_add()
            if coeff and coeff.q != 1:
                # Split off the integer part of a rational shift so the
                # RisingFactorial below has an integer exponent.
                intpart = floor(coeff)
                tail = (coeff - intpart,) + tail
                coeff = intpart
            tail = arg._new_rawargs(*tail, reeval=False)
            return self.func(tail)*RisingFactorial(tail, coeff)
        return self.func(*self.args)

    def _eval_conjugate(self):
        # Mirror symmetry: conjugate(Gamma(x)) == Gamma(conjugate(x)).
        return self.func(self.args[0].conjugate())

    def _eval_is_real(self):
        x = self.args[0]
        # Poles at nonpositive integers are not real; elsewhere on the real
        # axis the value is real.
        if x.is_nonpositive and x.is_integer:
            return False
        if intlike(x) and x <= 0:
            return False
        if x.is_positive or x.is_noninteger:
            return True

    def _eval_is_positive(self):
        x = self.args[0]
        if x.is_positive:
            return True
        elif x.is_noninteger:
            # Between consecutive negative integers the sign alternates;
            # Gamma is positive exactly when floor(x) is even.
            return floor(x).is_even

    def _eval_rewrite_as_tractable(self, z, **kwargs):
        return exp(loggamma(z))

    def _eval_rewrite_as_factorial(self, z, **kwargs):
        return factorial(z - 1)

    def _eval_nseries(self, x, n, logx):
        x0 = self.args[0].limit(x, 0)
        if not (x0.is_Integer and x0 <= 0):
            return super(gamma, self)._eval_nseries(x, n, logx)
        # Expand around a pole via Gamma(t + x0) = Gamma(t + 1)/rf(t + x0, 1 - x0).
        t = self.args[0] - x0
        return (self.func(t + 1)/rf(self.args[0], -x0 + 1))._eval_nseries(x, n, logx)

    def _sage_(self):
        import sage.all as sage
        return sage.gamma(self.args[0]._sage_())
###############################################################################
################## LOWER and UPPER INCOMPLETE GAMMA FUNCTIONS #################
###############################################################################
class lowergamma(Function):
    r"""
    The lower incomplete gamma function.

    It can be defined as the meromorphic continuation of

    .. math::
        \gamma(s, x) := \int_0^x t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \Gamma(s, x).

    This can be shown to be the same as

    .. math::
        \gamma(s, x) = \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),

    where :math:`{}_1F_1` is the (confluent) hypergeometric function.

    Examples
    ========

    >>> from sympy import lowergamma, S
    >>> from sympy.abc import s, x
    >>> lowergamma(s, x)
    lowergamma(s, x)
    >>> lowergamma(3, x)
    -2*(x**2/2 + x + 1)*exp(-x) + 2
    >>> lowergamma(-S(1)/2, x)
    -2*sqrt(pi)*erf(sqrt(x)) - 2*exp(-x)/sqrt(x)

    See Also
    ========

    gamma: Gamma function.
    uppergamma: Upper incomplete gamma function.
    polygamma: Polygamma function.
    loggamma: Log Gamma function.
    digamma: Digamma function.
    trigamma: Trigamma function.
    sympy.functions.special.beta_functions.beta: Euler Beta function.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Incomplete_gamma_function#Lower_incomplete_Gamma_function
    .. [2] Abramowitz & Stegun, eds. (1965), Chapter 6, Section 5,
           Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables
    .. [3] http://dlmf.nist.gov/8
    .. [4] http://functions.wolfram.com/GammaBetaErf/Gamma2/
    .. [5] http://functions.wolfram.com/GammaBetaErf/Gamma3/
    """

    def fdiff(self, argindex=2):
        from sympy import meijerg, unpolarify
        if argindex == 2:
            # d/dx lowergamma(a, x) = x**(a-1)*exp(-x).
            a, z = self.args
            return exp(-unpolarify(z))*z**(a - 1)
        elif argindex == 1:
            # Derivative with respect to the parameter a, via Meijer G.
            a, z = self.args
            return gamma(a)*digamma(a) - log(z)*uppergamma(a, z) \
                - meijerg([], [1, 1], [0, 0, a], [], z)
        else:
            raise ArgumentIndexError(self, argindex)

    @classmethod
    def eval(cls, a, x):
        # For lack of a better place, we use this one to extract branching
        # information. The following can be
        # found in the literature (c/f references given above), albeit scattered:
        # 1) For fixed x != 0, lowergamma(s, x) is an entire function of s
        # 2) For fixed positive integers s, lowergamma(s, x) is an entire
        #    function of x.
        # 3) For fixed non-positive integers s,
        #    lowergamma(s, exp(I*2*pi*n)*x) =
        #              2*pi*I*n*(-1)**(-s)/factorial(-s) + lowergamma(s, x)
        #    (this follows from lowergamma(s, x).diff(x) = x**(s-1)*exp(-x)).
        # 4) For fixed non-integral s,
        #    lowergamma(s, x) = x**s*gamma(s)*lowergamma_unbranched(s, x),
        #    where lowergamma_unbranched(s, x) is an entire function (in fact
        #    of both s and x), i.e.
        #    lowergamma(s, exp(2*I*pi*n)*x) = exp(2*pi*I*n*a)*lowergamma(a, x)
        from sympy import unpolarify, I
        if x == 0:
            return S.Zero
        nx, n = x.extract_branch_factor()
        if a.is_integer and a.is_positive:
            # Case 2: entire in x, so polar information can be dropped.
            nx = unpolarify(x)
            if nx != x:
                return lowergamma(a, nx)
        elif a.is_integer and a.is_nonpositive:
            # Case 3: each turn around the branch point adds a constant.
            if n != 0:
                return 2*pi*I*n*(-1)**(-a)/factorial(-a) + lowergamma(a, nx)
        elif n != 0:
            # Case 4: branch factor becomes a simple exponential prefactor.
            return exp(2*pi*I*n*a)*lowergamma(a, nx)

        # Special values.
        if a.is_Number:
            if a is S.One:
                return S.One - exp(-x)
            elif a is S.Half:
                return sqrt(pi)*erf(sqrt(x))
            elif a.is_Integer or (2*a).is_Integer:
                # Integer / half-integer a: finite closed forms obtained from
                # the recurrence gamma(a, x) = (a-1)*gamma(a-1, x) - x**(a-1)*exp(-x).
                b = a - 1
                if b.is_positive:
                    if a.is_integer:
                        return factorial(b) - exp(-x) * factorial(b) * Add(*[x ** k / factorial(k) for k in range(a)])
                    else:
                        return gamma(a) * (lowergamma(S.Half, x)/sqrt(pi) - exp(-x) * Add(*[x**(k-S.Half) / gamma(S.Half+k) for k in range(1, a+S.Half)]))

                if not a.is_Integer:
                    return (-1)**(S.Half - a) * pi*erf(sqrt(x)) / gamma(1-a) + exp(-x) * Add(*[x**(k+a-1)*gamma(a) / gamma(a+k) for k in range(1, S(3)/2-a)])

    def _eval_evalf(self, prec):
        from mpmath import mp, workprec
        from sympy import Expr
        if all(x.is_number for x in self.args):
            a = self.args[0]._to_mpmath(prec)
            z = self.args[1]._to_mpmath(prec)
            with workprec(prec):
                # mpmath's generalized incomplete gamma with limits (0, z).
                res = mp.gammainc(a, 0, z)
            return Expr._from_mpmath(res, prec)
        else:
            return self

    def _eval_conjugate(self):
        z = self.args[1]
        # Conjugation commutes except across the branch cut endpoints.
        if not z in (S.Zero, S.NegativeInfinity):
            return self.func(self.args[0].conjugate(), z.conjugate())

    def _eval_rewrite_as_uppergamma(self, s, x, **kwargs):
        return gamma(s) - uppergamma(s, x)

    def _eval_rewrite_as_expint(self, s, x, **kwargs):
        from sympy import expint
        if s.is_integer and s.is_nonpositive:
            # No expint representation at the nonpositive-integer poles.
            return self
        return self.rewrite(uppergamma).rewrite(expint)
class uppergamma(Function):
r"""
The upper incomplete gamma function.
It can be defined as the meromorphic continuation of
.. math::
\Gamma(s, x) := \int_x^\infty t^{s-1} e^{-t} \mathrm{d}t = \Gamma(s) - \gamma(s, x).
where `\gamma(s, x)` is the lower incomplete gamma function,
:class:`lowergamma`. This can be shown to be the same as
.. math::
\Gamma(s, x) = \Gamma(s) - \frac{x^s}{s} {}_1F_1\left({s \atop s+1} \middle| -x\right),
where :math:`{}_1F_1` is the (confluent) hypergeometric function.
The upper incomplete gamma function is also essentially equivalent to the
generalized exponential integral:
.. math::
\operatorname{E}_{n}(x) = \int_{1}^{\infty}{\frac{e^{-xt}}{t^n} \, dt} = x^{n-1}\Gamma(1-n,x).
Examples
========
>>> from sympy import uppergamma, S
>>> from sympy.abc import s, x
>>> uppergamma(s, x)
uppergamma(s, x)
>>> uppergamma(3, x)
2*(x**2/2 + x + 1)*exp(-x)
>>> uppergamma(-S(1)/2, x)
-2*sqrt(pi)*erfc(sqrt(x)) + 2*exp(-x)/sqrt(x)
>>> uppergamma(-2, x)
expint(3, x)/x**2
See Also
========
gamma: Gamma function.
lowergamma: Lower incomplete gamma function.
polygamma: Polygamma function.
loggamma: Log Gamma function.
digamma: Digamma function.
trigamma: Trigamma function.
sympy.functions.special.beta_functions.beta: Euler Beta function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Incomplete_gamma_function#Upper_incomplete_Gamma_function
| |
# -- coding: utf-8 --
# MIT License
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from scipy.stats import binom, norm, nct
from ..hk import HansonKoopmans
from ..checks import numpy_array, assert_2d_sort
def normal(x, p, g):
    r"""
    Compute one-side tolerance bound using the normal distribution.

    Computes the one-sided tolerance interval using the normal distribution.
    This follows the derivation in [1] to calculate the interval as a factor
    of sample standard deviations away from the sample mean. See also [2].

    Parameters
    ----------
    x : ndarray (1-D, or 2-D)
        Numpy array of samples to compute the tolerance bound. Assumed data
        type is np.float. Shape of (m, n) is assumed for 2-D arrays with m
        number of sets of sample size n.
    p : float
        Percentile for the TI to estimate.
    g : float
        Confidence level where g > 0. and g < 1.

    Returns
    -------
    ndarray (1-D)
        The normal distribution tolerance bound.

    References
    ----------
    [1] Young, D. S. (2010). tolerance: An R Package for Estimating
        Tolerance Intervals. Journal of Statistical Software; Vol 1, Issue 5
        (2010). Retrieved from http://dx.doi.org/10.18637/jss.v036.i05

    [2] Montgomery, D. C., & Runger, G. C. (2018). Chapter 8. Statistical
        Intervals for a Single Sample. In Applied Statistics and Probability
        for Engineers, 7th Edition.

    Examples
    --------
    Estimate the 10th percentile lower bound with 95% confidence of the
    following 100 random samples from a normal distribution.

    >>> import numpy as np
    >>> import toleranceinterval as ti
    >>> x = np.random.normal(size=100)
    >>> lb = ti.oneside.normal(x, 0.1, 0.95)

    Estimate the 90th percentile upper bound with 95% confidence of the
    following 100 random samples from a normal distribution.

    >>> ub = ti.oneside.normal(x, 0.9, 0.95)

    """
    x = numpy_array(x)  # check if numpy array, if not make numpy array
    x = assert_2d_sort(x)
    m, n = x.shape
    # Mirror lower-tail requests to the upper tail; the k-factor is then
    # subtracted instead of added below (the normal family is symmetric).
    if p < 0.5:
        p = 1.0 - p
        minus = True
    else:
        minus = False
    zp = norm.ppf(p)
    # One-sided tolerance factor from the noncentral t distribution:
    # k = t_{g; n-1, sqrt(n)*z_p} / sqrt(n).
    t = nct.ppf(g, df=n-1., nc=np.sqrt(n)*zp)
    k = t / np.sqrt(n)
    if minus:
        return x.mean(axis=1) - (k*x.std(axis=1, ddof=1))
    else:
        return x.mean(axis=1) + (k*x.std(axis=1, ddof=1))
def lognormal(x, p, g):
    r"""
    Compute one-side tolerance bound using the lognormal distribution.

    Computes the one-sided tolerance interval using the lognormal
    distribution. This just performs a ln and exp transformations of the
    normal distribution.

    Parameters
    ----------
    x : ndarray (1-D, or 2-D)
        Numpy array of samples to compute the tolerance bound. Assumed data
        type is np.float. Shape of (m, n) is assumed for 2-D arrays with m
        number of sets of sample size n.
    p : float
        Percentile for the TI to estimate.
    g : float
        Confidence level where g > 0. and g < 1.

    Returns
    -------
    ndarray (1-D)
        The lognormal distribution tolerance bound.

    Examples
    --------
    Estimate the 10th percentile lower bound with 95% confidence of the
    following 100 random samples from a lognormal distribution.

    >>> import numpy as np
    >>> import toleranceinterval as ti
    >>> x = np.random.random(100)
    >>> lb = ti.oneside.lognormal(x, 0.1, 0.95)

    Estimate the 90th percentile upper bound with 95% confidence of the
    following 100 random samples from a lognormal distribution.

    >>> ub = ti.oneside.lognormal(x, 0.9, 0.95)

    """
    x = numpy_array(x)  # check if numpy array, if not make numpy array
    x = assert_2d_sort(x)
    # Bound the log-transformed data with the normal method, then transform
    # back; log/exp are monotonic so percentiles are preserved.
    return np.exp(normal(np.log(x), p, g))
def non_parametric(x, p, g):
    r"""
    Compute one-side tolerance bound using traditional non-parametric method.

    Computes a tolerance interval for any percentile, confidence level, and
    number of samples using the traditional non-parametric method [1] [2].
    This assumes that the true distribution is continuous.

    Parameters
    ----------
    x : ndarray (1-D, or 2-D)
        Numpy array of samples to compute the tolerance bound. Assumed data
        type is np.float. Shape of (m, n) is assumed for 2-D arrays with m
        number of sets of sample size n.
    p : float
        Percentile for the TI to estimate.
    g : float
        Confidence level where g > 0. and g < 1.

    Returns
    -------
    ndarray (1-D)
        The non-parametric tolerance interval bound. Returns np.nan if a
        non-parametric tolerance interval does not exist for the combination
        of percentile, confidence level, and number of samples.

    Notes
    -----
    The non-parametric tolerance interval only exists for certain
    combinations of percentile, confidence level, and number of samples.

    References
    ----------
    [1] Bertsimas, D., Gupta, V., & Kallus, N. (2017). Learning-based robust
        optimization: Procedures and statistical guarantees. ArXiv Preprint
        ArXiv:1704.04342.

    [2] 9.5.5.3 Nonparametric Procedure. (2017). In MMPDS-12 : Metallic
        materials properties development and standardization. Battelle
        Memorial Institute.

    Examples
    --------
    Estimate the 10th percentile bound with 95% confidence of the
    following 300 random samples from a normal distribution.

    >>> import numpy as np
    >>> import toleranceinterval as ti
    >>> x = np.random.random(300)
    >>> bound = ti.oneside.non_parametric(x, 0.1, 0.95)

    Estimate the 90th percentile bound with 95% confidence of the
    following 300 random samples from a normal distribution.

    >>> bound = ti.oneside.non_parametric(x, 0.9, 0.95)

    """
    x = numpy_array(x)  # check if numpy array, if not make numpy array
    x = assert_2d_sort(x)
    m, n = x.shape
    r = np.arange(0, n)
    # Confidence that order statistic r bounds the requested percentile:
    # survival function for a lower (left-tail) bound, CDF for an upper bound.
    if p < 0.5:
        left_tail = True
        confidence_index = binom.sf(r, n, p)
    else:
        left_tail = False
        confidence_index = binom.cdf(r, n, p)
    boolean_index = confidence_index >= g
    if boolean_index.sum() > 0:
        # Pick the order statistic closest to the data's tail that still
        # meets the requested confidence.
        if left_tail:
            return x[:, np.max(np.where(boolean_index))]
        else:
            return x[:, np.min(np.where(boolean_index))]
    else:
        # No order statistic achieves confidence g with n samples.
        return np.nan*np.ones(m)
def hanson_koopmans(x, p, g, j=-1, method='secant', max_iter=200, tol=1e-5,
step_size=1e-4):
r"""
Compute left tail probabilities using the HansonKoopmans method [1].
Runs the HansonKoopmans solver object to find the left tail bound for any
percentile, confidence level, and number of samples. This assumes the
lowest value is the first order statistic, but you can specify the index
of the second order statistic as j.
Parameters
----------
x : ndarray (1-D, or 2-D)
Numpy array of samples to compute the tolerance bound. Assumed data
type is np.float. Shape of (m, n) is assumed for 2-D arrays with m
number of sets of sample size n.
p : float
Percentile for lower limits when p < 0.5 and upper limits when
p >= 0.5.
g : float
Confidence level where g > 0. and g < 1.
j : int, optional
Index of the second value to use for the second order statistic.
Default is the last value j = -1 = n-1 if p < 0.5. If p >= 0.5,
the second index is defined as index=n-j-1, with default j = n-1.
method : string, optional
Which rootfinding method to use to solve for the Hanson-Koopmans
bound. Default is method='secant' which appears to converge
quickly. Other choices include 'newton-raphson' and 'halley'.
max_iter : int, optional
Maximum number of iterations for the root finding method.
tol : float, optional
Tolerance for the root finding method to converge.
step_size : float, optional
Step size for the secant solver. Default step_size = 1e-4.
Returns
-------
ndarray (1-D)
The Hanson-Koopmans toleranace interval bound as np.float with shape m.
Returns np.nan if the rootfinding method did not converge.
Notes
-----
The Hanson-Koopmans bound assumes the true distribution belongs to the
log-concave CDF class of distributions [1].
This implemnation will | |
import pika.adapters
import pika
import uuid
import json
EXCHANGE = 'chatexchange'        # exchange used for all chat traffic
EXCHANGE_TYPE = 'topic'          # topic type allows wildcard routing keys
BINDING_KEY_DEFAULT = 'public.*'  # default routing-key binding pattern
PORT = 5672                      # standard AMQP broker port
# few utility print functions
def pi(msg):
    """Trace helper: log entry into function *msg*."""
    # print() with a single argument behaves identically under Python 2
    # (parenthesized expression) and makes the helper Python 3 compatible.
    print('\n[RabbitMQClient] : inside ' + msg.upper() + '()')
def pc(msg):
    """Trace helper: log that function *msg* is about to be called."""
    # print() call form is py2/py3 compatible for a single argument.
    print('[RabbitMQClient] : Calling ' + msg.upper() + '() function...')
def ps(msg):
    """Trace helper: log that a call to function *msg* succeeded."""
    # print() call form is py2/py3 compatible for a single argument.
    print('[RabbitMQClient] : Call ' + msg.upper() + '() successful')
def pr(msg):
    """Trace helper: log return from function *msg*."""
    # print() call form is py2/py3 compatible for a single argument.
    print('[RabbitMQClient] : Returning from ' + msg.upper() + '()\n')
def pp(self, msg):
    """Debug helper: dump the client's internal state, labelled *msg*.

    Note: module-level function that takes the client instance explicitly
    (not a method). The comma-form Python 2 prints are left unchanged —
    converting them to print() would alter the output.
    """
    print '[RabbitMQClient] : ' + msg.upper() + ' PARAMETERS : '
    print 'self._connection = ', self._connection
    print 'self._connected = ', self._connected
    print 'self._connecting = ', self._connecting
    print 'self._channel = ', self._channel
    print 'self._closing = ', self._closing
    print 'self._closed = ', self._closed
    print 'self._consumer_tag = ', self._consumer_tag
    print 'self._deliveries = ', self._deliveries
    print 'self._acked = ', self._acked
    print 'self._nacked = ', self._nacked
    print 'self._message_number = ', self._message_number
    print 'self._credentials = ', self._credentials
    print 'self._parameters = ', self._parameters
    print 'self._queue = ', self._queue
    print 'self.websocket = ', self.websocket
    print 'self._status = ', self._status
    print 'self._person = ', self._person
    print 'self._clientid = ', self._clientid
    print 'self._participants = ', self._participants
class RabbitMqClient(object):
"""
This is a RabbitMQ Client using the TornadoConnection Adapter that will
handle unexpected interactions with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
It alos uses delivery confirmations and illustrates one way to keep track of
messages that have been sent and if they've been confirmed by RabbitMQ.
"""
    def __init__(self):
        """Create a new instance of the consumer class.

        NOTE(review): an earlier docstring mentioned an AMQP URL parameter,
        but the connection parameters are hardcoded below (guest@localhost).
        """
        pi('__init__')
        self._connection = None      # pika TornadoConnection handle
        self._connected = False      # set True by on_connection_opened
        self._connecting = False     # guards against concurrent connect() calls
        self._channel = None
        self._closing = False        # True once we initiate a shutdown
        self._closed = False
        self._consumer_tag = None
        self._deliveries = []        # delivery tags awaiting broker confirmation
        self._acked = 0
        self._nacked = 0
        self._message_number = 0
        self._credentials = pika.PlainCredentials('guest', 'guest')
        self._parameters = pika.ConnectionParameters(host='localhost',
                                                     port=PORT,
                                                     virtual_host='/',
                                                     credentials=self._credentials)
        # Unique per-client queue name.
        self._queue = 'queue-' + str(uuid.uuid4())
        self.websocket = None
        self._status = 0             # 0 = disconnected, 1 = connected
        self._person = None
        self._clientid = None
        self._participants = 0
        pp(self, '__INIT__')
        pr('__init__')
    def connect(self):
        """This method connects to RabbitMQ via the Tornado Connection Adapter,
        returning the connection handle.

        When the connection is established, the on_connection_opened method
        will be invoked by pika. Returns ``None`` (without connecting) if a
        connection attempt is already in progress.

        :rtype: pika.SelectConnection
        """
        pi('connect')
        # Re-entrancy guard: allow only one in-flight connection attempt.
        if self._connecting:
            print 'RabbitMQClient: Already connecting to RabbitMQ'
            return
        print 'RabbitMQClient: Connecting to RabbitMQ on localhost:5672, Object: %s' % (self,)
        self._connecting = True
        pp(self, 'CONNECT')
        return pika.adapters.TornadoConnection(parameters=self._parameters,
                                               on_open_callback=self.on_connection_opened,
                                               stop_ioloop_on_close=False)
    def on_connection_opened(self, connection):
        """This method is called by pika once the connection to RabbitMQ has
        been established. It stores the connection handle, registers the
        close callback, and opens a channel.

        :type connection: pika.adapters.TornadoConnection
        """
        pi('on_connection_opened')
        # Mark the client connected before wiring up callbacks.
        self._status = 1
        self._connected = True
        self._connection = connection
        pc('add_on_connection_close_callback')
        self.add_on_connection_close_callback()
        ps('add_on_connection_close_callback')
        pc('open_channel')
        self.open_channel()
        ps('open_channel')
        pp(self, 'ON_CONNECTION_OPENED')
        pr('on_connection_opened')
    def add_on_connection_close_callback(self):
        """This method adds an on close callback that will be invoked by pika
        when RabbitMQ closes the connection to the publisher unexpectedly.
        """
        pi('add_on_connection_close_callback')
        pc('self._connection.add_on_close_callback')
        # on_connection_closed decides whether to reconnect or shut down.
        self._connection.add_on_close_callback(callback_method=self.on_connection_closed)
        ps('self._connection.add_on_close_callback')
        pp(self, 'ADD_ON_CONNECTION_CLOSE_CALLBACK')
        pr('add_on_connection_close_callback')
    def on_connection_closed(self, connection, reply_code, reply_text):
        """This method is invoked by pika when the connection to RabbitMQ is
        closed unexpectedly. Since it is unexpected, we will reconnect to
        RabbitMQ if it disconnects.
        :param pika.connection.Connection connection: The closed connection obj
        :param int reply_code: The server provided reply_code if given
        :param str reply_text: The server provided reply_text if given
        """
        pi('on_connection_closed')
        # Reset connection state before deciding whether to reconnect.
        self._channel = None
        self._connecting = False
        self._connected = False
        self._status = 0
        if self._closing:
            # Deliberate shutdown in progress: do not reschedule a reconnect.
            print 'connection already closing'
            return
        else:
            print "Connection closed, reopening in 5 seconds: reply_code : [%d] : reply_text : %s " % (reply_code, reply_text)
            # Retry on the IOLoop after a 5 second back-off.
            pc('self._connection.add_timeout')
            self._connection.add_timeout(5, self.reconnect)
            ps('self._connection.add_timeout')
        pp(self, 'on_connection_closed')
        pr('on_connection_closed')
    def reconnect(self):
        """Will be invoked by the IOLoop timer if the connection is
        closed. See the on_connection_closed method.
        """
        pi('reconnect')
        if not self._closing:
            # Create a new connection
            # NOTE(review): connect() returns None when a connect is already
            # in flight, which would clobber self._connection here -- confirm.
            pc('self.connect')
            self._connection = self.connect()
            ps('self.connect')
        pp(self, 'reconnect')
        pr('reconnect')
    def close_connection(self):
        """This method closes the connection to RabbitMQ and resets all
        per-connection state, so the object is not reusable afterwards
        (parameters and credentials are also dropped).
        """
        pi('close_connection')
        print 'closing connection'
        if self._closing:
            print 'connection is already closing...'
            return
        self._closing = True
        print 'invoking connection.close() method'
        pc('self._connection.close')
        self._connection.close()
        ps('self._connection.close')
        # NOTE(review): state is torn down synchronously below, without
        # waiting for pika's close callback to fire -- confirm intentional.
        # ('connnection' typo in the log message kept as-is; runtime string.)
        print 'connnection closed'
        self._connecting = False
        self._connected = False
        if self._channel:
            self._channel = None
        if self._connection:
            self._connection = None
        if self._consumer_tag:
            self._consumer_tag = None
        if self._queue:
            self._queue = None
        if self.websocket:
            self.websocket = None
        # Dropping parameters/credentials makes a later reconnect impossible.
        self._parameters = None
        self._credentials = None
        self._status = 0
        self._closed = True
        self._person = None
        pp(self, 'close_connection')
        pr('close_connection')
    def open_channel(self):
        """Open a new channel with RabbitMQ by issuing the Channel.Open RPC
        command. When RabbitMQ responds that the channel is open, the
        on_channel_open callback will be invoked by pika.
        """
        pi('open_channel')
        print 'Creating a new channel for connection : ', self._connection
        pc('self._connection.channel')
        # The returned channel is provisional; on_channel_open re-assigns it.
        self._channel = self._connection.channel(on_open_callback=self.on_channel_open)
        ps('self._connection.channel')
        pp(self, 'open_channel')
        pr('open_channel')
    def on_channel_open(self, channel):
        """This method is invoked by pika when the channel has been opened.
        The channel object is passed in so we can make use of it.
        Since the channel is now open, we'll declare the exchange to use.
        :param pika.channel.Channel channel: The channel object
        """
        pi('on_channel_open')
        self._status = 2  # status 2 == channel open
        print 'Channel opened'
        self._channel = channel
        # Install the channel-close handler before declaring the exchange.
        pc('self.add_on_channel_close_callback')
        self.add_on_channel_close_callback()
        ps('self.add_on_channel_close_callback')
        pc('self.setup_exchange')
        self.setup_exchange()
        ps('self.setup_exchange')
        pp(self, 'on_channel_open')
        pr('on_channel_open')
    def close_channel(self):
        """Call to close the channel with RabbitMQ cleanly by issuing the
        Channel.Close RPC command.
        """
        pi('close_channel')
        print 'Closing the channel... '
        pc('self._channel.close')
        self._status = 1  # back to connected-without-channel
        self._channel.close()
        ps('self._channel.close')
        # self._channel = None
        print 'channel closed'
        # NOTE(review): the handle is dropped here rather than in the channel
        # close callback -- confirm intentional.
        if self._channel:
            self._channel = None
        pp(self, "close_channel")
        pr('close_channel')
    def add_on_channel_close_callback(self):
        """This method tells pika to call the on_channel_closed method if
        RabbitMQ unexpectedly closes the channel.
        """
        pi('add_on_channel_close_callback')
        print 'Adding channel close callback'
        pc('self._channel.add_on_close_callback')
        self._channel.add_on_close_callback(self.on_channel_closed)
        ps('self._channel.add_on_close_callback')
        pp(self, 'add_on_channel_close_callback')
        pr('add_on_channel_close_callback')
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
pi('on_channel_closed')
print "Channel %i was closed: reply_code : [%d] reply_text : %s " % (channel, reply_code, reply_text)
self._status = 1
print 'now closing connection invoked..'
pc('self.on_channel_closed')
self.close_connection()
ps('self.on_channel_closed')
pp(self, 'on_channel_closed')
pr('on_channel_closed')
    def setup_exchange(self):
        """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
        command. When it is complete, the on_exchange_declareok method will
        be invoked by pika.

        The exchange name and type come from the module-level EXCHANGE and
        EXCHANGE_TYPE constants.
        """
        pi('setup_exchange')
        print 'Declaring exchange : ', EXCHANGE
        pc('self._channel.exchange_declare')
        # Durable, non-auto-delete exchange; declaration is confirmed via
        # the on_exchange_declareok callback (nowait=False).
        self._channel.exchange_declare(exchange=EXCHANGE,
                                       exchange_type=EXCHANGE_TYPE,
                                       durable=True,
                                       auto_delete=False,
                                       nowait=False,
                                       callback=self.on_exchange_declareok)
        ps('self._channel.exchange_declare')
        pp(self, 'setup_exchange')
        pr('setup_exchange')
    def on_exchange_declareok(self, frame):
        """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
        command.
        :param pika.Frame.Method frame: Exchange.DeclareOk response frame (unused)
        """
        pi('on_exchange_declareok')
        self._status = 3  # status 3 == exchange declared
        print 'Exchange declared'
        pc('self.setup_queue')
        self.setup_queue()
        ps('self.setup_queue')
        pp(self, 'on_exchange_declareok')
        pr('on_exchange_declareok')
    def setup_queue(self):
        """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
        command. When it is complete, the on_queue_declareok method will
        be invoked by pika.

        The queue name is the per-instance UUID queue generated in __init__.
        """
        pi('setup_queue')
        print 'Declaring queue : for channel object : ', self._channel
        pc('self._channel.queue_declare')
        # Durable but auto-delete: the queue disappears once its consumer
        # goes away; declaration is confirmed via on_queue_declareok.
        self._channel.queue_declare(queue=self._queue,
                                    durable=True,
                                    exclusive=False,
                                    auto_delete=True,
                                    nowait=False,
                                    arguments=None,
                                    callback=self.on_queue_declareok)
        ps('self._channel.queue_declare')
        pp(self, 'setup_queue')
        pr('setup_queue')
    def on_queue_declareok(self, method_frame):
        """Method invoked by pika when the Queue.Declare RPC call made in
        setup_queue has completed. Continues the setup by binding the queue
        with the default binding key.
        :param pika.frame.Method method_frame: The Queue.DeclareOk frame (unused)
        """
        pi('on_queue_declareok')
        self._status = 4  # status 4 == queue declared
        print 'calling bind_queue method with default BINDING_KEY'
        pc('self.bind_queue')
        self.bind_queue(BINDING_KEY_DEFAULT)
        ps('self.bind_queue')
        pp(self, 'on_queue_declareok')
        pr('on_queue_declareok')
def bind_queue(self, binding_key):
"""In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command.
:param string binding_key: The routing_key argument
"""
pi('bind_queue')
print 'Binding %s to | |
0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif xc >= 7 and yc >= 7:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up or left: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'left':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif xc >= 7 and yc <= 2:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter down or left: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'down' and zc != 'left':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
else:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up, down, left, or right: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'down' and zc != 'left' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
wait()
if zc == 'up':
yc2 = yc - 1
yc3 = yc - 2
yc4 = yc - 3
if player_board[yc][xc] == 'O' and player_board[yc2][xc] == 'O' and player_board[yc3][xc] == 'O' and player_board[yc4][xc] == 'O':
player_board[yc][xc] = '4'
player_board[yc2][xc] = '4'
player_board[yc3][xc] = '4'
player_board[yc4][xc] = '4'
break
else:
screen.addstr('Your coordinates for this ship are colliding with the coordinates for another ship. Please re-enter the coordinates carefully and pick a direction which won\'t collide with another ship.')
wait()
continue
elif zc == 'down':
yc2 = yc + 1
yc3 = yc + 2
yc4 = yc + 3
if player_board[yc][xc] == 'O' and player_board[yc2][xc] == 'O' and player_board[yc3][xc] == 'O' and player_board[yc4][xc] == 'O':
player_board[yc][xc] = '4'
player_board[yc2][xc] = '4'
player_board[yc3][xc] = '4'
player_board[yc4][xc] = '4'
break
else:
screen.addstr('Your coordinates for this ship are colliding with the coordinates for another ship. Please re-enter the coordinates carefully and pick a direction which won\'t collide with another ship.')
wait()
continue
elif zc == 'right':
xc2 = xc + 1
xc3 = xc + 2
xc4 = xc + 3
if player_board[yc][xc] == 'O' and player_board[yc][xc2] == 'O' and player_board[yc][xc3] == 'O' and player_board[yc][xc4] == 'O':
player_board[yc][xc] = '4'
player_board[yc][xc2] = '4'
player_board[yc][xc3] = '4'
player_board[yc][xc4] = '4'
break
else:
screen.addstr('Your coordinates for this ship are colliding with the coordinates for another ship. Please re-enter the coordinates carefully and pick a direction which won\'t collide with another ship.')
wait()
continue
else:
xc2 = xc - 1
xc3 = xc - 2
xc4 = xc - 3
if player_board[yc][xc] == 'O' and player_board[yc][xc2] == 'O' and player_board[yc][xc3] == 'O' and player_board[yc][xc4] == 'O':
player_board[yc][xc] = '4'
player_board[yc][xc2] = '4'
player_board[yc][xc3] = '4'
player_board[yc][xc4] = '4'
break
else:
screen.addstr('Your coordinates for this ship are colliding with the coordinates for another ship. Please re-enter the coordinates carefully and pick a direction which won\'t collide with another ship.')
wait()
continue
screen.clear()
wait()
def make_player_ship5():
screen.clear()
screen.refresh()
screen.addstr(0, 0, 'Now we will generate your fifth ship. Please answer the following questions correctly, because if your answer doesn\'t meet our set requirements, the question will be asked again.\n')
screen.refresh()
wait()
while True:
while True:
try:
screen.clear()
prompt_str = 'Where do you want the first x coordinate of this ship to be? Enter a number between 0 to 9: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
x = int(screen.getstr(0, l))
while x >= 10 or x <= -1:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a number between 0 and 9.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
x = int(screen.getstr(0, l))
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a number between 0 and 9.')
screen.refresh()
wait()
screen.clear()
wait()
while True:
try:
prompt_str = 'Where do you want the first y coordinate of this ship to be? Enter a number between 0 to 9: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
y = int(screen.getstr(0, l))
while y >= 10 or y <= -1:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a number between 0 and 9.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
y = int(screen.getstr(0, l))
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a number between 0 and 9.')
screen.refresh()
wait()
screen.clear()
wait()
xc = x
yc = 9 - y
if xc <= 3 and yc != 1 and yc != 8 and yc != 0 and yc != 9 and yc != 2 and yc != 7 and yc != 3 and yc != 6:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up, down, or right: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'down' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif xc >= 6 and yc != 1 and yc != 8 and yc != 0 and yc != 9 and yc != 2 and yc != 7 and yc != 3 and yc != 6:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up, down, or left: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'down' and zc != 'left':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif yc <= 3 and xc != 2 and xc != 7 and xc != 1 and xc != 8 and xc != 0 and xc != 9 and xc != 3 and xc != 6:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter down, left, or right: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'left' and zc != 'down' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
elif yc >= 6 and xc != 2 and xc != 7 and xc != 1 and xc != 8 and xc != 0 and xc != 9 and xc != 3 and xc != 6:
while True:
try:
prompt_str = 'Which direction do you want your ship to extend in? Enter up, left, or rights: '
l = len(prompt_str)
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
while zc != 'up' and zc != 'left' and zc != 'right':
screen.clear()
screen.addstr(0, 0, 'You didn\'t enter a direction that was allowed.')
screen.refresh()
wait()
screen.clear()
screen.addstr(0, 0, prompt_str)
zc = screen.getstr(0, l).lower()
break
except:
screen.clear()
screen.addstr(0, | |
at location: " + str(has_substring)
self._sentence = self._sentence.replace("XXX", str(int(self._log_person_investment)))
has_substring = self._sentence.find("YYY")
if(has_substring != -1):
print "[6] Found the substring 'YYY' at location: " + str(has_substring)
self._sentence = self._sentence.replace("YYY", str(int(self._log_robot_investment)))
print "[6] Saying: '" + self._sentence + "'"
#It says the sentence generated above only if
#the valued returned by the person is different from zero.
self.myPuppet.say_something(str(self._sentence))
else:
print "[6] Saying Nothing because the sentence in the XML file is '" + str(self._sentence) + "'"
if self.myParser._gaze_list[self._log_trial] == "True":
print "[6] looking to the monitor..."
#The robot looks to the monitor (thinking what to do)
#self.myPuppet.enable_face_tracking(False) #disable face tracking
self.myPuppet.look_to("HeadPitch", 35.0, SPEED) #angle(radians) + speed
self.STATE_MACHINE = 7 #next state
#STATE-7 The Banker robot gives a reward
if self.STATE_MACHINE == 7:
if self.myParser._gaze2_list[self._log_trial] == "True":
print "[7] Looking to the screen Robot 2"
self.myPuppet_2.look_to("HeadPitch", 35.0, SPEED)
time.sleep(0.5)
#self.myPuppet_2.enable_face_tracking(False) #enables face tracking
print "[7] The banker is thinking (it takes time)..."
time.sleep(random.randint(2, 4))
print "[7] Waiting for Banker Robot reward..."
#Updating the multiplication values
self._log_player_b_investment = int(self.myParser._binv_list[self._log_trial]) # Player B return for the robot
if(self._log_player_b_investment<0): self._log_player_b_investment = 0 #equal to zero if negative
# Pointing or not
if self.myParser._pointing2_list[self._log_trial] == "True":
print "[7] pointing == True"
#Sort a random value and use it to move the arm
random_value = random.random()
if (random_value >= 0.0 and random_value <= 0.5):
self.myPuppet_2.left_arm_pointing(True, SPEED)
#right arm movement
elif (random_value > 0.5 and random_value <= 1.0):
self.myPuppet_2.right_arm_pointing(True, SPEED)
#Reset the arms (if they have been moved)
print "[7] pointing == False"
self.myPuppet_2.right_arm_pointing(False, SPEED)
self.myPuppet_2.left_arm_pointing(False, SPEED)
#Update the TOTAL
print "[7] The Banker invested: " + str(self._log_player_b_investment)
#The total of the person in the single round is given from
#the amount not invested + the money that player b gave back (half of them)
self._log_person_total += (10-self._log_person_investment) + int(self._log_person_investment * 3.0 * float(self.myParser._bmf_list[self._log_trial]))
self._log_robot_total += (10-self._log_robot_investment) + int(self._log_player_b_investment) # TODO: take this value from CSV
local_string = "Player A: " + str(self._log_person_investment) + " and Player B: " + str(self._log_robot_investment) + '\n'
local_string += "The banker received: " + str(int(self._log_person_investment * 3.0)) + " from Player A"
local_string += " and received " + str(int(self._log_robot_investment * 3.0)) + " from Player B" + '\n'
local_string += "The banker returned to Player A: " + str(int(self._log_person_investment * 3.0 * float(self.myParser._bmf_list[self._log_trial]))) + '\n'
local_string += "The banker returned to Player B: " + str(int(self._log_player_b_investment)) + '\n' # TODO: change this variable and take it from XML
local_string += "Please press START to begin a new round..." + '\n'
#total, pinv, round_tot, rinv, rslider, text
self.emit(self.update_gui_signal, self._log_person_total, self._log_robot_total, local_string)
if self.myParser._gaze2_list[self._log_trial] == "True":
print "[7] Enabling face tracking Robot 2"
self.myPuppet_2.look_to("HeadPitch", 0, SPEED)
time.sleep(0.5)
#self.myPuppet_2.enable_face_tracking(True) #enables face tracking
print "[7] Switch to the next state"
self.STATE_MACHINE = 8 #next state
#STATE-8 Banker talks and says the reward to the players
if self.STATE_MACHINE == 8:
print "[8] Banker looks to participant"
self.myPuppet_2.look_to("HeadYaw", -20.0, SPEED)
time.sleep(0.5)
print "[8] The Banker says to player A what returned"
#Take a sentence from the XML
self._sentence = self.myParser._word2_list[self._log_trial]
self._sentence = str(self._sentence) #convert to string
#If the sentence in the XML file is equal to "." or "-" or "" it does not say anything.
if(self._sentence != "." and self._sentence != "" and self._sentence != "-"):
#Check if XXX is present and replace it
has_substring = self._sentence.find("XXX")
if(has_substring != -1):
print "[8] Found the substring 'XXX' at location: " + str(has_substring)
self._sentence = self._sentence.replace("XXX", str(self._log_person_investment))
has_substring = self._sentence.find("YYY")
if(has_substring != -1):
print "[8] Found the substring 'YYY' at location: " + str(has_substring)
self._sentence = self._sentence.replace("YYY", str(int(self._log_person_investment * 3.0 * float(self.myParser._bmf_list[self._log_trial]))))
self.myPuppet_2.say_something(str(self._sentence))
else:
print "[8] Saying Nothing because the sentence in the XML file is '" + str(self._sentence) + "'"
#Sleep between the two sentences
#time.sleep(3.0)
print "[8] Banker looks to participant"
self.myPuppet_2.look_to("HeadYaw", +20.0, SPEED)
time.sleep(0.5)
print "[8] The banker says to player B what returned"
#Take a sentence from the XML
self._sentence = self.myParser._word3_list[self._log_trial]
self._sentence = str(self._sentence) #convert to string
#If the sentence in the XML file is equal to "." or "-" or "" it does not say anything.
if(self._sentence != "." and self._sentence != "" and self._sentence != "-"):
#Check if XXX is present and replace it
has_substring = self._sentence.find("XXX")
if(has_substring != -1):
print "[8] Found the substring 'XXX' at location: " + str(has_substring)
self._sentence = self._sentence.replace("XXX", str(self._log_person_investment))
has_substring = self._sentence.find("YYY")
if(has_substring != -1):
print "[8] Found the substring 'YYY' at location: " + str(has_substring)
self._sentence = self._sentence.replace("YYY", str(int(self._log_person_investment * 3.0 * float(self.myParser._bmf_list[self._log_trial]))))
self.myPuppet_2.say_something(str(self._sentence))
else:
print "[8] Saying Nothing because the sentence in the XML file is '" + str(self._sentence) + "'"
print "[8] Banker looks to the monitor"
self.myPuppet_2.look_to("HeadYaw", 0.0, SPEED)
self.myPuppet_2.look_to("HeadPitch", 35.0, SPEED) #angle(radians) + speed
time.sleep(0.5)
self.STATE_MACHINE = 9 #next state
#STATE-9 Saving in the logbook
if self.STATE_MACHINE == 9:
print "[9] Saving the trial in the logbook"
self.logger.AddLine(self._log_trial+1, self._log_person_investment, self._log_robot_investment, self._log_player_b_investment,
self._log_pmf, self._log_bmf, self._log_person_total, self._log_gaze, self._log_pointing, self._log_timer)
print ("[9] trial, person_investment, robot_investment, person_investment_second, log_robot_investment_second, player_b_investment, pmf, bmf, person_total, gaze, pointing, timer, timer_second")
print ("[9] " + str(self._log_trial+1) + "," + str(self._log_person_investment) + "," + str(self._log_robot_investment) +
"," + "," + str(self._log_pmf) + "," + str(self._log_bmf) + "," + str(self._log_person_total) + "," + str(self._log_gaze) +
"," + str(self._log_pointing) + "," + str(self._log_timer) )
if self._log_trial+1 != self.myParser._size:
self.STATE_MACHINE = 10 #cycling to state 12
self.emit(self.enable_components_gui_signal, True, False, False) #Enable the Start Button
self._log_trial = self._log_trial + 1
elif self._log_trial+1 == self.myParser._size:
self.STATE_MACHINE = 11 #experiment finished
#STATE-10 Waiting for the subject pressing START
if self.STATE_MACHINE == 10:
if self._start_pressed == True:
self._start_pressed = False
print "[10] Start pressed..."
self.emit(self.enable_components_gui_signal, False, False, False)
self.STATE_MACHINE = 2 #cycling to state 2
time.sleep(1)
#STATE-11 Final state is called to shutdown the robot
if self.STATE_MACHINE == 11:
print "[11] The game is finished"
self._xml_uploaded = False #reset status variable
self._start_pressed = False
self._log_trial = 0
self.STATE_MACHINE = 0 #cycling to state 0
#total, player_investment, round_total, your_investment, robot_investment
local_string = "Player A score is: " + str(self._log_person_total) + '\n'
local_string += "Player B score is: " + str(self._log_robot_total) + '\n'
local_string += "The game is finished. Thank you..."
self.myPuppet.say_something("Thank you, It was nice to play with you.")
#total, player_investment, round_total, robot_investment, text_label=""
self.emit(self.update_gui_signal, 0, 0, local_string)
self.emit(self.enable_components_gui_signal, False, False, False) #GUI components disabled
time.sleep(5)
    def start_experiment(self):
        # GUI callback: flag polled by the worker state machine (STATE-10).
        self._start_pressed = True
    def confirm(self, person_investment):
        """GUI callback: record the human player's investment for this round."""
        self._confirm_pressed = True
        self._log_person_investment = int(person_investment)
    def confirm_robot(self, robot_investment):
        """GUI callback: record the robot player's investment for this round."""
        self._confirm_pressed_robot = True
        self._log_robot_investment = int(robot_investment)
def ip(self, ip_string, port_string, ip_string_2, port_string_2):
print "IP: " + str(ip_string)
is_first_connected = False
is_second_connected = False
try:
self.myPuppet = nao.Puppet(ip_string, port_string, True)
self.emit(self.yes_robot_signal)
except Exception,e:
print "\nERROR: Impossible to find the FIRST robot!\n"
print "Error was: ", e
self.emit(self.no_robot_signal)
self._robot_connected=False
try:
self.myPuppet_2 = nao.Puppet(ip_string_2, port_string_2, True)
self.emit(self.yes_robot_signal)
except Exception,e:
print "\nERROR: Impossible to find the SECOND robot!\n"
print "Error was: ", e
self.emit(self.no_robot_signal)
self._robot_connected=False
# Both connected
self._robot_connected=True
    def xml(self, path):
        """Load and parse the experiment definition XML file at *path*.

        Sets self._xml_uploaded so the state machine knows whether a valid
        experiment description is available.
        """
        print("Looking for external files... ")
        if not os.path.isfile(str(path)):
            # NOTE(review): the message claims the program "will be stopped"
            # but we only flag the failure and return -- confirm intent.
            print("\n# ERROR: I cannot find the XML file. The programm will be stopped!\n")
            self._xml_uploaded = False
            return
        print("Initializing XML Parser... ")
        try:
            self.myParser.LoadFile(str(path))
            self.myParser.parse_experiment_list()
            self._xml_uploaded = True
        except:
            # Bare except: any parser failure is surfaced to the GUI via
            # bad_xml_signal instead of propagating.
            self.emit(self.bad_xml_signal)
            print("\n # ERROR: Impossible to read the XML file! \n")
            self._xml_uploaded = False
    def wake(self, state):
        """Wake up both robots when state is True, otherwise send them to rest."""
        if state == True:
            self.myPuppet.wake_up()
            self.myPuppet_2.wake_up()
        else:
            self.myPuppet.rest()
            self.myPuppet_2.rest()
    def face_tracking(self, state):
        """Enable (state=True) or disable face tracking on both robots."""
        self.myPuppet.enable_face_tracking(state)
        self.myPuppet_2.enable_face_tracking(state)
def session_info_update(self, info1, info2, info3):
my_string = str(info1) + "," + str(info2) + "," + str(info3)
print("SESSION INFO: ", info1, info2, info3)
self._log_first_line = my_string
self._session_info_given = True
    def stop(self):
        # Signals the worker loop to terminate (flag checked by the run loop).
        self.stopped = 1
    def __del__(self):
        # Block until the thread finishes before destruction -- presumably
        # QThread.wait(), preventing teardown of a running thread; confirm.
        self.wait()
## Class ExampleApp
#
# It is a GUI class created in pyQT
# and receive signals from the GUI
#
class ExampleApp(QtGui.QMainWindow, design.Ui_MainWindow):
def __init__(self):
super(self.__class__, self).__init__()
self.setupUi(self)
self.btnBrowse.clicked.connect(self.browse_folder) # When the button is pressed execute browse_folder function
#self.btnStartExperiment.clicked.connect(lambda: self.start_experiment(1))
self.btnStartExperiment.clicked.connect(self.start_experiment)
self.btnConnectToNao.clicked.connect(self.connect_pressed)
self.btnWakeUp.clicked.connect(self.wake_up_pressed)
self.btnRest.clicked.connect(self.rest_pressed)
self.btnFaceTrackingEnable.clicked.connect(lambda: self.face_tracking_pressed(True))
self.btnFaceTrackingDisable.clicked.connect(lambda: self.face_tracking_pressed(False))
self.btnSessionInfoConfirm.clicked.connect(self.session_info_pressed)
#Buttons investment
| |
<filename>pyccel/codegen/compiling/compilers.py<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------------------#
# This file is part of Pyccel which is released under MIT License. See the LICENSE file or #
# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. #
#------------------------------------------------------------------------------------------#
"""
Module handling everything related to the compilers used to compile the various generated files
"""
import json
import os
import shutil
import subprocess
import sysconfig
import warnings
from filelock import FileLock
from pyccel import __version__ as pyccel_version
from pyccel.errors.errors import Errors
errors = Errors()
# Set correct deployment target if on mac.
# sysconfig only yields MACOSX_DEPLOYMENT_TARGET for macOS builds, so the
# environment variable is exported only when a non-empty value is reported.
mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if mac_target:
    os.environ['MACOSX_DEPLOYMENT_TARGET'] = mac_target
# Python version string used to detect stale compiler config files
# (see different_version below).
python_version = sysconfig.get_python_version()
def different_version(compiler):
    """
    Determine whether the specified compiler matches or differs from
    the expected version of pyccel and python.
    """
    # Compare both recorded versions against the running environment at once.
    expected = (pyccel_version, python_version)
    recorded = (compiler['pyccel_version'], compiler['python_version'])
    return recorded != expected
compilers_folder = os.path.join(os.path.dirname(__file__),'..','..','compilers')

def _read_available_compilers():
    """Load every JSON compiler description found in compilers_folder,
    keyed by file name without the '.json' suffix."""
    configs = {}
    for fname in os.listdir(compilers_folder):
        if fname.endswith('.json'):
            # BUG FIX: the previous json.load(open(...)) expressions never
            # closed their file handles; use a context manager instead.
            with open(os.path.join(compilers_folder, fname)) as json_file:
                configs[fname[:-5]] = json.load(json_file)
    return configs

with FileLock(compilers_folder+'.lock'):
    # TODO: Add an additional search location for user provided compiler files
    available_compilers = _read_available_compilers()
    # Regenerate the default configs when none exist or they were written by
    # a different pyccel/python version, then reload them.
    if len(available_compilers)==0 or \
            different_version(next(iter(available_compilers.values()))):
        from pyccel.compilers.generate_default import generate_default
        generate_default()
        available_compilers = _read_available_compilers()

vendors = {c['family'] for c in available_compilers.values()}
sorted_compilers = {(c['family'],c['language']) : c for c in available_compilers.values()}
#------------------------------------------------------------
class Compiler:
"""
Class which handles all compiler options
Parameters
----------
name : str
Name of the family of compilers
language : str
Language that we are translating to
debug : bool
Indicates whether we are compiling in debug mode
"""
__slots__ = ('_debug','_info')
def __init__(self, vendor : str, language : str, debug=False):
if language=='python':
return
if vendor.endswith('.json') and os.path.exists(vendor):
self._info = json.load(open(vendor))
if language != self._info['language']:
warnings.warn(UserWarning("Language does not match compiler. Using GNU compiler"))
self._info = sorted_compilers[('GNU',language)]
else:
if vendor not in vendors:
raise NotImplementedError("Unrecognised compiler vendor : {}".format(vendor))
try:
self._info = sorted_compilers[(vendor,language)]
except KeyError as e:
raise NotImplementedError("Compiler not available") from e
self._debug = debug
def _get_exec(self, accelerators):
# Get executable
exec_cmd = self._info['mpi_exec'] if 'mpi' in accelerators else self._info['exec']
if shutil.which(exec_cmd) is None:
errors.report("Could not find compiler ({})".format(exec_cmd),
severity='fatal')
return exec_cmd
def _get_flags(self, flags = (), accelerators = ()):
"""
Collect necessary compile flags
Parameters
----------
flags : iterable of str
Any additional flags requested by the user
/ required by the file
accelerators : iterable or str
Accelerators used by the code
"""
flags = list(flags)
if self._debug:
flags.extend(self._info.get('debug_flags',()))
else:
flags.extend(self._info.get('release_flags',()))
flags.extend(self._info.get('general_flags',()))
# M_PI is not in the standard
#if 'python' not in accelerators:
# # Python sets its own standard
# flags.extend(self._info.get('standard_flags',()))
for a in accelerators:
flags.extend(self._info.get(a,{}).get('flags',()))
return flags
def _get_property(self, key, prop = (), accelerators = ()):
"""
Collect necessary compile property
Parameters
----------
property : iterable of str
Any additional values of the property
requested by the user / required by the file
accelerators : iterable or str
Accelerators used by the code
"""
# Use dict keys as an ordered set
prop = dict.fromkeys(prop)
prop.update(dict.fromkeys(self._info.get(key,())))
for a in accelerators:
prop.update(dict.fromkeys(self._info.get(a,{}).get(key,())))
return prop.keys()
def _get_includes(self, includes = (), accelerators = ()):
"""
Collect necessary compile include directories
Parameters
----------
includes : iterable of str
Any additional include directories requested by the user
/ required by the file
accelerators : iterable or str
Accelerators used by the code
"""
return self._get_property('includes', includes, accelerators)
def _get_libs(self, libs = (), accelerators = ()):
"""
Collect necessary compile libraries
Parameters
----------
libs : iterable of str
Any additional libraries requested by the user
/ required by the file
accelerators : iterable or str
Accelerators used by the code
"""
return self._get_property('libs', libs, accelerators)
def _get_libdirs(self, libdirs = (), accelerators = ()):
"""
Collect necessary compile library directories
Parameters
----------
libdirs : iterable of str
Any additional library directories
requested by the user / required by the file
accelerators : iterable or str
Accelerators used by the code
"""
return self._get_property('libdirs', libdirs, accelerators)
def _get_dependencies(self, dependencies = (), accelerators = ()):
"""
Collect necessary dependencies
Parameters
----------
dependencies : iterable of str
Any additional dependencies required by the file
accelerators : iterable or str
Accelerators used by the code
"""
return self._get_property('dependencies', dependencies, accelerators)
@staticmethod
def _insert_prefix_to_list(lst, prefix):
"""
Add a prefix into a list. E.g:
>>> lst = [1, 2, 3]
>>> _insert_prefix_to_list(lst, 'num:')
['num:', 1, 'num:', 2, 'num:', 3]
Parameters
----------
lst : iterable
The list into which the prefix is inserted
prefix : str
The prefix
"""
lst = [(prefix, i) for i in lst]
return [f for fi in lst for f in fi]
def _get_compile_components(self, compile_obj, accelerators = ()):
"""
Provide all components required for compiling
Parameters
----------
compile_obj : CompileObj
Object containing all information about the object to be compiled
accelerators : iterable of str
Name of all tools used by the code which require additional flags/includes/etc
Results
-------
exec_cmd : str
The command required to run the executable
inc_flags : iterable of strs
The include directories required to compile
libs_flags : iterable of strs
The libraries required to compile
libdirs_flags : iterable of strs
The directories containing libraries required to compile
m_code : iterable of strs
The objects required to compile
"""
# get includes
includes = self._get_includes(compile_obj.includes, accelerators)
inc_flags = self._insert_prefix_to_list(includes, '-I')
# Get dependencies (.o/.a)
m_code = self._get_dependencies(compile_obj.extra_modules, accelerators)
# Get libraries and library directories
libs = self._get_libs(compile_obj.libs, accelerators)
libs_flags = [s if s.startswith('-l') else '-l{}'.format(s) for s in libs]
libdirs = self._get_libdirs(compile_obj.libdirs, accelerators)
libdirs_flags = self._insert_prefix_to_list(libdirs, '-L')
exec_cmd = self._get_exec(accelerators)
return exec_cmd, inc_flags, libs_flags, libdirs_flags, m_code
def compile_module(self, compile_obj, output_folder, verbose = False):
"""
Compile a module
Parameters
----------
compile_obj : CompileObj
Object containing all information about the object to be compiled
output_folder : str
The folder where the result should be saved
verbose : bool
Indicates whether additional output should be shown
"""
accelerators = compile_obj.accelerators
# Get flags
flags = self._get_flags(compile_obj.flags, accelerators)
flags.append('-c')
# Get includes
includes = self._get_includes(compile_obj.includes, accelerators)
inc_flags = self._insert_prefix_to_list(includes, '-I')
# Get executable
exec_cmd = self._get_exec(accelerators)
if self._info['language'] == 'fortran':
j_code = (self._info['module_output_flag'], output_folder)
else:
j_code = ()
cmd = [exec_cmd, *flags, *inc_flags,
compile_obj.source, '-o', compile_obj.module_target,
*j_code]
compile_obj.acquire_lock()
try:
self.run_command(cmd, verbose)
finally:
compile_obj.release_lock()
def compile_program(self, compile_obj, output_folder, verbose = False):
"""
Compile a program
Parameters
----------
compile_obj : CompileObj
Object containing all information about the object to be compiled
output_folder : str
The folder where the result should be saved
verbose : bool
Indicates whether additional output should be shown
"""
accelerators = compile_obj.accelerators
# get flags
flags = self._get_flags(compile_obj.flags, accelerators)
# Get compile options
exec_cmd, includes, libs_flags, libdirs_flags, m_code = \
self._get_compile_components(compile_obj, accelerators)
if self._info['language'] == 'fortran':
j_code = (self._info['module_output_flag'], output_folder)
else:
j_code = ()
if compile_obj.is_module:
flags.append('-c')
cmd = [exec_cmd, *flags, *includes, *libdirs_flags,
*m_code, compile_obj.module_target,
'-o', compile_obj.target,
*libs_flags, *j_code]
compile_obj.acquire_lock()
try:
self.run_command(cmd, verbose)
finally:
compile_obj.release_lock()
return compile_obj.target
def compile_shared_library(self, compile_obj, output_folder, verbose = False, sharedlib_modname=None):
"""
Compile a module to a shared library
Parameters
----------
compile_obj : CompileObj
Object containing all information about the object to be compiled
output_folder : str
The folder where the result should be saved
verbose : bool
Indicates whether additional output should be shown
Returns
-------
file_out : str
Generated library name
"""
# Ensure python options are collected
accelerators = set(compile_obj.accelerators)
accelerators.remove('python')
# get flags
flags = self._get_flags(compile_obj.flags, accelerators)
accelerators.add('python')
# Collect compile information
exec_cmd, includes, libs_flags, libdirs_flags, m_code = \
self._get_compile_components(compile_obj, accelerators)
linker_libdirs_flags = ['-Wl,-rpath' if l == '-L' else l for l in libdirs_flags]
flags.insert(0,"-shared")
# Get name of file
ext_suffix = self._info['python']['shared_suffix']
sharedlib_modname = sharedlib_modname or compile_obj.python_module
file_out = os.path.join(compile_obj.source_folder, sharedlib_modname+ext_suffix)
cmd = [exec_cmd, *flags, *includes, *libdirs_flags, *linker_libdirs_flags,
*libs_flags, *m_code, compile_obj.module_target,
'-o', file_out]
compile_obj.acquire_lock()
try:
self.run_command(cmd, verbose)
finally:
compile_obj.release_lock()
return file_out
@staticmethod
def run_command(cmd, verbose):
"""
Run the provided command and collect the output
Parameters
----------
cmd : iterable
The command to run
verbose : bool
Indicates whether additional output should be shown
"""
cmd = [os.path.expandvars(c) for c in cmd]
if verbose:
print(' '.join(cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if verbose and out:
print(out)
if p.returncode != 0:
err_msg = "Failed to build module"
err_msg += "\n" + err
raise RuntimeError(err_msg)
if err:
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 12:50:13 2018
@author: madcas
"""
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig = load_dataset_mnist() # Load the MNIST dataset
#X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Visualize one example from the dataset
index = 6
plt.figure(1)
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
# Print the dataset characteristics
#X_train = X_train_orig/255.
#X_test = X_test_orig/255.
# Add a trailing channel axis and normalize pixel values to [0, 1]
X_train = X_train_orig[..., np.newaxis]/255.
X_test = X_test_orig[..., np.newaxis]/255.
#Y_train = convert_to_one_hot(Y_train_orig, 6).T
#Y_test = convert_to_one_hot(Y_test_orig, 6).T
# Labels are transposed to shape (num examples, label dims)
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("Número de ejemplos de entrenamiento: " + str(X_train.shape[0]))
print ("Número de ejemplos de testing: " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
def create_placeholders(n_H0, n_W0, n_C0, n_y):
    """
    Create the TF1 placeholders for the session.

    Arguments:
    n_H0 -- scalar, height of an input image
    n_W0 -- scalar, width of an input image
    n_C0 -- scalar, number of input channels
    n_y -- scalar, number of classes

    Returns:
    X -- placeholder for the input data, of shape [None, n_H0, n_W0, n_C0] and dtype "float"
    Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float"
    """
    # The leading None dimension leaves the batch size flexible.
    X = tf.placeholder(tf.float32, shape=(None, n_H0, n_W0, n_C0))
    Y = tf.placeholder(tf.float32, shape=(None, n_y))
    return X, Y
#X, Y = create_placeholders(64, 64, 3, 6)
#print ("X = " + str(X))
#print ("Y = " + str(Y))
####### Esto debería dar el Resultado ################
#X = Tensor("Placeholder_2:0", shape=(?, 64, 64, 3), dtype=float32)
#Y = Tensor("Placeholder_3:0", shape=(?, 6), dtype=float32)
#######################################################
def initialize_parameters():
    """
    Initialize the weight parameters for the convolutional network.
    The filter shapes (height, width, in_channels, out_channels) are:
                        W1 : [3, 3, 1, 8]
                        W2 : [3, 3, 8, 16]
                        W3 : [3, 3, 16, 32]
                        W4 : [3, 3, 32, 64]
    Each is created with tf.get_variable and a Xavier initializer.

    Returns:
    parameters -- a dictionary of tensors containing W1, W2, W3, W4
    """
    tf.set_random_seed(1)                              # fixed graph seed for reproducible initialization
    W1 = tf.get_variable("W1", [3, 3, 1, 8], initializer = tf.contrib.layers.xavier_initializer(seed = 0))
    W2 = tf.get_variable("W2", [3, 3, 8, 16], initializer = tf.contrib.layers.xavier_initializer(seed = 0))
    W3 = tf.get_variable("W3", [3, 3, 16, 32], initializer = tf.contrib.layers.xavier_initializer(seed = 0))
    W4 = tf.get_variable("W4", [3, 3, 32, 64], initializer = tf.contrib.layers.xavier_initializer(seed = 0))
    parameters = {"W1": W1,
                  "W2": W2,
                  "W3": W3,
                  "W4": W4}
    return parameters
#tf.reset_default_graph()
#
#with tf.Session() as sess_test:
# parameters = initialize_parameters()
# init = tf.global_variables_initializer()
# sess_test.run(init)
# print("W1 = " + str(parameters["W1"].eval()[1,1,1]))
# print("W2 = " + str(parameters["W2"].eval()[1,1,1]))
####### Esto debería dar el Resultado ################
#W1 = [ 0.00131723 0.1417614 -0.04434952 0.09197326 0.14984085 -0.03514394
# -0.06847463 0.05245192]
#W2 = [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058
# -0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228
# -0.22779644 -0.1601823 -0.16117483 -0.10286498]
#######################################################
def forward_propagation(X, parameters):
    """
    Implement the forward propagation of the model:
    CONV -> RELU -> CONV -> RELU -> MAXPOOL -> CONV -> RELU -> CONV -> RELU -> MAXPOOL
    -> FLATTEN -> FC(20) -> FC(10)

    Arguments:
    X -- input placeholder (training examples); NHWC layout assumed by the
         strides/ksize below
    parameters -- dictionary containing the weights "W1".."W4" from initialize_parameters

    Returns:
    Z5 -- output of the last LINEAR unit (logits; softmax is NOT applied here)
    """
    # Retrieve the weights from "parameters"
    W1 = parameters['W1']
    W2 = parameters['W2']
    W3 = parameters['W3']
    W4 = parameters['W4']
    # CONV2D: stride of 1, padding 'SAME'
    Z1 = tf.nn.conv2d(X, W1, strides = [1,1,1,1], padding = 'SAME')
    # RELU
    A1 = tf.nn.relu(Z1)
    # CONV2D: filters W2, stride 1, padding 'SAME'
    Z2 = tf.nn.conv2d(A1,W2, strides = [1,1,1,1], padding = 'SAME')
    # RELU
    A2 = tf.nn.relu(Z2)
    # MAXPOOL: window 2x2, stride 2, padding 'SAME'
    P1 = tf.nn.max_pool(A2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')
    # CONV2D: filters W3, stride 1, padding 'SAME'
    Z3 = tf.nn.conv2d(P1, W3, strides = [1,1,1,1], padding = 'SAME')
    # RELU
    A3 = tf.nn.relu(Z3)
    # CONV2D: filters W4, stride 1, padding 'SAME'
    Z4 = tf.nn.conv2d(A3,W4, strides = [1,1,1,1], padding = 'SAME')
    # RELU
    A4 = tf.nn.relu(Z4)
    # MAXPOOL: window 2x2, stride 2, padding 'SAME'
    P2 = tf.nn.max_pool(A4, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME')
    # FLATTEN to (batch, features)
    F = tf.contrib.layers.flatten(P2)
    # FULLY-CONNECTED layers; the final layer has no activation (raw logits),
    # so the softmax is applied later inside the loss.
    F2 = tf.contrib.layers.fully_connected(F, 20, None)
    Z5 = tf.contrib.layers.fully_connected(F2, 10, None)
    return Z5
#tf.reset_default_graph()
#
#with tf.Session() as sess:
# np.random.seed(1)
# X, Y = create_placeholders(64, 64, 3, 6)
# parameters = initialize_parameters()
# Z3 = forward_propagation(X, parameters)
# init = tf.global_variables_initializer()
# sess.run(init)
# a = sess.run(Z3, {X: np.random.randn(2,64,64,3), Y: np.random.randn(2,6)})
# print("Z3 = " + str(a))
# print("Z3 = " + str(Z3.shape))
####### Esto debería dar el Resultado ################
#Z3 = [[ 1.4416984 -0.24909666 5.450499 -0.2618962 -0.20669907 1.3654671 ]
# [ 1.4070846 -0.02573211 5.08928 -0.48669922 -0.40940708 1.2624859 ]]
#Z3 = (?, 6)
#######################################################
def compute_cost(Z3, Y):
    """
    Compute the cost function.

    Arguments:
    Z3 -- output of the forward propagation (last LINEAR unit / logits),
          of shape (number of examples, number of classes)
    Y -- placeholder with the "true" label vectors, same shape as Z3

    Returns:
    cost -- tensor of the softmax cross-entropy loss averaged over the batch
    """
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y))
    return cost
#tf.reset_default_graph()
#
#with tf.Session() as sess:
# np.random.seed(1)
# X, Y = create_placeholders(64, 64, 3, 6)
# parameters = initialize_parameters()
# Z3 = forward_propagation(X, parameters)
# cost = compute_cost(Z3, Y)
# init = tf.global_variables_initializer()
# sess.run(init)
# a = sess.run(cost, {X: np.random.randn(4,64,64,3), Y: np.random.randn(4,6)})
# print("cost = " + str(a))
####### Esto debería dar el Resultado ################
#cost = 4.6648693
#######################################################
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009, num_epochs = 50, minibatch_size = 16, print_cost = True):
"""
Implementa una Red Neuronal Convolucional de 3-Capas en Tensorflow:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Argumentos:
X_train -- Conjunto de entrenamiento, de tamaño (None, 64, 64, 3)
Y_train -- Etiquetas del conjunto de entrenamiento, de tamaño (None, n_y = 6)
X_test -- Conjunto de datos de Test, de tamaño (None, 64, 64, 3)
Y_test -- Etiquetas del conjunto de Test, de tamaño (None, n_y = 6)
learning_rate -- factor de aprendizaje en la optimización
num_epochs -- Número de epocas en el ciclo de optimización
minibatch_size -- Tamaño del minibatch
print_cost -- True: imprime el costo cada 100 epocas
Returna:
train_accuracy -- Número Real, Accuracy del conjunto de entrenamiento (X_train)
test_accuracy -- Número Real, Accuracy del conjunto de Test(X_test)
parameters -- parameters aprendidos por el modelo. Estos pueden ser usados para predecir.
"""
ops.reset_default_graph() # Permite correr nuevamente el modelo sin sobreescribir las tf variables
tf.set_random_seed(1) # (tensorflow seed)
seed = 3 #
(m, n_H0, n_W0, n_C0) = X_train.shape
n_y = Y_train.shape[1]
costs = [] # Para almacenar el costo
# Crear los PlaceHolders
X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
# Inicializar Parámetros
parameters = initialize_parameters()
# Forward propagation: Construir el forward propagation en el grafo de tensorflow
Z3 = forward_propagation(X, parameters)
# Cost function: Incluir la función de costo en el grafo de tensorflow
cost = compute_cost(Z3, Y)
# Backpropagation: Define el optimizador. Usar AdamOptimizer para minimizar el costo.
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Inicializar todas las variables globales
init = tf.global_variables_initializer()
# genera el objeto para guardar el modelo entrenado
saver = tf.train.Saver()
# Iniciar la sesión | |
<filename>plugins/jobs/girder_jobs/models/job.py
# -*- coding: utf-8 -*-
import datetime
from bson import json_util
from girder import events
from girder.constants import AccessType, SortDir
from girder.exceptions import ValidationException
from girder.models.model_base import AccessControlledModel
from girder.models.notification import Notification
from girder.models.token import Token
from girder.models.user import User
from ..constants import JobStatus, JOB_HANDLER_LOCAL
class Job(AccessControlledModel):
def initialize(self):
self.name = 'job'
compoundSearchIndex = (
('userId', SortDir.ASCENDING),
('created', SortDir.DESCENDING),
('type', SortDir.ASCENDING),
('status', SortDir.ASCENDING)
)
self.ensureIndices([(compoundSearchIndex, {}),
'created', 'parentId', 'celeryTaskId'])
self.exposeFields(level=AccessType.READ, fields={
'title', 'type', 'created', 'interval', 'when', 'status',
'progress', 'log', 'meta', '_id', 'public', 'parentId', 'asynchronous',
'updated', 'timestamps', 'handler', 'jobInfoSpec'})
self.exposeFields(level=AccessType.SITE_ADMIN, fields={'args', 'kwargs'})
    def validate(self, job):
        """Validate a job document before saving; only the status field is checked."""
        self._validateStatus(job['status'])
        return job
    def _validateStatus(self, status):
        """Raise ValidationException if status is not a recognized JobStatus value."""
        if not JobStatus.isValid(status):
            raise ValidationException(
                'Invalid job status %s.' % status, field='status')
    def _validateChild(self, parentJob, childJob):
        """Ensure a parent/child link is legal: no self-parenting, no re-parenting."""
        if str(parentJob['_id']) == str(childJob['_id']):
            raise ValidationException('Child Id cannot be equal to Parent Id')
        if childJob['parentId']:
            raise ValidationException('Cannot overwrite the Parent Id')
    def list(self, user=None, types=None, statuses=None,
             limit=0, offset=0, sort=None, currentUser=None, parentJob=None):
        """
        List a page of jobs for a given user.

        :param user: The user who owns the job.
        :type user: dict, 'all', 'none', or None.
        :param types: job type filter.
        :type types: array of type string, or None.
        :param statuses: job status filter.
        :type statuses: array of status integer, or None.
        :param limit: The page limit.
        :param offset: The page offset.
        :param sort: The sort field.
        :param parentJob: Parent Job.
        :param currentUser: User for access filtering.
        """
        return self.findWithPermissions(
            offset=offset, limit=limit, sort=sort, user=currentUser,
            types=types, statuses=statuses, jobUser=user, parentJob=parentJob)
def findWithPermissions(self, query=None, offset=0, limit=0, timeout=None, fields=None,
sort=None, user=None, level=AccessType.READ,
types=None, statuses=None, jobUser=None, parentJob=None, **kwargs):
"""
Search the list of jobs.
:param query: The search query (see general MongoDB docs for "find()")
:type query: dict
:param offset: The offset into the results
:type offset: int
:param limit: Maximum number of documents to return
:type limit: int
:param timeout: Cursor timeout in ms. Default is no timeout.
:type timeout: int
:param fields: A mask for filtering result documents by key, or None to return the full
document, passed to MongoDB find() as the `projection` param.
:type fields: `str, list of strings or tuple of strings for fields to be included from the
document, or dict for an inclusion or exclusion projection`.
:param sort: The sort order.
:type sort: List of (key, order) tuples.
:param user: The user to check policies against.
:type user: dict or None
:param level: The access level. Explicitly passing None skips doing
permissions checks.
:type level: AccessType
:param types: job type filter.
:type types: array of type string, or None.
:param statuses: job status filter.
:type statuses: array of status integer, or None.
:param jobUser: The user who owns the job.
:type jobUser: dict, 'all', 'none', or None.
:param parentJob: Parent Job.
:returns: A pymongo Cursor or CommandCursor. If a CommandCursor, it
has been augmented with a count function.
"""
if query is None:
query = {}
# When user is 'all', no filtering by user, list jobs of all users.
if jobUser == 'all':
pass
# When user is 'none' or None, list anonymous user jobs.
elif jobUser == 'none' or jobUser is None:
query['userId'] = None
# Otherwise, filter by user id
else:
query['userId'] = jobUser['_id']
if types is not None:
query['type'] = {'$in': types}
if statuses is not None:
query['status'] = {'$in': statuses}
if parentJob:
query['parentId'] = parentJob['_id']
return super().findWithPermissions(
query, offset=offset, limit=limit, timeout=timeout, fields=fields,
sort=sort, user=user, level=level, **kwargs)
def cancelJob(self, job):
"""
Revoke/cancel a job. This simply triggers the jobs.cancel event and
sets the job status to CANCELED. If one of the event handlers
calls preventDefault() on the event, this job will *not* be put into
the CANCELED state.
:param job: The job to cancel.
"""
event = events.trigger('jobs.cancel', info=job)
if not event.defaultPrevented:
job = self.updateJob(job, status=JobStatus.CANCELED)
return job
def createLocalJob(self, module, function=None, **kwargs):
"""
Takes the same keyword arguments as :py:func:`createJob`, except this
sets the handler to the local handler and takes additional parameters
to specify the module and function that should be run.
:param module: The name of the python module to run.
:type module: str
:param function: Function name within the module to run. If not passed,
the default name of "run" will be used.
:type function: str or None
:returns: The job that was created.
"""
kwargs['handler'] = JOB_HANDLER_LOCAL
kwargs['save'] = False
job = self.createJob(**kwargs)
job['module'] = module
if function is not None:
job['function'] = function
return self.save(job)
    def createJob(self, title, type, args=(), kwargs=None, user=None, when=None,
                  interval=0, public=False, handler=None, asynchronous=False,
                  save=True, parentJob=None, otherFields=None):
        """
        Create a new job record.

        :param title: The title of the job.
        :type title: str
        :param type: The type of the job.
        :type type: str
        :param args: Positional args of the job payload.
        :type args: list or tuple
        :param kwargs: Keyword arguments of the job payload.
        :type kwargs: dict
        :param user: The user creating the job.
        :type user: dict or None
        :param when: Minimum start time for the job (UTC).
        :type when: datetime
        :param interval: If this job should be recurring, set this to a value
            in seconds representing how often it should occur. Set to <= 0 for
            jobs that should only be run once.
        :type interval: int
        :param public: Public read access flag.
        :type public: bool
        :param handler: If this job should be handled by a specific handler,
            use this field to store that information.
        :param asynchronous: Whether the job is to be run asynchronously. For now this
            only applies to jobs that are scheduled to run locally.
        :type asynchronous: bool
        :param save: Whether the documented should be saved to the database.
        :type save: bool
        :param parentJob: The job which will be set as a parent
        :type parentJob: Job
        :param otherFields: Any additional fields to set on the job.
        :type otherFields: dict
        """
        now = datetime.datetime.utcnow()
        if when is None:
            when = now
        if kwargs is None:
            kwargs = {}
        otherFields = otherFields or {}
        parentId = None
        if parentJob:
            parentId = parentJob['_id']
        # New jobs always start INACTIVE with empty progress/log/meta.
        job = {
            'title': title,
            'type': type,
            'args': args,
            'kwargs': kwargs,
            'created': now,
            'updated': now,
            'when': when,
            'interval': interval,
            'status': JobStatus.INACTIVE,
            'progress': None,
            'log': [],
            'meta': {},
            'handler': handler,
            'asynchronous': asynchronous,
            'timestamps': [],
            'parentId': parentId
        }
        job.update(otherFields)
        self.setPublic(job, public=public)
        if user:
            job['userId'] = user['_id']
            self.setUserAccess(job, user=user, level=AccessType.ADMIN)
        else:
            job['userId'] = None
        if save:
            job = self.save(job)
        if user:
            # The notification payload must be JSON-serializable, so kwargs
            # are temporarily serialized and then restored afterwards.
            deserialized_kwargs = job['kwargs']
            job['kwargs'] = json_util.dumps(job['kwargs'])
            Notification().createNotification(
                type='job_created', data=job, user=user,
                expires=datetime.datetime.utcnow() + datetime.timedelta(seconds=30))
            job['kwargs'] = deserialized_kwargs
        return job
    def save(self, job, *args, **kwargs):
        """
        We extend save so that we can serialize the kwargs before sending them
        to the database. This will allow kwargs with $ and . characters in the
        keys.
        """
        # Serialize for storage ...
        job['kwargs'] = json_util.dumps(job['kwargs'])
        job = super().save(job, *args, **kwargs)
        # ... then restore the in-memory document to a usable dict.
        job['kwargs'] = json_util.loads(job['kwargs'])
        return job
    def find(self, *args, **kwargs):
        """
        Overrides the default find behavior to exclude the log by default.

        :param includeLog: Whether to include the log field in the documents.
        :type includeLog: bool
        """
        # _computeFields consumes includeLog and builds the projection mask.
        kwargs['fields'] = self._computeFields(kwargs)
        return super().find(*args, **kwargs)
def load(self, *args, **kwargs):
"""
We extend load to deserialize the kwargs back into a dict since we
serialized them on the way into the database.
:param includeLog: Whether to include the log field in the document.
:type includeLog: bool
"""
kwargs['fields'] = self._computeFields(kwargs)
job = super().load(*args, **kwargs)
if job and isinstance(job.get('kwargs'), str):
job['kwargs'] = json_util.loads(job['kwargs'])
if job and isinstance(job.get('log'), str):
# Legacy support: log used to be just a string, but we want to
# consistently return a list of strings now.
job['log'] = [job['log']]
return job
def scheduleJob(self, job):
"""
Trigger the event to schedule this job. Other plugins are in charge of
actually scheduling and/or executing the job, except in the case when
the handler is 'local'.
"""
if job.get('asynchronous', job.get('async')) is True:
events.daemon.trigger('jobs.schedule', info=job)
else:
events.trigger('jobs.schedule', info=job)
def | |
<filename>movingpandas/trajectory.py
# -*- coding: utf-8 -*-
import os
import sys
import contextily as ctx
from shapely.affinity import translate
from shapely.geometry import Point, LineString
from fiona.crs import from_epsg
from datetime import datetime
sys.path.append(os.path.dirname(__file__))
from movingpandas import overlay
from movingpandas.geometry_utils import azimuth, calculate_initial_compass_bearing, measure_distance_spherical, \
measure_distance_euclidean
SPEED_COL_NAME = 'speed'
DIRECTION_COL_NAME = 'direction'
class Trajectory:
    def __init__(self, traj_id, df, parent=None):
        """
        Create a Trajectory.

        traj_id -- identifier of this trajectory
        df -- GeoDataFrame with a time-based index and a point geometry column;
              must have at least two rows.
              NOTE(review): the input df is sorted in place (side effect on the caller).
        parent -- optional parent Trajectory this one was derived from
        """
        if len(df) < 2:
            raise ValueError("Trajectory dataframe must have at least two rows!")
        self.id = traj_id
        df.sort_index(inplace=True)
        # Drop rows with duplicate timestamps, keeping the first observation.
        self.df = df[~df.index.duplicated(keep='first')]
        self.crs = df.crs
        self.parent = parent
        self.context = None
def __str__(self):
try:
line = self.to_linestring()
except RuntimeError:
return "Invalid trajectory!"
return "Trajectory {1} ({2} to {3}) | Size: {0} | Length: {6:.1f}m\nBounds: {5}\n{4}".format(
self.df.geometry.count(), self.id, self.get_start_time(),
self.get_end_time(), line.wkt[:100], self.get_bbox(), self.get_length())
def __eq__(self, other):
# TODO: make bullet proof
return str(self) == str(other) and self.crs == other.crs and self.parent == other.parent
    def copy(self):
        """Return a copy of this Trajectory (same id; copied dataframe;
        parent reference shared, not copied)."""
        return Trajectory(self.id, self.df.copy(), self.parent)
def plot(self, with_basemap=False, for_basemap=False, *args, **kwargs):
temp_df = self.df.copy()
if 'column' in kwargs:
if kwargs['column'] == SPEED_COL_NAME and SPEED_COL_NAME not in self.df.columns:
temp_df = self.get_df_with_speed()
temp_df = temp_df.assign(prev_pt=temp_df.geometry.shift())
temp_df['line'] = temp_df.apply(self._connect_prev_pt_and_geometry, axis=1)
if with_basemap:
if 'url' in kwargs and 'zoom' in kwargs:
url = kwargs.pop('url')
zoom = kwargs.pop('zoom')
ax = temp_df.set_geometry('line')[1:].to_crs(epsg=3857).plot(*args, **kwargs)
return ctx.add_basemap(ax, url=url, zoom=zoom)
elif 'url' in kwargs:
url = kwargs.pop('url')
ax = temp_df.set_geometry('line')[1:].to_crs(epsg=3857).plot(*args, **kwargs)
return ctx.add_basemap(ax, url=url)
else:
ax = temp_df.set_geometry('line')[1:].to_crs(epsg=3857).plot(*args, **kwargs)
return ctx.add_basemap(ax)
else:
if for_basemap:
return temp_df.set_geometry('line')[1:].to_crs(epsg=3857).plot(*args, **kwargs)
else:
return temp_df.set_geometry('line')[1:].plot(*args, **kwargs)
    def set_crs(self, crs):
        """Set coordinate reference system of Trajectory using string of SRID.

        NOTE(review): this only updates the stored crs attribute; it does not
        reproject the underlying dataframe.
        """
        self.crs = crs
def is_valid(self):
"""Return Boolean of whether Trajectory meets minimum prerequisites."""
if len(self.df) < 2:
return False
if not self.get_start_time() < self.get_end_time():
return False
return True
def is_latlon(self):
"""Return Boolean of whether coordinate reference system is WGS 84."""
if self.crs['init'] == from_epsg(4326)['init']:
return True
else:
return False
    def has_parent(self):
        """Return Boolean of whether this Trajectory was derived from a parent
        Trajectory (e.g. extracted as a segment of another trajectory)."""
        return self.parent is not None
    def to_linestring(self):
        """Return shapely LineString connecting the trajectory's points in order.

        Raises RuntimeError when the underlying conversion helper fails.
        """
        try:
            return point_gdf_to_linestring(self.df)
        except RuntimeError:
            raise RuntimeError("Cannot generate linestring")
def to_linestringm_wkt(self):
"""Return WKT Linestring M as string of Trajectory object."""
# Shapely only supports x, y, z. Therefore, this is a bit hacky!
coords = ''
for index, row in self.df.iterrows():
pt = row.geometry
t = to_unixtime(index)
coords += "{} {} {}, ".format(pt.x, pt.y, t)
wkt = "LINESTRING M ({})".format(coords[:-2])
return wkt
    def get_start_location(self):
        """Return shapely Point object of Trajectory's start location."""
        # NOTE(review): Series[0] indexes by label rather than position on
        # non-integer indices in newer pandas — confirm supported versions.
        return self.df.head(1).geometry[0]
    def get_end_location(self):
        """Return shapely Point object of Trajectory's end location."""
        # NOTE(review): Series[0] indexes by label rather than position on
        # non-integer indices in newer pandas — confirm supported versions.
        return self.df.tail(1).geometry[0]
    def get_bbox(self):
        """Return tuple of minimum & maximum x & y of Trajectory's locations."""
        return self.to_linestring().bounds  # (minx, miny, maxx, maxy)
def get_start_time(self):
"""Return datetime.datetime object of Trajectory's start location."""
return self.df.index.min().to_pydatetime()
def get_end_time(self):
"""Return datetime.datetime object of Trajectory's start location."""
return self.df.index.max().to_pydatetime()
def get_duration(self):
"""Return datetime.timedelta object of Trajectory's duration."""
return self.get_end_time() - self.get_start_time()
    def get_row_at(self, t, method='nearest'):
        """Return pandas series of position at given datetime object.

        t -- timestamp to look up
        method -- fallback lookup method ('nearest', 'ffill', 'bfill') used
                  when t is not an exact index label
        """
        try:
            return self.df.loc[t]
        except KeyError:
            # Fall back to the closest label according to `method`.
            # NOTE(review): Index.get_loc(..., method=...) is deprecated in
            # newer pandas — confirm the supported pandas version.
            return self.df.iloc[self.df.index.sort_values().drop_duplicates().get_loc(t, method=method)]
    def interpolate_position_at(self, t):
        """Return interpolated shapely Point at given datetime object."""
        # Bracket t between the previous and next recorded positions.
        prev_row = self.get_row_at(t, 'ffill')
        next_row = self.get_row_at(t, 'bfill')
        t_diff = next_row.name - prev_row.name
        t_diff_at = t - prev_row.name
        line = LineString([prev_row.geometry, next_row.geometry])
        # Degenerate cases: zero time span or zero-length segment.
        if t_diff == 0 or line.length == 0:
            return prev_row.geometry
        # Interpolate along the segment proportionally to the elapsed
        # fraction of the segment's time span.
        interpolated_position = line.interpolate(t_diff_at/t_diff*line.length)
        return interpolated_position
    def get_position_at(self, t, method='interpolated'):
        """Return shapely Point at given datetime using the provided method
        ('interpolated', 'nearest', 'ffill' or 'bfill')."""
        if method not in ['nearest', 'interpolated', 'ffill', 'bfill']:
            raise ValueError('Invalid split method {}. Must be one of [nearest, interpolated, ffill, bfill]'.
                             format(method))
        if method == 'interpolated':
            return self.interpolate_position_at(t)
        else:
            row = self.get_row_at(t, method)
            try:
                # A duplicate timestamp yields a DataFrame; take its first row.
                return row.geometry[0]
            except TypeError:
                return row.geometry
def get_linestring_between(self, t1, t2, method='interpolated'):
    """Return shapely LineString between given datetime objects and split method."""
    if method not in ('interpolated', 'within'):
        raise ValueError('Invalid split method {}. Must be one of [interpolated, within]'.format(method))
    if method == 'interpolated':
        # Clip at interpolated entry/exit points so the line spans exactly [t1, t2].
        st_range = overlay.SpatioTemporalRange(self.get_position_at(t1), self.get_position_at(t2), t1, t2)
        temp_df = overlay.create_entry_and_exit_points(self, st_range)
        return point_gdf_to_linestring(temp_df[t1:t2])
    # 'within': only observed positions inside [t1, t2].
    try:
        return point_gdf_to_linestring(self.get_segment_between(t1, t2).df)
    except RuntimeError:
        raise RuntimeError("Cannot generate linestring between {0} and {1}".format(t1, t2))
def get_segment_between(self, t1, t2):
    """Return Trajectory object between given datetime objects."""
    sliced = Trajectory(self.id, self.df[t1:t2], parent=self)
    if sliced.is_valid():
        return sliced
    raise RuntimeError("Failed to extract valid trajectory segment between {} and {}".format(t1, t2))
def _compute_distance(self, row):
    """Return distance from ``row['prev_pt']`` to ``row['geometry']``.

    Returns 0.0 when there is no previous point (first row) or when the
    two points coincide.
    """
    start, end = row['prev_pt'], row['geometry']
    if type(start) != Point or start == end:
        return 0.0
    if self.is_latlon():
        return measure_distance_spherical(start, end)
    # The following distance will be in CRS units that might not be meters!
    return measure_distance_euclidean(start, end)
def add_prev_pt(self, force=True):
    """create a shifted geometry column with previous positions,
    required for several calculations
    """
    # TODO: decide on default enforcement behavior
    if force or 'prev_pt' not in self.df.columns:
        self.df = self.df.assign(prev_pt=self.df.geometry.shift())
def get_length(self):
    """Return float of length of Trajectory object.
    This is calculated with the measurement unit of the CRS used, except
    when using WGS 84 when it is calculated in metres.
    """
    # Work on a derived frame so the stored DataFrame is left untouched.
    with_prev = self.df.assign(prev_pt=self.df.geometry.shift())
    distances = with_prev.apply(self._compute_distance, axis=1)
    return distances.sum()
def get_direction(self):
    """Return compass bearing as float of Trajectory object."""
    origin = self.get_start_location()
    destination = self.get_end_location()
    # Spherical bearing for lat/lon data, planar azimuth otherwise.
    if self.is_latlon():
        return calculate_initial_compass_bearing(origin, destination)
    return azimuth(origin, destination)
def _compute_heading(self, row):
    """Return heading between ``row['prev_pt']`` and ``row['geometry']``.

    Returns 0.0 when there is no previous point or the points coincide.
    """
    start, end = row['prev_pt'], row['geometry']
    if type(start) != Point or start == end:
        return 0.0
    if self.is_latlon():
        return calculate_initial_compass_bearing(start, end)
    return azimuth(start, end)
def _compute_speed(self, row):
    """Return speed between consecutive points as distance / ``delta_t`` seconds.

    Raises:
        ValueError: if the row's geometry is not a Point.
    """
    start, end = row['prev_pt'], row['geometry']
    if type(start) != Point:
        return 0.0
    if type(end) != Point:
        raise ValueError('Invalid trajectory! Got {} instead of point!'.format(end))
    if start == end:
        return 0.0
    if self.is_latlon():
        dist = measure_distance_spherical(start, end)
    else:
        # The following distance will be in CRS units that might not be meters!
        dist = measure_distance_euclidean(start, end)
    return dist / row['delta_t'].total_seconds()
@staticmethod
def _connect_prev_pt_and_geometry(row):
    """Return a LineString joining ``row['prev_pt']`` to ``row['geometry']``.

    Returns None for the first row (no previous point); raises ValueError
    if the geometry is not a Point.
    """
    start, end = row['prev_pt'], row['geometry']
    if type(start) != Point:
        return None
    if type(end) != Point:
        raise ValueError('Invalid trajectory! Got {} instead of point!'.format(end))
    if start == end:
        # to avoid intersection issues with zero length lines
        end = translate(end, 0.00000001, 0.00000001)
    return LineString(list(start.coords) + list(end.coords))
def add_direction(self, overwrite=False):
    """Add direction column and values to Trajectory object's DataFrame."""
    already_there = DIRECTION_COL_NAME in self.df.columns
    if already_there and not overwrite:
        raise RuntimeError('Trajectory already has direction values! Use overwrite=True to overwrite exiting values.')
    self.add_prev_pt()
    headings = self.df.apply(self._compute_heading, axis=1)
    self.df[DIRECTION_COL_NAME] = headings
    # The first row has no predecessor, so reuse the second row's value.
    second_value = self.df.iloc[1][DIRECTION_COL_NAME]
    self.df.at[self.get_start_time(), DIRECTION_COL_NAME] = second_value
def add_speed(self, overwrite=False):
    """Add speed column and values to Trajectory object's DataFrame.
    This is calculated with the measurement unit of the CRS used, except
    when using WGS 84 when it is calculated in metres. This is then divided
    by total seconds.
    """
    already_there = SPEED_COL_NAME in self.df.columns
    if already_there and not overwrite:
        raise RuntimeError('Trajectory already has speed values! Use overwrite=True to overwrite exiting values.')
    self.df = self.get_df_with_speed()
def get_df_with_speed(self):
    """Return a copy of the DataFrame with a speed column added.

    Speed is the distance between consecutive positions (in CRS units,
    or metres for WGS 84) divided by the elapsed seconds between rows.
    The original frame is not modified.

    Raises
    ------
    ValueError
        If a row's geometry is not a Point (propagated from _compute_speed).
    """
    temp_df = self.df.copy()
    temp_df = temp_df.assign(prev_pt=temp_df.geometry.shift())
    if 't' in temp_df.columns:
        times = temp_df.t
    else:
        # Timestamps live in the index; surface them as a column.
        # NOTE(review): assumes the index is named 't' — confirm upstream.
        times = temp_df.reset_index().t
    temp_df = temp_df.assign(delta_t=times.diff().values)
    # The original wrapped this in a no-op ``except ValueError as e: raise e``;
    # the exception propagates unchanged without it.
    temp_df[SPEED_COL_NAME] = temp_df.apply(self._compute_speed, axis=1)
    # set the speed in the first row to the speed of the second row
    temp_df.at[self.get_start_time(), SPEED_COL_NAME] = temp_df.iloc[1][SPEED_COL_NAME]
    return temp_df.drop(columns=['prev_pt', 'delta_t'])
def intersects(self, polygon):
    """Return whether this trajectory intersects the given polygon (delegates to overlay)."""
    return overlay.intersects(self, polygon)
def clip(self, polygon, pointbased=False):
    """Return clipped Trajectory with polygon as Trajectory object.

    Delegates to ``overlay.clip``; ``pointbased`` selects point-based clipping.
    """
    return overlay.clip(self, polygon, pointbased)
def intersection(self, feature):
    """Return the intersection of this trajectory with the given feature (delegates to overlay)."""
    return overlay.intersection(self, feature)
def split_by_date(self, mode='day'):
    """Return list of Trajectory objects split by date."""
    if mode == 'day':
        grouped = self.df.groupby(self.df.index.date)
    elif mode == 'year':
        grouped = self.df.groupby(self.df.index.year)
    else:
        raise ValueError('Invalid split mode {}. Must be one of [day, year]'.format(mode))
    # Single-row groups cannot form a trajectory, so they are dropped.
    return [
        Trajectory('{}_{}'.format(self.id, key), values)
        for key, values in grouped
        if len(values) > 1
    ]
def split_by_observation_gap(self, gap):
result = []
temp_df = self.df.copy()
temp_df['t'] = temp_df.index
temp_df['gap'] = temp_df['t'].diff() > gap
temp_df['gap'] = temp_df['gap'].apply(lambda x: 1 if x else 0).cumsum()
dfs = [group[1] for group in temp_df.groupby(temp_df['gap'])]
for i, df in enumerate(dfs):
| |
'''
Some util functions
Part of the code is referenced from Kaggle
'''
import os
import cv2
import torch
import random
import numpy as np
import pandas as pd
from . import fmix
from tqdm import tqdm
from torch.utils.data import Dataset
from torch.cuda.amp import autocast
import copy
def seed_everything(seed):
    '''All kinds of random seeds are fixed to facilitate ablation experiments.

    Args:
        seed : int
    '''
    random.seed(seed)  # Python's random module
    os.environ['PYTHONHASHSEED'] = str(seed)  # hash randomization (may not take effect after interpreter start)
    np.random.seed(seed)  # NumPy RNG
    torch.manual_seed(seed)  # Torch CPU RNG
    torch.cuda.manual_seed(seed)  # CUDA RNG (current device)
    torch.cuda.manual_seed_all(seed)  # CUDA RNG on every device (multi-GPU runs)
    # Force deterministic cuDNN kernels. benchmark must be OFF: with
    # benchmark=True cuDNN auto-selects conv algorithms per run, which
    # defeats the determinism requested on the line above (the original
    # code set benchmark=True, contradicting its own intent).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def create_result_folder(path):
    '''Create a folder according to the path to store all the trained models obtained from this train

    Args:
        path : str target path to be created, e.g '../models/new_folder'
    '''
    # exist_ok avoids a FileExistsError when resuming into an existing folder;
    # intermediate directories are created as needed.
    os.makedirs(path, exist_ok=True)
def get_sub_training_set(original_df, frac=0.15):
    '''Create subset of training csv input to acquire smaller input to save time for parameter optimization attempting

    :param original_df: pandas.dataframe
    :param frac: frac of subset from original dataframe for each label
    :return: sub_df: pandas.dataframe
    '''
    # Sample each label's rows once, then concatenate in a single call:
    # the original concatenated inside the loop (quadratic in label count)
    # and duplicated the first label as a special case.
    labels = sorted(original_df['label'].unique())
    samples = [
        original_df[original_df['label'] == lbl].sample(frac=frac, replace=False)
        for lbl in labels
    ]
    sub_df = pd.concat(samples)
    # Shuffle the stratified sample and renumber rows.
    return sub_df.sample(frac=1).reset_index(drop=True)
def get_img(path):
    '''Load the image with OpenCV and return it in RGB channel order.

    Due to historical reasons, OpenCV reads images in the BGR format
    (Old TV setting), so the result is converted to RGB before returning.

    Args:
        path : str Image file path e.g '../data/train_img/1.jpg'
    Raises:
        FileNotFoundError: if the image cannot be read from ``path``.
    '''
    img_bgr = cv2.imread(path)
    if img_bgr is None:
        # cv2.imread signals failure by returning None instead of raising;
        # fail loudly here rather than with a cryptic cvtColor error later.
        raise FileNotFoundError('Could not read image: {}'.format(path))
    return cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
def rand_bbox(size, lam):
    '''Cutmix bbox interception function

    Args:
        size : tuple Image sizes e.g (256,256)
        lam : float ratio of image left
    Returns:
        The upper-left and lower-right coordinates of the bbox
        int,int,int,int
    '''
    # NOTE(review): callers pass ``img.shape[:2]``, so size[0] is the array's
    # first axis; for HxW images that is the height, not the width — confirm.
    W = size[0]
    H = size[1]
    cut_rat = np.sqrt(1. - lam)  # side ratio of the cut region (1 - ratio of image left)
    # Builtin int(): ``np.int`` was removed in NumPy 1.20+/1.24 and raises
    # AttributeError there; int() truncates identically.
    cut_w = int(W * cut_rat)  # The width of the bbox
    cut_h = int(H * cut_rat)  # The height of the bbox
    # Uniformly sample the centre of the box, then clip the corners to the image.
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)  # The top-left x-coordinate
    bby1 = np.clip(cy - cut_h // 2, 0, H)  # The top-left y-coordinate
    bbx2 = np.clip(cx + cut_w // 2, 0, W)  # The lower-right x-coordinate
    bby2 = np.clip(cy + cut_h // 2, 0, H)  # The lower-right y-coordinate
    return bbx1, bby1, bbx2, bby2
class CassavaDataset(Dataset):
    '''Dataset of cassava leaf images with optional FMix / CutMix augmentation.

    Loads each image from disk, optionally applies an augmentation pipeline,
    and can mix the sample with a randomly chosen second sample via FMix or
    CutMix, mixing the (smoothed) labels with the same ratio.

    Attributes:
        __len__ : lenghth of data samples
        __getitem__ : Index function
    '''
    def __init__(
            self,
            df,
            data_root,
            transforms=None,
            output_label=True,
            label_smoothing=True,
            do_fmix=False,
            fmix_params={
                'alpha': 1.,
                'decay_power': 3.,
                'shape': (512, 512),
                'max_soft': 0.3,
                'reformulate': False
            },
            fmix_probability=0.5,
            do_cutmix=False,
            cutmix_params={
                'alpha': 1,
            },
            cutmix_probability=0.5):
        '''
        Args:
            df : DataFrame , The file name and label of the sample image
            data_root : str , The file path where the image is located, absolute path
            transforms : object , Image augmentation
            output_label : bool , Whether output labels
            label_smoothing : bool , Whether label_smoothing
            do_fmix : bool , Whether to use fmix
            fmix_params :dict , fmix parameters {'alpha':1.,'decay_power':3.,'shape':(256,256),'max_soft':0.3,'reformulate':False}
            fmix_probability : float , per-sample chance of applying fmix
            do_cutmix : bool, Whether to use cutmix
            cutmix_params : dict , cutmix parameters {'alpha':1.}
            cutmix_probability : float , per-sample chance of applying cutmix
        Raises:
            ValueError : if labels are not a 1-D list/array when label smoothing is on
        '''
        super().__init__()
        # NOTE(review): the dict defaults (fmix_params/cutmix_params) are mutable
        # and shared across instances — safe only while they are never mutated.
        self.df = df.reset_index(drop=True).copy()  # Regenerate index
        self.transforms = transforms
        self.data_root = data_root
        self.do_fmix = do_fmix
        self.fmix_params = fmix_params
        # NOTE: attribute name keeps the historical typo ('probablity').
        self.fmix_probablity = fmix_probability
        self.do_cutmix = do_cutmix
        self.cutmix_params = cutmix_params
        self.cutmix_probability = cutmix_probability
        self.output_label = output_label
        self.label_smoothing = label_smoothing
        if output_label:
            self.labels = self.df['label'].values
            if label_smoothing:
                if not isinstance(self.labels, (list, np.ndarray)):
                    raise ValueError("labels must be 1-D list or array")
                # Build a (num_samples, num_classes) soft-label matrix:
                # 0.95 on the true class, 0.05 spread over the other classes.
                self.labels = torch.LongTensor(self.labels).view(-1, 1)
                zeros_tensor = torch.zeros(self.df.shape[0], self.df['label'].max() + 1)
                filled = zeros_tensor.fill_(0.05 / self.df['label'].max())
                self.labels = filled.scatter_(1, self.labels, 0.95)  # Generate the label smoothing

    def __len__(self):
        # One sample per DataFrame row.
        return self.df.shape[0]

    def __getitem__(self, index):
        '''
        Args:
            index : int , index
        Returns:
            img, target(optional)
        '''
        if self.output_label:
            target = self.labels[index]
        img = get_img(
            os.path.join(self.data_root,
                         self.df.loc[index]['image_id']))  # join the path and load the image
        if self.transforms:  # Using image augmentation
            img = self.transforms(image=img)['image']
        # ``uniform > (1 - p)`` triggers with probability ~p (default 50%).
        if self.do_fmix and np.random.uniform(
                0., 1., size=1)[0] > (1 - self.fmix_probablity):
            with torch.no_grad():
                lam, mask = fmix.sample_mask(
                    **self.fmix_params)  # Can be modified, which uses the clip to specify the upper and lower limits
                fmix_ix = np.random.choice(self.df.index,
                                           size=1)[0]  # Randomly select the images to mix
                fmix_img = get_img(
                    os.path.join(self.data_root,
                                 self.df.loc[fmix_ix]['image_id']))
                if self.transforms:
                    fmix_img = self.transforms(image=fmix_img)['image']
                mask_torch = torch.from_numpy(mask)
                img = mask_torch * img + (1. - mask_torch) * fmix_img  # Mix the picture
                rate = mask.sum() / float(img.size)  # Get the rate of Mix
                target = rate * target + (
                    1. - rate) * self.labels[fmix_ix]  # Target to mix (should use one-hot first !)
        # Same trigger convention as fmix above (default 50% chance).
        if self.do_cutmix and np.random.uniform(
                0., 1., size=1)[0] > (1 - self.cutmix_probability):
            with torch.no_grad():
                cmix_ix = np.random.choice(self.df.index, size=1)[0]
                cmix_img = get_img(
                    os.path.join(self.data_root,
                                 self.df.loc[cmix_ix]['image_id']))
                if self.transforms:
                    cmix_img = self.transforms(image=cmix_img)['image']
                # lam clipped to [0.3, 0.4]: the patch covers 60-70% of the image.
                lam = np.clip(
                    np.random.beta(self.cutmix_params['alpha'],
                                   self.cutmix_params['alpha']), 0.3, 0.4)
                # NOTE(review): rand_bbox receives shape[:2] but the patch is
                # pasted with channel-first indexing below — confirm layout.
                bbx1, bby1, bbx2, bby2 = rand_bbox(cmix_img.shape[:2], lam)
                img[:, bbx1:bbx2, bby1:bby2] = cmix_img[:, bbx1:bbx2,
                                                        bby1:bby2]
                rate = 1 - ((bbx2 - bbx1) *
                            (bby2 - bby1) / float(img.size))  # Get the rate of Mix
                target = rate * target + (
                    1. - rate) * self.labels[cmix_ix]  # Target to mix (should use one-hot first !)
        if self.output_label:
            return img, target
        else:
            return img
def prepare_dataloader(df, trn_idx, val_idx, data_root, trn_transform,
                       val_transform, bs, n_job):
    '''Multithreaded data generator

    Args:
        df : DataFrame , The file name and label of the sample image
        trn_idx : ndarray , Training set index list
        val_idx : ndarray , Validation set index list
        data_root : str , The path of the image file
        trn_transform : object , Training set image augmentation
        val_transform : object , Validation set image augmentation
        bs : int , Number of batchsizes per time !!!
        n_job : int , Number of threads in use
    Returns:
        train_loader, val_loader , Data generators for training sets and validation sets
    '''
    def _make_dataset(rows, transform):
        # Regenerate the index so Dataset.__getitem__ can address rows 0..n-1.
        return CassavaDataset(rows.reset_index(drop=True),
                              data_root,
                              transforms=transform,
                              output_label=True,
                              label_smoothing=True,
                              do_fmix=False,
                              do_cutmix=False)

    train_ds = _make_dataset(df.loc[trn_idx, :], trn_transform)
    valid_ds = _make_dataset(df.loc[val_idx, :], val_transform)
    # Training batches are shuffled; validation batches keep their order.
    train_loader = torch.utils.data.DataLoader(train_ds,
                                               batch_size=bs,
                                               pin_memory=False,
                                               drop_last=False,
                                               shuffle=True,
                                               num_workers=n_job)
    val_loader = torch.utils.data.DataLoader(valid_ds,
                                             batch_size=bs,
                                             pin_memory=False,
                                             drop_last=False,
                                             shuffle=False,
                                             num_workers=n_job)
    return train_loader, val_loader
def train_one_epoch(epoch,
model,
loss_fn,
optimizer,
train_loader,
device,
scaler,
scheduler=None,
schd_batch_update=False,
accum_iter=2):
'''The training function for each epoch
Args:
epoch : int , which epoch now
model : object, the imported architecture of model
loss_fn : object, loss function
optimizer : object, optimization method
train_loader : object, Training set data generator
scaler : object, Gradient amplifier
device : str , Training devices e.g 'cuda:0'
scheduler : object , Learning rate adjustment strategy
schd_batch_update : bool, If true, adjust each batch, otherwise wait until the epoch has finished
accum_iter : int , Gradient accumulation
'''
model.train() # Training Mode
running_loss = None
pbar = tqdm(enumerate(train_loader), total=len(train_loader)) # progress bar
for step, (imgs, image_labels) in pbar: # Iterate through each batch
imgs = imgs.to(device).float()
image_labels = image_labels.to(device)
#TODO
# test double()
# image_labels = image_labels.to(device).double()
with autocast(): # Enable automatic mix accuracy
image_preds = model(imgs) # Propagate forward and calculate the predicted value
loss = loss_fn(image_preds, image_labels) # Calculating the loss
scaler.scale(loss).backward() # scale gradient
# loss regularization with exponential average
if running_loss is None:
running_loss = copy.copy(loss)
else:
running_loss = running_loss * .99 + copy.copy(loss) * .01
if ((step + 1) % accum_iter == 0) or ((step + 1) == len(train_loader)):
| |
# repo: sprksh/finance-calculator
from tests.data import scheme_data, benchmark_data
# Identifier of the benchmark scheme used by the test fixtures.
benchmark_scheme_code = 'S0089095'
# Human-readable name of the benchmark index (total-return variant).
benchmark_scheme_name = 'Nifty Largemidcap 250 (Total Return)'
# benchmark_data = [
# ('2017-06-01', 6249.670000),
# ('2017-06-02', 6283.580000),
# ('2017-06-05', 6312.220000),
# ('2017-06-06', 6287.370000),
# ('2017-06-07', 6322.570000),
# ('2017-06-08', 6326.120000),
# ('2017-06-09', 6344.990000),
# ('2017-06-12', 6307.140000),
# ('2017-06-13', 6310.080000),
# ('2017-06-14', 6330.510000),
# ('2017-06-15', 6322.430000),
# ('2017-06-16', 6325.720000),
# ('2017-06-19', 6340.890000),
# ('2017-06-20', 6344.720000),
# ('2017-06-21', 6341.670000),
# ('2017-06-22', 6322.240000),
# ('2017-06-23', 6261.470000),
# ('2017-06-27', 6201.430000),
# ('2017-06-28', 6202.960000),
# ('2017-06-29', 6228.370000),
# ('2017-06-30', 6266.390000),
# ('2017-07-03', 6326.000000),
# ('2017-07-04', 6317.730000),
# ('2017-07-05', 6359.860000),
# ('2017-07-06', 6380.100000),
# ('2017-07-07', 6379.840000),
# ('2017-07-10', 6426.220000),
# ('2017-07-11', 6403.320000),
# ('2017-07-12', 6439.460000),
# ('2017-07-13', 6482.120000),
# ('2017-07-14', 6482.490000),
# ('2017-07-17', 6490.430000),
# ('2017-07-18', 6444.600000),
# ('2017-07-19', 6496.640000),
# ('2017-07-20', 6484.510000),
# ('2017-07-21', 6495.610000),
# ('2017-07-24', 6514.340000),
# ('2017-07-25', 6530.680000),
# ('2017-07-26', 6553.130000),
# ('2017-07-27', 6549.750000),
# ('2017-07-28', 6565.680000),
# ('2017-07-31', 6596.120000),
# ('2017-08-01', 6605.730000),
# ('2017-08-02', 6596.400000),
# ('2017-08-03', 6542.440000),
# ('2017-08-04', 6573.180000),
# ('2017-08-07', 6616.810000),
# ('2017-08-08', 6562.290000),
# ('2017-08-09', 6481.690000),
# ('2017-08-10', 6347.890000),
# ('2017-08-11', 6304.350000),
# ('2017-08-14', 6415.380000),
# ('2017-08-16', 6491.910000),
# ('2017-08-17', 6494.510000),
# ('2017-08-18', 6464.360000),
# ('2017-08-21', 6401.110000),
# ('2017-08-22', 6382.730000),
# ('2017-08-23', 6448.680000),
# ('2017-08-24', 6469.980000),
# ('2017-08-28', 6525.200000),
# ('2017-08-29', 6458.990000),
# ('2017-08-30', 6531.320000),
# ('2017-08-31', 6562.400000),
# ('2017-09-01', 6611.730000),
# ('2017-09-04', 6570.390000),
# ('2017-09-05', 6615.920000),
# ('2017-09-06', 6611.720000),
# ('2017-09-07', 6633.240000),
# ('2017-09-08', 6628.440000),
# ('2017-09-11', 6676.670000),
# ('2017-09-12', 6741.560000),
# ('2017-09-13', 6714.870000),
# ('2017-09-14', 6740.700000),
# ('2017-09-15', 6753.900000),
# ('2017-09-18', 6801.960000),
# ('2017-09-19', 6806.650000),
# ('2017-09-20', 6789.070000),
# ('2017-09-21', 6769.620000),
# ('2017-09-22', 6623.560000),
# ('2017-09-25', 6540.050000),
# ('2017-09-26', 6565.970000),
# ('2017-09-27', 6458.900000),
# ('2017-09-28', 6494.380000),
# ('2017-09-29', 6532.450000),
# ('2017-10-03', 6579.250000),
# ('2017-10-04', 6608.970000),
# ('2017-10-05', 6620.850000),
# ('2017-10-06', 6682.540000),
# ('2017-10-09', 6686.540000),
# ('2017-10-10', 6724.910000),
# ('2017-10-11', 6682.110000),
# ('2017-10-12', 6755.780000),
# ('2017-10-13', 6779.930000),
# ('2017-10-16', 6819.300000),
# ('2017-10-17', 6835.820000),
# ('2017-10-18', 6822.610000),
# ('2017-10-19', 6789.830000),
# ('2017-10-23', 6819.220000),
# ('2017-10-24', 6838.170000),
# ('2017-10-25', 6884.180000),
# ('2017-10-26', 6915.230000),
# ('2017-10-27', 6913.040000),
# ('2017-10-30', 6976.180000),
# ('2017-10-31', 6978.180000),
# ('2017-11-01', 7030.350000),
# ('2017-11-02', 7043.920000),
# ('2017-11-03', 7055.150000),
# ('2017-11-06', 7063.560000),
# ('2017-11-07', 6981.150000),
# ('2017-11-08', 6938.710000),
# ('2017-11-09', 6984.940000),
# ('2017-11-10', 6996.290000),
# ('2017-11-13', 6957.060000),
# ('2017-11-14', 6934.650000),
# ('2017-11-15', 6871.500000),
# ('2017-11-16', 6946.260000),
# ('2017-11-17', 7003.360000),
# ('2017-11-20', 7031.360000),
# ('2017-11-21', 7047.110000),
# ('2017-11-22', 7053.230000),
# ('2017-11-23', 7065.880000),
# ('2017-11-24', 7101.260000),
# ('2017-11-27', 7124.880000),
# ('2017-11-28', 7118.780000),
# ('2017-11-29', 7106.750000),
# ('2017-11-30', 7055.340000),
# ('2017-12-01', 6989.140000),
# ('2017-12-04', 6991.290000),
# ('2017-12-05', 6997.330000),
# ('2017-12-06', 6943.320000),
# ('2017-12-07', 7028.140000),
# ('2017-12-08', 7092.490000),
# ('2017-12-11', 7123.680000),
# ('2017-12-12', 7068.940000),
# ('2017-12-13', 7022.510000),
# ('2017-12-14', 7036.270000),
# ('2017-12-15', 7108.520000),
# ('2017-12-18', 7154.320000),
# ('2017-12-19', 7228.390000),
# ('2017-12-20', 7242.940000),
# ('2017-12-21', 7280.020000),
# ('2017-12-22', 7306.300000),
# ('2017-12-26', 7335.280000),
# ('2017-12-27', 7324.190000),
# ('2017-12-28', 7326.610000),
# ('2017-12-29', 7376.630000),
# ('2018-01-01', 7341.750000),
# ('2018-01-02', 7310.600000),
# ('2018-01-03', 7334.380000),
# ('2018-01-04', 7383.780000),
# ('2018-01-05', 7444.590000),
# ('2018-01-08', 7500.390000),
# ('2018-01-09', 7496.610000),
# ('2018-01-10', 7479.640000),
# ('2018-01-11', 7499.080000),
# ('2018-01-12', 7501.660000),
# ('2018-01-15', 7519.190000),
# ('2018-01-16', 7433.510000),
# ('2018-01-17', 7490.100000),
# ('2018-01-18', 7428.990000),
# ('2018-01-19', 7494.980000),
# ('2018-01-22', 7544.150000),
# ('2018-01-23', 7608.550000),
# ('2018-01-24', 7578.670000),
# ('2018-01-25', 7535.590000),
# ('2018-01-29', 7523.100000),
# ('2018-01-30', 7465.510000),
# ('2018-01-31', 7409.630000),
# ('2018-02-01', 7393.150000),
# ('2018-02-02', 7151.670000),
# ('2018-02-05', 7103.930000),
# ('2018-02-06', 6984.310000),
# ('2018-02-07', 7001.640000),
# ('2018-02-08', 7104.270000),
# ('2018-02-09', 7069.890000),
# ('2018-02-12', 7153.900000),
# ('2018-02-14', 7150.510000),
# ('2018-02-15', 7134.130000),
# ('2018-02-16', 7072.080000),
# ('2018-02-19', 7010.390000),
# ('2018-02-20', 7001.110000),
# ('2018-02-21', 7013.450000),
# ('2018-02-22', 6987.110000),
# ('2018-02-23', 7082.970000),
# ('2018-02-26', 7140.810000),
# ('2018-02-27', 7115.930000),
# ('2018-02-28', 7098.300000),
# ('2018-03-01', 7067.870000),
# ('2018-03-05', 7003.200000),
# ('2018-03-06', 6933.850000),
# ('2018-03-07', 6848.640000),
# ('2018-03-08', 6899.100000),
# ('2018-03-09', 6887.220000),
# ('2018-03-12', 6976.030000),
# ('2018-03-13', 7014.610000),
# ('2018-03-14', 7016.050000),
# ('2018-03-15', 7014.240000),
# ('2018-03-16', 6925.280000),
# ('2018-03-19', 6833.680000),
# ('2018-03-20', 6857.630000),
# ('2018-03-21', 6878.370000),
# ('2018-03-22', 6841.190000),
# ('2018-03-23', 6758.010000),
# ('2018-03-26', 6847.160000),
# ('2018-03-27', 6906.930000),
# ('2018-03-28', 6866.670000),
# ('2018-04-02', 6958.580000),
# ('2018-04-03', 7001.730000),
# ('2018-04-04', 6928.720000),
# ('2018-04-05', 7055.850000),
# ('2018-04-06', 7078.350000),
# ('2018-04-09', 7102.200000),
# ('2018-04-10', 7114.100000),
# ('2018-04-11', 7122.720000),
# ('2018-04-12', 7123.350000),
# ('2018-04-13', 7145.720000),
# ('2018-04-16', 7183.730000),
# ('2018-04-17', 7200.170000),
# ('2018-04-18', 7187.780000),
# ('2018-04-19', 7223.920000),
# ('2018-04-20', 7211.110000),
# ('2018-04-23', 7235.760000),
# ('2018-04-24', 7243.670000),
# ('2018-04-25', 7218.150000),
# ('2018-04-26', 7243.850000),
# ('2018-04-27', 7292.260000),
# ('2018-04-30', 7339.930000),
# ('2018-05-02', 7281.100000),
# ('2018-05-03', 7229.790000),
# ('2018-05-04', 7196.710000),
# ('2018-05-07', 7254.510000),
# ('2018-05-08', 7253.210000),
# ('2018-05-09', 7237.580000),
# ('2018-05-10', 7175.270000),
# ('2018-05-11', 7213.850000),
# ('2018-05-14', 7176.820000),
# ('2018-05-15', 7145.350000),
# ('2018-05-16', 7122.370000),
# ('2018-05-17', 7117.620000),
# ('2018-05-18', 7042.020000),
# ('2018-05-21', 6954.580000),
# ('2018-05-22', 6977.960000),
# ('2018-05-23', 6927.780000),
# ('2018-05-24', 6940.570000),
# ('2018-05-25', 7032.420000),
# ('2018-05-28', 7107.610000),
# ('2018-05-29', 7084.160000),
# ('2018-05-30', 7077.240000),
# ('2018-05-31', 7109.050000),
# ('2018-06-01', 7050.330000),
# ('2018-06-04', 6989.470000),
# ('2018-06-05', 6930.640000),
# ('2018-06-06', 7008.170000),
# ('2018-06-07', 7086.160000),
# ('2018-06-08', 7107.110000),
# ('2018-06-11', 7117.230000),
# ('2018-06-12', 7163.080000),
# ('2018-06-13', 7154.750000),
# ('2018-06-14', 7147.480000),
# ('2018-06-15', 7129.390000),
# ('2018-06-18', 7109.610000),
# ('2018-06-19', 7040.720000),
# ('2018-06-20', 7071.620000),
# ('2018-06-21', 7043.150000),
# ('2018-06-22', 7079.300000),
# ('2018-06-25', 7033.180000),
# ('2018-06-26', 7028.140000),
# ('2018-06-27', 6945.040000),
# ('2018-06-28', 6858.360000),
# ('2018-06-29', 6959.570000),
# ('2018-07-02', 6923.900000),
# ('2018-07-03', 6960.280000),
# ('2018-07-04', 6989.150000),
# ('2018-07-05', 6961.060000),
# ('2018-07-06', 6985.900000),
# ('2018-07-09', 7063.630000),
# ('2018-07-10', 7120.720000),
# ('2018-07-11', 7105.310000),
# ('2018-07-12', 7114.080000),
# ('2018-07-13', 7076.940000),
# ('2018-07-16', 6963.540000),
# ('2018-07-17', 7055.900000),
# ('2018-07-18', 7010.910000),
# ('2018-07-19', 6981.410000),
# ('2018-07-20', 7029.180000),
# ('2018-07-23', 7093.120000),
# ('2018-07-24', 7168.760000),
# ('2018-07-25', 7167.550000),
# ('2018-07-26', 7198.600000),
# ('2018-07-27', 7265.430000),
# ('2018-07-30', 7288.420000),
# ('2018-07-31', 7314.910000),
# ('2018-08-01', 7322.420000),
# ('2018-08-02', 7292.720000),
# ('2018-08-03', 7364.250000),
# ('2018-08-06', 7385.420000),
# ('2018-08-07', 7379.710000),
# ('2018-08-08', 7400.490000),
# ('2018-08-09', 7422.550000),
# ('2018-08-10', 7391.630000),
# ('2018-08-13', 7345.430000),
# ('2018-08-14', 7415.120000),
# ('2018-08-16', 7392.790000),
# ('2018-08-17', 7458.770000),
# ('2018-08-20', 7510.760000),
# ('2018-08-21', 7529.550000),
# ('2018-08-23', 7532.630000),
# ('2018-08-24', 7515.640000),
# ('2018-08-27', 7601.330000),
# ('2018-08-28', 7614.070000),
# ('2018-08-29', 7619.890000),
# ('2018-08-30', 7623.710000),
# ('2018-08-31', 7642.630000),
# ('2018-09-03', 7598.880000),
# ('2018-09-04', 7478.950000),
# ('2018-09-05', 7441.670000),
# ('2018-09-06', 7476.190000),
# ('2018-09-07', 7535.270000),
# ('2018-09-10', 7428.000000),
# ('2018-09-11', 7331.170000),
# ('2018-09-12', 7365.820000),
# ('2018-09-14', 7471.480000),
# ('2018-09-17', 7406.370000),
# ('2018-09-18', 7321.090000),
# ('2018-09-19', 7273.480000),
# ('2018-09-21', 7167.890000),
# ('2018-09-24', 7011.640000),
# ('2018-09-25', 7039.180000),
# ('2018-09-26', 7048.870000),
# ('2018-09-27', 6948.570000),
# ('2018-09-28', 6869.050000),
# ('2018-10-01', 6899.410000),
# ('2018-10-03', 6817.250000),
# ('2018-10-04', 6670.980000),
# ('2018-10-05', 6496.250000),
# ('2018-10-08', 6445.230000),
# ('2018-10-09', 6418.270000),
# ('2018-10-10', 6595.730000),
# ('2018-10-11', 6456.810000),
# ('2018-10-12', 6614.580000),
# ('2018-10-15', 6656.060000),
# ('2018-10-16', 6730.820000),
# ('2018-10-17', 6618.150000),
# ('2018-10-19', 6526.830000),
# ('2018-10-22', 6466.690000),
# ('2018-10-23', 6403.430000),
# ('2018-10-24', 6462.760000),
# ('2018-10-25', 6414.010000),
# ('2018-10-26', 6384.810000),
# ('2018-10-29', 6542.020000),
# ('2018-10-30', 6554.450000),
# ('2018-10-31', 6671.990000),
# ('2018-11-01', 6699.850000),
# ('2018-11-02', 6771.850000),
# ('2018-11-05', 6752.190000),
# ('2018-11-06', 6736.830000),
# ('2018-11-07', 6787.280000),
# ('2018-11-09', 6818.610000),
# ('2018-11-12', 6754.890000),
# ('2018-11-13', 6793.100000),
# ('2018-11-14', 6795.820000),
# ('2018-11-15', 6826.560000),
# ('2018-11-16', 6843.400000),
# ('2018-11-19', 6882.130000),
# ('2018-11-20', 6815.910000),
# ('2018-11-21', 6815.650000),
# ('2018-11-22', 6764.760000),
# ('2018-11-26', 6804.770000),
# ('2018-11-27', 6837.820000),
# ('2018-11-28', 6822.900000),
# ('2018-11-29', 6878.830000),
# ('2018-11-30', 6908.640000),
# ('2018-12-03', 6931.300000),
# ('2018-12-04', 6924.410000),
# ('2018-12-05', 6852.660000),
# ('2018-12-06', 6743.960000),
# ('2018-12-07', 6770.710000),
# ('2018-12-10', 6639.280000),
# ('2018-12-11', 6712.930000),
# ('2018-12-12', 6859.350000),
# ('2018-12-13', 6906.600000),
# ('2018-12-14', 6917.200000),
# ('2018-12-17', 6956.830000),
# ('2018-12-18', 6973.550000),
# ('2018-12-19', 7037.250000),
# ('2018-12-20', 7032.780000),
# ('2018-12-21', 6923.190000),
# ('2018-12-24', 6869.010000),
# ('2018-12-26', 6884.310000),
# ('2018-12-27', 6913.300000),
# ('2018-12-28', 6974.000000),
# ('2018-12-31', 6995.890000),
# ('2019-01-01', 7009.390000),
# ('2019-01-02', 6935.850000),
# ('2019-01-03', 6875.990000),
# ('2019-01-04', 6907.220000),
# ('2019-01-07', 6920.510000),
# ('2019-01-08', 6927.410000),
# ('2019-01-09', 6939.510000),
# ('2019-01-10', 6947.420000),
# ('2019-01-11', 6935.080000),
# ('2019-01-14', 6904.260000),
# ('2019-01-15', 6967.870000),
# ('2019-01-16', 6963.310000),
# ('2019-01-17', 6959.030000),
# ('2019-01-18', 6937.280000),
# ('2019-01-21', 6926.940000),
# ('2019-01-22', 6913.720000),
# ('2019-01-23', 6877.710000),
# ('2019-01-24', 6873.330000),
# ('2019-01-25', 6802.990000),
# ('2019-01-28', 6705.320000),
# ('2019-01-29', 6710.940000),
# ('2019-01-30', 6729.150000),
# ('2019-01-31', 6799.710000),
# ('2019-02-01', 6838.350000),
# ('2019-02-04', 6817.230000),
# ('2019-02-05', 6794.570000),
# ('2019-02-06', 6837.670000),
# ('2019-02-07', 6867.260000),
# ('2019-02-08', 6786.490000),
# ('2019-02-11', 6716.530000),
# ('2019-02-12', 6693.470000),
# ('2019-02-13', 6669.500000),
# ('2019-02-14', 6672.020000),
# ('2019-02-15', 6633.190000),
# ('2019-02-18', 6571.910000),
# ('2019-02-19', 6577.620000),
# ('2019-02-20', 6648.040000),
# ('2019-02-21', 6697.900000),
# ('2019-02-22', 6716.360000),
# ('2019-02-25', 6755.130000),
# ('2019-02-26', 6739.420000),
# ('2019-02-27', 6746.240000),
# ('2019-02-28', 6763.650000),
# ('2019-03-01', 6831.100000),
# ('2019-03-05', 6943.150000),
# ('2019-03-06', 6979.370000),
# ('2019-03-07', 6969.600000),
# ('2019-03-08', 6959.790000),
# ('2019-03-11', 7073.860000),
# ('2019-03-12', 7132.870000),
# ('2019-03-13', 7124.840000),
# ('2019-03-14', 7124.490000),
# ('2019-03-15', 7169.800000),
# ('2019-03-18', 7169.380000),
# ('2019-03-19', 7209.480000),
# ('2019-03-20', 7190.990000),
# ('2019-03-22', 7155.890000),
# ('2019-03-25', 7093.620000),
# ('2019-03-26', 7167.320000),
# ('2019-03-27', 7175.120000),
# ('2019-03-28', 7254.360000),
# ('2019-03-29', 7310.400000),
# ('2019-04-01', 7334.140000),
# ('2019-04-02', 7344.440000),
# ('2019-04-03', 7288.580000),
# ('2019-04-04', 7270.900000),
# ('2019-04-05', 7316.570000),
# ('2019-04-08', 7272.060000),
# ('2019-04-09', 7295.520000),
# ('2019-04-10', 7263.890000),
# ('2019-04-11', 7270.840000),
# ('2019-04-12', 7299.310000),
# ('2019-04-15', 7332.520000),
# ('2019-04-16', 7365.780000),
# ('2019-04-18', 7323.180000),
# ('2019-04-22', 7216.200000),
# ('2019-04-23', 7214.450000),
# ('2019-04-24', 7267.750000),
# ('2019-04-25', 7229.110000),
# ('2019-04-26', 7249.700000),
# ('2019-04-30', 7217.160000),
# ('2019-05-02', 7188.390000),
# ('2019-05-03', 7181.780000),
# ('2019-05-06', 7119.610000),
# ('2019-05-07', 7055.650000),
# ('2019-05-08', 6981.700000),
# ('2019-05-09', 6958.040000),
# ('2019-05-10', 6963.450000),
# ('2019-05-13', 6852.450000),
# ('2019-05-14', 6886.800000),
# ('2019-05-15', 6848.760000),
# ('2019-05-16', 6884.520000),
# ('2019-05-17', 6961.940000),
# ('2019-05-20', 7215.920000),
# ('2019-05-21', 7145.340000),
# ('2019-05-22', 7157.590000),
# ('2019-05-23', 7133.080000),
# ('2019-05-24', 7262.390000),
# ('2019-05-27', 7345.100000),
# ('2019-05-28', 7356.070000),
# ('2019-05-29', 7311.520000),
# ('2019-05-30', 7356.360000),
# ('2019-05-31', 7355.000000),
# ('2019-06-03', 7435.290000),
# ('2019-06-04', 7413.330000),
# ('2019-06-06', 7300.730000),
# ('2019-06-07', 7302.980000),
# ('2019-06-10', 7322.190000),
# ('2019-06-11', 7358.140000),
# ('2019-06-12', 7308.650000),
# ('2019-06-13', 7303.490000),
# ('2019-06-14', 7244.360000),
# ('2019-06-17', 7153.610000),
# ('2019-06-18', 7155.210000),
# ('2019-06-19', 7120.510000),
# ('2019-06-20', 7216.540000),
# ('2019-06-21', 7187.310000),
# ('2019-06-24', 7171.930000),
# ('2019-06-25', 7221.850000),
# ('2019-06-26', 7266.830000),
# ('2019-06-27', 7285.290000),
# ('2019-06-28', 7266.230000),
# ('2019-07-01', 7297.340000),
# ('2019-07-02', 7321.880000),
# ('2019-07-03', 7331.800000),
# ('2019-07-04', 7336.490000),
# ('2019-07-05', 7238.450000),
# ('2019-07-08', 7074.340000),
# ('2019-07-09', 7086.810000),
# ('2019-07-10', 7037.420000),
# ('2019-07-11', 7090.990000),
# ('2019-07-12', 7097.330000),
# ('2019-07-15', 7084.960000),
# ('2019-07-16', 7133.020000),
# ('2019-07-17', 7133.400000),
# ('2019-07-18', 7054.840000),
# ('2019-07-19', 6937.470000),
# ('2019-07-22', 6896.910000),
# ('2019-07-23', 6879.450000),
# ('2019-07-24', 6811.440000),
# ('2019-07-25', 6826.290000),
# ('2019-07-26', 6855.440000),
# ('2019-07-29', 6795.180000),
# ('2019-07-30', 6706.310000),
# ('2019-07-31', 6754.900000),
# ('2019-08-01', 6683.280000),
# ('2019-08-02', 6686.490000),
# ('2019-08-05', 6601.210000),
# ('2019-08-06', 6685.890000),
# ('2019-08-07', 6644.000000),
# ('2019-08-08', 6719.020000),
# ('2019-08-09', 6775.170000),
# ('2019-08-13', 6655.100000),
# ('2019-08-14', 6716.420000),
# ('2019-08-16', 6733.490000),
# ('2019-08-19', 6745.970000),
# ('2019-08-20', 6709.800000),
# ('2019-08-21', 6633.790000),
# ('2019-08-22', 6525.190000),
# ('2019-08-23', 6580.740000),
# ('2019-08-26', 6693.680000),
# ('2019-08-27', 6740.680000),
# ('2019-08-28', 6694.270000),
# ('2019-08-29', 6655.210000),
# ('2019-08-30', 6712.110000),
# ('2019-09-03', 6592.120000),
# ('2019-09-04', 6615.460000),
# ('2019-09-05', 6626.800000),
# ('2019-09-06', 6676.630000),
# ('2019-09-09', 6728.850000),
# ('2019-09-11', 6767.220000),
# ('2019-09-12', 6744.810000),
# ('2019-09-13', 6790.160000),
# ('2019-09-16', 6767.290000),
# ('2019-09-17', 6649.060000),
# ('2019-09-18', 6669.320000),
# ('2019-09-19', 6579.780000),
# ('2019-09-20', 6942.110000),
# ('2019-09-23', 7132.290000),
# ('2019-09-24', 7111.720000),
# ('2019-09-25', 7007.770000),
# ('2019-09-26', 7075.820000),
# ('2019-09-27', 7029.990000),
# ('2019-09-30', 6969.460000),
# ('2019-10-01', 6878.070000),
# ('2019-10-03', 6859.160000),
# ('2019-10-04', 6789.030000),
# ('2019-10-07', 6760.230000),
# ('2019-10-09', 6854.440000),
# ('2019-10-10', 6798.440000),
# ('2019-10-11', 6826.310000),
# ('2019-10-14', 6848.160000),
# ('2019-10-15', 6884.930000),
# ('2019-10-16', 6892.300000),
# ('2019-10-17', 6990.210000),
# ('2019-10-18', 7077.000000),
# ('2019-10-22', 7063.920000),
# ('2019-10-23', 7074.620000),
# ('2019-10-24', 7060.200000),
# ('2019-10-25', 7053.900000),
# ('2019-10-27', 7096.340000),
# ('2019-10-29', 7179.960000),
# ('2019-10-30', 7215.430000),
# ('2019-10-31', 7272.360000),
# ('2019-11-01', 7287.810000),
# ('2019-11-04', 7297.260000),
# ('2019-11-05', 7253.730000),
# ('2019-11-06', 7263.890000),
# ('2019-11-07', 7302.950000),
# ('2019-11-08', 7248.060000),
# ('2019-11-11', 7269.870000),
# ('2019-11-13', 7219.070000),
# ('2019-11-14', 7234.610000),
# ('2019-11-15', 7251.170000),
# ('2019-11-18', 7271.920000),
# ('2019-11-19', 7295.430000),
# ('2019-11-20', 7320.500000),
# ('2019-11-21', 7285.190000),
# ('2019-11-25', 7356.400000),
# ('2019-11-26', 7316.490000),
# ('2019-11-27', 7362.880000),
# ('2019-11-28', 7411.480000),
# ('2019-11-29', 7390.460000),
# ('2019-12-02', 7368.650000),
# ('2019-12-03', 7316.590000),
# ('2019-12-04', 7352.270000),
# ('2019-12-05', 7333.740000),
# ('2019-12-06', 7256.380000),
# ('2019-12-09', 7259.870000),
# ('2019-12-10', 7193.810000),
# ('2019-12-11', 7230.710000),
# ('2019-12-12', 7276.670000),
# ('2019-12-13', 7345.340000),
# ('2019-12-16', 7318.210000),
# ('2019-12-17', 7368.300000),
# ('2019-12-18', 7375.430000),
# ('2019-12-19', 7392.510000),
# ('2019-12-20', 7397.960000),
# ('2019-12-23', 7389.240000),
# ('2019-12-24', 7372.090000),
# (end of archived daily index close-price data)
elif match_count == 4:
# remove swap location from matches and add to bonus list
bonus_list.extend(self.get_bonus_list(row, column, temp_matches, 1))
# if length of match list >= 3, add temp matches to matches
if match_count >= 3:
match_list.extend(temp_matches)
return match_list, bonus_list
def remove_row(self, row, column):
"""
Adds entire row to list
:param column:
:param row:
:return:
"""
gem_list = []
for j in range(self.columns):
if self.get_gem_info(row, j) != self.get_gem_info(row, column):
gem_list.append(self.get_gem_info(row, j))
return gem_list
def remove_column(self, row, column):
"""
Add entire column to list
:param row:
:param column:
:return:
"""
gem_list = []
for i in range(self.rows):
if self.get_gem_info(i, column) != self.get_gem_info(row, column):
gem_list.append(self.get_gem_info(i, column))
return gem_list
def remove_all_gems_of_type(self, gem_type: int, row, column):
"""
Add all gems of type gem_type to list
:param column:
:param row:
:param gem_type:
:return:
"""
gem_list = []
for i, j in product(range(self.rows), range(self.columns)):
if self.gem_grid.grid[i][j][0] == gem_type and self.get_gem_info(i, j) != self.get_gem_info(row, column):
gem_list.append(self.get_gem_info(i, j))
return gem_list
def remove_surrounding_gems(self, row: int, column: int):
"""
Add 9 surrounding gems of location row,column to list.
:param row:
:param column:
:return:
"""
gem_list = []
row_max = min(row + 2, self.rows)
row_min = max(row - 1, 0)
col_max = min(column + 2, self.columns)
col_min = max(column - 1, 0)
for i, j in product(range(row_min, row_max), range(col_min, col_max)):
if self.get_gem_info(i, j) != self.get_gem_info(row, column):
gem_list.append(self.get_gem_info(i, j))
return gem_list
def get_row_match(self, row: int, column: int):
"""
rows match count
:param row:
:param column:
:return:
"""
columns = self.columns
grid = self.gem_grid.grid
match_index = column + 1
match_list = [self.get_gem_info(row, column)]
# check if its a match
while match_index < columns and grid[row][column][0] == grid[row][match_index][0]:
# if match, append to list
match_list.append(self.get_gem_info(row, match_index))
match_index += 1
return match_list
def get_column_match(self, row: int, column: int):
"""
find matches along the column
:param row:
:param column:
:return:
"""
rows = self.rows
grid = self.gem_grid.grid
match_index = row + 1
match_list = [self.get_gem_info(row, column)]
# check if its a match
while match_index < rows and grid[row][column][0] == grid[match_index][column][0]:
# if match, append to list
match_list.append(self.get_gem_info(match_index, column))
match_index += 1
return match_list
def get_gem_info(self, row: int, column: int, new_bonus=None, top_row=False):
"""
Return the coordinates of the gem, along with its
type and bonus type. This information is returned
as a tuple. The structure is:
(row, column, type, bonus_type, activation
:param top_row:
:param new_bonus:
:param row:
:param column:
:return:
"""
if top_row:
gem = self.gem_grid.grid[row + 1][column]
else:
gem = self.gem_grid.grid[row][column]
if new_bonus is None:
return row, column, gem[0], gem[1], gem[2]
else:
return row, column, gem[0], new_bonus, gem[2]
def get_bonus_list(self, row, column, temp_matches, bonus_type):
"""
Determines the location of the bonus
:param row:
:param column:
:param temp_matches:
:param bonus_type:
:return:
"""
bonus_list = []
reduced_temp_matches = [(i, j) for i, j, t, bt, a in temp_matches]
# create bonus at swap location otherwise at the first location of the match
if self.swapped_gems[0][:2] in reduced_temp_matches:
# bonus at first swap location
i, j = self.swapped_gems[0][:2]
bonus_gem = self.get_gem_info(i, j, bonus_type)
bonus_list.append(bonus_gem)
elif self.swapped_gems[1][:2] in reduced_temp_matches:
# bonus at second swap location
i, j = self.swapped_gems[1][:2]
bonus_gem = self.get_gem_info(i, j, bonus_type)
bonus_list.append(bonus_gem)
else:
# bonus at first location in match
bonus_gem = self.get_gem_info(row, column, bonus_type)
bonus_list.append(bonus_gem)
return bonus_list
def remove_gems_add_bonuses(self, init=False):
"""
This loops over the gems in the match_list and
removes them all from the grid.
:return:
"""
self.ice_removed = []
for row, column, gem_type, bonus_type, activation in self.match_list:
self.gem_grid.grid[row][column] = -1
if not init:
self.remove_ice(row, column)
# try to free medals after removing ice
self.free_medals()
for row, column, gem_type, bonus_type, activation in self.bonus_list:
self.gem_grid.grid[row][column] = (gem_type, bonus_type, activation)
if not init:
self.remove_ice(row, column)
def pull_gems_down(self):
"""
Pulls gems down vertically to simulate gravity.
We create new gems if required.
At the end of this method the gems will be at the position
listed in new_positions.
:return:
"""
repeat = False
original_positions = []
new_positions = []
additions = []
grid = self.gem_grid.grid
for j in range(self.columns):
for i in range(self.rows - 1, 0, -1):
# start from bottom row
if grid[i][j] == -1 and grid[i - 1][j] != -1:
# if cell j,i is empty and the cell above is not empty, swap them.
repeat = True
original_positions.append(self.get_gem_info(i - 1, j))
grid[i][j], grid[i - 1][j] = grid[i - 1][j], -1
new_positions.append(self.get_gem_info(i, j))
if grid[0][j] == -1:
# if empty in the top row, create new gem, add to additions list
# and set original position to above top row,
# and new position to top row.
repeat = True
gem = self.new_gem()
grid[0][j] = gem
additions.append(self.get_gem_info(-1, j, top_row=True))
original_positions.append(self.get_gem_info(-1, j, top_row=True))
new_positions.append(self.get_gem_info(0, j))
self.additions = additions
self.movements = [original_positions, new_positions]
return repeat
def remove_ice(self, row: int, column: int):
"""
If ice is present in the grid cell,
reduce the layer by 1.
:param row:
:param column:
:return:
"""
grid = self.ice_grid.grid
if grid[row][column] != -1:
# if there is ice, decrease the layer by 1
new_layer = grid[row][column] - 1
grid[row][column] = new_layer
# add to the ice_removed list
self.ice_removed.append((row, column, new_layer))
def free_medals(self):
"""
Loops over the medal locations list and
frees any completely uncovered medals.
:return:
"""
self.medals_removed = []
ice_grid = self.ice_grid.grid
medal_grid = self.medal_grid.grid
for row, column in product(range(self.rows), range(self.columns)):
if ice_grid[row][column] == -1 and medal_grid[row][column] == 0 and self.is_freeable_medal(row, column):
# medal is completely uncovered, remove it from grid
self.remove_medal(row, column)
# decrement medals
self.medals_remaining -= 1
def remove_medal(self, row: int, column: int):
"""
Loops over the 4 portions of the medal
and sets them to -1.
Also removes medal portions from medal_locations list.
:param row:
:param column:
:return:
"""
for i, j in product(range(2), range(2)):
# remove from grid
self.medal_grid.grid[row + i][column + j] = -1
# remove from medal locations list
portion = j + 2 * i
# self.medal_locations.remove((row + i, column + j, portion))
# add to medals removed list
self.medals_removed.append((row + i, column + j, portion))
def is_freeable_medal(self, row: int, column: int):
"""
Returns true if medal completely uncovered from ice,
otherwise return false
:param row:
:param column:
:return:
"""
for i, j in product(range(2), range(2)):
if self.ice_grid.grid[row + i][column + j] != -1:
return False
return True
def get_game_state(self):
"""
returns a tuple of 4 of arrays.
(gem_type, bonus_type, ice, medal_portion)
:return:
"""
# get medals uncovered and score
medals_uncovered = self.total_medals - self.medals_remaining
score = self.score
game_state = str(score) + '\t' + str(medals_uncovered) + '\t'
gems = self.gem_grid_copy
ice = self.ice_grid.grid
medals = self.medal_grid.grid
for i in range(self.rows):
for j in range(self.columns):
# get type, bonus_type, ice_layer
s = str(gems[i][j][0]) + '\t' + str(gems[i][j][1]) + '\t' + str(ice[i][j]) + '\t'
# get medal portion
m = -1
if self.medal_state[i][j] != -1:
m = self.medal_state[i][j]
elif ice[i][j] == -1:
m = medals[i][j]
self.medal_state[i][j] = m
# combine to get t, bt, ice_layer, portion
s = s + str(m) + '\t'
game_state += s
return game_state
def get_progress_state(self):
"""
Returns a string representing the progress state
in the form:
'medals_uncovered score action' eg:
'1 \t 900 \t 0102'
where action is: row1 column1 row2 column2
The action is the action TO BE performed from the
current state.
:return:
"""
# unpack the swap locations to get the 'action'
action = self.action
action = str(action[0][0]) + '-' + str(action[0][1]) + '-' + str(action[1][0]) + '-' + str(action[1][1])
return action
def get_obscured_game_state(self):
gem_grid = deepcopy(self.gem_grid.grid)
ice_grid = deepcopy(self.ice_grid.grid)
medal_grid = self.medal_grid.grid
medal_grid = [[medal if ice else -1 for ice, medal in zip(*rows)] for rows in zip(ice_grid, medal_grid)]
moves_medals = (self.moves_remaining, self.medals_remaining)
return gem_grid, ice_grid, medal_grid, moves_medals
def get_full_game_state(self):
gem_grid = deepcopy(self.gem_grid.grid)
ice_grid = deepcopy(self.ice_grid.grid)
medal_grid = deepcopy(self.medal_grid.grid)
moves_medals = (self.moves_remaining, self.medals_remaining)
return gem_grid, ice_grid, medal_grid, moves_medals
def move_made(self):
self.moves_remaining -= 1
def check_medal_boundaries(self, y_coord: int, x_coord: int):
"""
Method to check is a medal can be added at a certain location
:param y_coord: y coordinate to check | |
self._state.following.append(self.FOLLOW_else_if_clause_in_if_stmt543)
else_if_clause50 = self.else_if_clause()
self._state.following.pop()
if self._state.backtracking == 0:
self._adaptor.addChild(root_0, else_if_clause50.tree)
else:
break #loop9
# Expr.g:99:30: ( else_clause )?
alt10 = 2
LA10_0 = self.input.LA(1)
if (LA10_0 == 112) :
alt10 = 1
if alt10 == 1:
# Expr.g:99:30: else_clause
pass
self._state.following.append(self.FOLLOW_else_clause_in_if_stmt546)
else_clause51 = self.else_clause()
self._state.following.pop()
if self._state.backtracking == 0:
self._adaptor.addChild(root_0, else_clause51.tree)
retval.stop = self.input.LT(-1)
if self._state.backtracking == 0:
retval.tree = self._adaptor.rulePostProcessing(root_0)
self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)
except RecognitionException, re:
self.reportError(re)
self.recover(self.input, re)
retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)
finally:
pass
return retval
# $ANTLR end "if_stmt"
    class if_clause_return(ParserRuleReturnScope):
        """ANTLR-generated return scope for the ``if_clause`` rule."""
        def __init__(self):
            super(ExprParser.if_clause_return, self).__init__()
            # AST produced by the rule; filled in by if_clause()
            self.tree = None
    # $ANTLR start "if_clause"
    # Expr.g:101:1: if_clause : 'if' '(' expr ')' block -> ^( IF expr block ) ;
    def if_clause(self, ):
        """ANTLR-generated parser rule (machine-generated; do not hand-edit).

        Grammar: if_clause : 'if' '(' expr ')' block -> ^( IF expr block ) ;
        Matches an 'if' header and its block, rewriting them into an IF node.
        """
        retval = self.if_clause_return()
        retval.start = self.input.LT(1)

        root_0 = None

        string_literal52 = None
        char_literal53 = None
        char_literal55 = None
        expr54 = None
        block56 = None

        string_literal52_tree = None
        char_literal53_tree = None
        char_literal55_tree = None
        stream_75 = RewriteRuleTokenStream(self._adaptor, "token 75")
        stream_118 = RewriteRuleTokenStream(self._adaptor, "token 118")
        stream_76 = RewriteRuleTokenStream(self._adaptor, "token 76")
        stream_block = RewriteRuleSubtreeStream(self._adaptor, "rule block")
        stream_expr = RewriteRuleSubtreeStream(self._adaptor, "rule expr")
        try:
            try:
                # Expr.g:102:2: ( 'if' '(' expr ')' block -> ^( IF expr block ) )
                # Expr.g:102:4: 'if' '(' expr ')' block
                pass
                # consume 'if' (token 118) and '(' (token 75)
                string_literal52 = self.match(self.input, 118, self.FOLLOW_118_in_if_clause557)
                if self._state.backtracking == 0:
                    stream_118.add(string_literal52)

                char_literal53 = self.match(self.input, 75, self.FOLLOW_75_in_if_clause559)
                if self._state.backtracking == 0:
                    stream_75.add(char_literal53)

                # parse the condition expression
                self._state.following.append(self.FOLLOW_expr_in_if_clause561)
                expr54 = self.expr()
                self._state.following.pop()
                if self._state.backtracking == 0:
                    stream_expr.add(expr54.tree)

                # consume ')' (token 76), then parse the body block
                char_literal55 = self.match(self.input, 76, self.FOLLOW_76_in_if_clause563)
                if self._state.backtracking == 0:
                    stream_76.add(char_literal55)

                self._state.following.append(self.FOLLOW_block_in_if_clause565)
                block56 = self.block()
                self._state.following.pop()
                if self._state.backtracking == 0:
                    stream_block.add(block56.tree)

                # AST Rewrite
                # elements: block, expr
                # token labels:
                # rule labels: retval
                # token list labels:
                # rule list labels:
                # wildcard labels:
                if self._state.backtracking == 0:
                    retval.tree = root_0
                    if retval is not None:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                    else:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                    root_0 = self._adaptor.nil()
                    # 103:3: -> ^( IF expr block )
                    # Expr.g:103:6: ^( IF expr block )
                    root_1 = self._adaptor.nil()
                    root_1 = self._adaptor.becomeRoot(
                        self._adaptor.createFromType(IF, "IF")
                        , root_1)

                    self._adaptor.addChild(root_1, stream_expr.nextTree())
                    self._adaptor.addChild(root_1, stream_block.nextTree())

                    self._adaptor.addChild(root_0, root_1)

                    retval.tree = root_0

                retval.stop = self.input.LT(-1)

                if self._state.backtracking == 0:
                    retval.tree = self._adaptor.rulePostProcessing(root_0)
                    self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

        finally:
            pass
        return retval
    # $ANTLR end "if_clause"
    class else_if_clause_return(ParserRuleReturnScope):
        """ANTLR-generated return scope for the ``else_if_clause`` rule."""
        def __init__(self):
            super(ExprParser.else_if_clause_return, self).__init__()
            # AST produced by the rule; filled in by else_if_clause()
            self.tree = None
    # $ANTLR start "else_if_clause"
    # Expr.g:105:1: else_if_clause : 'else' if_clause -> ^( ELSE_IF if_clause ) ;
    def else_if_clause(self, ):
        """ANTLR-generated parser rule (machine-generated; do not hand-edit).

        Grammar: else_if_clause : 'else' if_clause -> ^( ELSE_IF if_clause ) ;
        Matches an 'else' followed by an if_clause, rewritten to ELSE_IF.
        """
        retval = self.else_if_clause_return()
        retval.start = self.input.LT(1)

        root_0 = None

        string_literal57 = None
        if_clause58 = None

        string_literal57_tree = None
        stream_112 = RewriteRuleTokenStream(self._adaptor, "token 112")
        stream_if_clause = RewriteRuleSubtreeStream(self._adaptor, "rule if_clause")
        try:
            try:
                # Expr.g:106:2: ( 'else' if_clause -> ^( ELSE_IF if_clause ) )
                # Expr.g:106:4: 'else' if_clause
                pass
                # consume 'else' (token 112)
                string_literal57 = self.match(self.input, 112, self.FOLLOW_112_in_else_if_clause587)
                if self._state.backtracking == 0:
                    stream_112.add(string_literal57)

                # parse the trailing if_clause
                self._state.following.append(self.FOLLOW_if_clause_in_else_if_clause589)
                if_clause58 = self.if_clause()
                self._state.following.pop()
                if self._state.backtracking == 0:
                    stream_if_clause.add(if_clause58.tree)

                # AST Rewrite
                # elements: if_clause
                # token labels:
                # rule labels: retval
                # token list labels:
                # rule list labels:
                # wildcard labels:
                if self._state.backtracking == 0:
                    retval.tree = root_0
                    if retval is not None:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                    else:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                    root_0 = self._adaptor.nil()
                    # 107:3: -> ^( ELSE_IF if_clause )
                    # Expr.g:107:6: ^( ELSE_IF if_clause )
                    root_1 = self._adaptor.nil()
                    root_1 = self._adaptor.becomeRoot(
                        self._adaptor.createFromType(ELSE_IF, "ELSE_IF")
                        , root_1)

                    self._adaptor.addChild(root_1, stream_if_clause.nextTree())

                    self._adaptor.addChild(root_0, root_1)

                    retval.tree = root_0

                retval.stop = self.input.LT(-1)

                if self._state.backtracking == 0:
                    retval.tree = self._adaptor.rulePostProcessing(root_0)
                    self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

        finally:
            pass
        return retval
    # $ANTLR end "else_if_clause"
    class else_clause_return(ParserRuleReturnScope):
        """ANTLR-generated return scope for the ``else_clause`` rule."""
        def __init__(self):
            super(ExprParser.else_clause_return, self).__init__()
            # AST produced by the rule; filled in by else_clause()
            self.tree = None
    # $ANTLR start "else_clause"
    # Expr.g:109:1: else_clause : 'else' block -> ^( ELSE block ) ;
    def else_clause(self, ):
        """ANTLR-generated parser rule (machine-generated; do not hand-edit).

        Grammar: else_clause : 'else' block -> ^( ELSE block ) ;
        Matches an 'else' followed by a block, rewritten to an ELSE node.
        """
        retval = self.else_clause_return()
        retval.start = self.input.LT(1)

        root_0 = None

        string_literal59 = None
        block60 = None

        string_literal59_tree = None
        stream_112 = RewriteRuleTokenStream(self._adaptor, "token 112")
        stream_block = RewriteRuleSubtreeStream(self._adaptor, "rule block")
        try:
            try:
                # Expr.g:110:2: ( 'else' block -> ^( ELSE block ) )
                # Expr.g:110:4: 'else' block
                pass
                # consume 'else' (token 112)
                string_literal59 = self.match(self.input, 112, self.FOLLOW_112_in_else_clause609)
                if self._state.backtracking == 0:
                    stream_112.add(string_literal59)

                # parse the else body block
                self._state.following.append(self.FOLLOW_block_in_else_clause611)
                block60 = self.block()
                self._state.following.pop()
                if self._state.backtracking == 0:
                    stream_block.add(block60.tree)

                # AST Rewrite
                # elements: block
                # token labels:
                # rule labels: retval
                # token list labels:
                # rule list labels:
                # wildcard labels:
                if self._state.backtracking == 0:
                    retval.tree = root_0
                    if retval is not None:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                    else:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                    root_0 = self._adaptor.nil()
                    # 111:3: -> ^( ELSE block )
                    # Expr.g:111:6: ^( ELSE block )
                    root_1 = self._adaptor.nil()
                    root_1 = self._adaptor.becomeRoot(
                        self._adaptor.createFromType(ELSE, "ELSE")
                        , root_1)

                    self._adaptor.addChild(root_1, stream_block.nextTree())

                    self._adaptor.addChild(root_0, root_1)

                    retval.tree = root_0

                retval.stop = self.input.LT(-1)

                if self._state.backtracking == 0:
                    retval.tree = self._adaptor.rulePostProcessing(root_0)
                    self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

        finally:
            pass
        return retval
    # $ANTLR end "else_clause"
    class while_stmt_return(ParserRuleReturnScope):
        """ANTLR-generated return scope for the ``while_stmt`` rule."""
        def __init__(self):
            super(ExprParser.while_stmt_return, self).__init__()
            # AST produced by the rule; filled in by while_stmt()
            self.tree = None
    # $ANTLR start "while_stmt"
    # Expr.g:114:1: while_stmt : 'while' '(' expr ')' block -> ^( WHILE expr block ) ;
    def while_stmt(self, ):
        """ANTLR-generated parser rule (machine-generated; do not hand-edit).

        Grammar: while_stmt : 'while' '(' expr ')' block -> ^( WHILE expr block ) ;
        Matches a while header and its block, rewritten into a WHILE node.
        """
        retval = self.while_stmt_return()
        retval.start = self.input.LT(1)

        root_0 = None

        string_literal61 = None
        char_literal62 = None
        char_literal64 = None
        expr63 = None
        block65 = None

        string_literal61_tree = None
        char_literal62_tree = None
        char_literal64_tree = None
        stream_131 = RewriteRuleTokenStream(self._adaptor, "token 131")
        stream_75 = RewriteRuleTokenStream(self._adaptor, "token 75")
        stream_76 = RewriteRuleTokenStream(self._adaptor, "token 76")
        stream_block = RewriteRuleSubtreeStream(self._adaptor, "rule block")
        stream_expr = RewriteRuleSubtreeStream(self._adaptor, "rule expr")
        try:
            try:
                # Expr.g:115:2: ( 'while' '(' expr ')' block -> ^( WHILE expr block ) )
                # Expr.g:115:4: 'while' '(' expr ')' block
                pass
                # consume 'while' (token 131) and '(' (token 75)
                string_literal61 = self.match(self.input, 131, self.FOLLOW_131_in_while_stmt632)
                if self._state.backtracking == 0:
                    stream_131.add(string_literal61)

                char_literal62 = self.match(self.input, 75, self.FOLLOW_75_in_while_stmt634)
                if self._state.backtracking == 0:
                    stream_75.add(char_literal62)

                # parse the loop condition expression
                self._state.following.append(self.FOLLOW_expr_in_while_stmt636)
                expr63 = self.expr()
                self._state.following.pop()
                if self._state.backtracking == 0:
                    stream_expr.add(expr63.tree)

                # consume ')' (token 76), then parse the loop body block
                char_literal64 = self.match(self.input, 76, self.FOLLOW_76_in_while_stmt638)
                if self._state.backtracking == 0:
                    stream_76.add(char_literal64)

                self._state.following.append(self.FOLLOW_block_in_while_stmt640)
                block65 = self.block()
                self._state.following.pop()
                if self._state.backtracking == 0:
                    stream_block.add(block65.tree)

                # AST Rewrite
                # elements: expr, block
                # token labels:
                # rule labels: retval
                # token list labels:
                # rule list labels:
                # wildcard labels:
                if self._state.backtracking == 0:
                    retval.tree = root_0
                    if retval is not None:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "rule retval", retval.tree)
                    else:
                        stream_retval = RewriteRuleSubtreeStream(self._adaptor, "token retval", None)

                    root_0 = self._adaptor.nil()
                    # 116:3: -> ^( WHILE expr block )
                    # Expr.g:116:6: ^( WHILE expr block )
                    root_1 = self._adaptor.nil()
                    root_1 = self._adaptor.becomeRoot(
                        self._adaptor.createFromType(WHILE, "WHILE")
                        , root_1)

                    self._adaptor.addChild(root_1, stream_expr.nextTree())
                    self._adaptor.addChild(root_1, stream_block.nextTree())

                    self._adaptor.addChild(root_0, root_1)

                    retval.tree = root_0

                retval.stop = self.input.LT(-1)

                if self._state.backtracking == 0:
                    retval.tree = self._adaptor.rulePostProcessing(root_0)
                    self._adaptor.setTokenBoundaries(retval.tree, retval.start, retval.stop)

            except RecognitionException, re:
                self.reportError(re)
                self.recover(self.input, re)
                retval.tree = self._adaptor.errorNode(self.input, retval.start, self.input.LT(-1), re)

        finally:
            pass
        return retval
    # $ANTLR end "while_stmt"
    class do_while_stmt_return(ParserRuleReturnScope):
        """ANTLR-generated return scope for the ``do_while_stmt`` rule."""
        def __init__(self):
            super(ExprParser.do_while_stmt_return, self).__init__()
            # AST produced by the rule; filled in by do_while_stmt()
            self.tree = None
# $ANTLR start "do_while_stmt"
# Expr.g:119:1: do_while_stmt : 'do' block 'while' '(' expr ')' ';' -> ^( DO_WHILE block expr ) ;
def do_while_stmt(self, ):
retval = self.do_while_stmt_return()
retval.start = self.input.LT(1)
root_0 = None
string_literal66 = None
string_literal68 = None
char_literal69 = None
char_literal71 = None
char_literal72 = None
block67 = None
expr70 = None
string_literal66_tree = None
string_literal68_tree = None
char_literal69_tree = None
char_literal71_tree = None
char_literal72_tree = None
stream_92 = RewriteRuleTokenStream(self._adaptor, "token 92")
stream_111 = RewriteRuleTokenStream(self._adaptor, "token 111")
stream_131 = RewriteRuleTokenStream(self._adaptor, "token 131")
stream_75 = RewriteRuleTokenStream(self._adaptor, "token 75")
stream_76 = RewriteRuleTokenStream(self._adaptor, "token 76")
stream_block = RewriteRuleSubtreeStream(self._adaptor, "rule block")
stream_expr = RewriteRuleSubtreeStream(self._adaptor, "rule expr")
try:
try:
# Expr.g:120:2: ( 'do' block 'while' '(' expr ')' ';' -> ^( DO_WHILE block expr ) )
# Expr.g:120:4: 'do' block 'while' '(' expr ')' ';'
pass
string_literal66 = self.match(self.input, 111, self.FOLLOW_111_in_do_while_stmt663)
if self._state.backtracking == 0:
stream_111.add(string_literal66)
self._state.following.append(self.FOLLOW_block_in_do_while_stmt665)
block67 = self.block()
self._state.following.pop()
if self._state.backtracking == 0:
stream_block.add(block67.tree)
string_literal68 = self.match(self.input, 131, self.FOLLOW_131_in_do_while_stmt667)
if self._state.backtracking == 0:
stream_131.add(string_literal68)
char_literal69 = | |
Constraint(expr= m.b5 - m.b13 + m.b40 <= 1)
# NOTE(review): machine-generated Pyomo constraint block. Every row links
# three binary variables as b_i - b_j + b_k <= 1; the pattern looks like
# transitivity-style cuts over pairwise indicator variables — confirm
# against the model generator before editing by hand.
m.c94 = Constraint(expr= m.b5 - m.b15 + m.b41 <= 1)
m.c95 = Constraint(expr= m.b5 - m.b17 + m.b42 <= 1)
m.c96 = Constraint(expr= m.b5 - m.b19 + m.b43 <= 1)
m.c97 = Constraint(expr= m.b5 - m.b21 + m.b44 <= 1)
m.c98 = Constraint(expr= m.b5 - m.b23 + m.b45 <= 1)
m.c99 = Constraint(expr= m.b5 - m.b25 + m.b46 <= 1)
m.c100 = Constraint(expr= m.b7 - m.b9 + m.b47 <= 1)
m.c101 = Constraint(expr= m.b7 - m.b11 + m.b48 <= 1)
m.c102 = Constraint(expr= m.b7 - m.b13 + m.b49 <= 1)
m.c103 = Constraint(expr= m.b7 - m.b15 + m.b50 <= 1)
m.c104 = Constraint(expr= m.b7 - m.b17 + m.b51 <= 1)
m.c105 = Constraint(expr= m.b7 - m.b19 + m.b52 <= 1)
m.c106 = Constraint(expr= m.b7 - m.b21 + m.b53 <= 1)
m.c107 = Constraint(expr= m.b7 - m.b23 + m.b54 <= 1)
m.c108 = Constraint(expr= m.b7 - m.b25 + m.b55 <= 1)
m.c109 = Constraint(expr= m.b9 - m.b11 + m.b56 <= 1)
m.c110 = Constraint(expr= m.b9 - m.b13 + m.b57 <= 1)
m.c111 = Constraint(expr= m.b9 - m.b15 + m.b58 <= 1)
m.c112 = Constraint(expr= m.b9 - m.b17 + m.b59 <= 1)
m.c113 = Constraint(expr= m.b9 - m.b19 + m.b60 <= 1)
m.c114 = Constraint(expr= m.b9 - m.b21 + m.b61 <= 1)
m.c115 = Constraint(expr= m.b9 - m.b23 + m.b62 <= 1)
m.c116 = Constraint(expr= m.b9 - m.b25 + m.b63 <= 1)
m.c117 = Constraint(expr= m.b11 - m.b13 + m.b64 <= 1)
m.c118 = Constraint(expr= m.b11 - m.b15 + m.b65 <= 1)
m.c119 = Constraint(expr= m.b11 - m.b17 + m.b66 <= 1)
m.c120 = Constraint(expr= m.b11 - m.b19 + m.b67 <= 1)
m.c121 = Constraint(expr= m.b11 - m.b21 + m.b68 <= 1)
m.c122 = Constraint(expr= m.b11 - m.b23 + m.b69 <= 1)
m.c123 = Constraint(expr= m.b11 - m.b25 + m.b70 <= 1)
m.c124 = Constraint(expr= m.b13 - m.b15 + m.b71 <= 1)
m.c125 = Constraint(expr= m.b13 - m.b17 + m.b72 <= 1)
m.c126 = Constraint(expr= m.b13 - m.b19 + m.b73 <= 1)
m.c127 = Constraint(expr= m.b13 - m.b21 + m.b74 <= 1)
m.c128 = Constraint(expr= m.b13 - m.b23 + m.b75 <= 1)
m.c129 = Constraint(expr= m.b13 - m.b25 + m.b76 <= 1)
m.c130 = Constraint(expr= m.b15 - m.b17 + m.b77 <= 1)
m.c131 = Constraint(expr= m.b15 - m.b19 + m.b78 <= 1)
m.c132 = Constraint(expr= m.b15 - m.b21 + m.b79 <= 1)
m.c133 = Constraint(expr= m.b15 - m.b23 + m.b80 <= 1)
m.c134 = Constraint(expr= m.b15 - m.b25 + m.b81 <= 1)
m.c135 = Constraint(expr= m.b17 - m.b19 + m.b82 <= 1)
m.c136 = Constraint(expr= m.b17 - m.b21 + m.b83 <= 1)
m.c137 = Constraint(expr= m.b17 - m.b23 + m.b84 <= 1)
m.c138 = Constraint(expr= m.b17 - m.b25 + m.b85 <= 1)
m.c139 = Constraint(expr= m.b19 - m.b21 + m.b86 <= 1)
m.c140 = Constraint(expr= m.b19 - m.b23 + m.b87 <= 1)
m.c141 = Constraint(expr= m.b19 - m.b25 + m.b88 <= 1)
m.c142 = Constraint(expr= m.b21 - m.b23 + m.b89 <= 1)
m.c143 = Constraint(expr= m.b21 - m.b25 + m.b90 <= 1)
m.c144 = Constraint(expr= m.b23 - m.b25 + m.b91 <= 1)
m.c145 = Constraint(expr= m.b26 - m.b27 + m.b37 <= 1)
m.c146 = Constraint(expr= m.b26 - m.b28 + m.b38 <= 1)
m.c147 = Constraint(expr= m.b26 - m.b29 + m.b39 <= 1)
m.c148 = Constraint(expr= m.b26 - m.b30 + m.b40 <= 1)
m.c149 = Constraint(expr= m.b26 - m.b31 + m.b41 <= 1)
m.c150 = Constraint(expr= m.b26 - m.b32 + m.b42 <= 1)
m.c151 = Constraint(expr= m.b26 - m.b33 + m.b43 <= 1)
m.c152 = Constraint(expr= m.b26 - m.b34 + m.b44 <= 1)
m.c153 = Constraint(expr= m.b26 - m.b35 + m.b45 <= 1)
m.c154 = Constraint(expr= m.b26 - m.b36 + m.b46 <= 1)
m.c155 = Constraint(expr= m.b27 - m.b28 + m.b47 <= 1)
m.c156 = Constraint(expr= m.b27 - m.b29 + m.b48 <= 1)
m.c157 = Constraint(expr= m.b27 - m.b30 + m.b49 <= 1)
m.c158 = Constraint(expr= m.b27 - m.b31 + m.b50 <= 1)
m.c159 = Constraint(expr= m.b27 - m.b32 + m.b51 <= 1)
m.c160 = Constraint(expr= m.b27 - m.b33 + m.b52 <= 1)
m.c161 = Constraint(expr= m.b27 - m.b34 + m.b53 <= 1)
m.c162 = Constraint(expr= m.b27 - m.b35 + m.b54 <= 1)
m.c163 = Constraint(expr= m.b27 - m.b36 + m.b55 <= 1)
m.c164 = Constraint(expr= m.b28 - m.b29 + m.b56 <= 1)
m.c165 = Constraint(expr= m.b28 - m.b30 + m.b57 <= 1)
m.c166 = Constraint(expr= m.b28 - m.b31 + m.b58 <= 1)
m.c167 = Constraint(expr= m.b28 - m.b32 + m.b59 <= 1)
m.c168 = Constraint(expr= m.b28 - m.b33 + m.b60 <= 1)
m.c169 = Constraint(expr= m.b28 - m.b34 + m.b61 <= 1)
m.c170 = Constraint(expr= m.b28 - m.b35 + m.b62 <= 1)
m.c171 = Constraint(expr= m.b28 - m.b36 + m.b63 <= 1)
m.c172 = Constraint(expr= m.b29 - m.b30 + m.b64 <= 1)
m.c173 = Constraint(expr= m.b29 - m.b31 + m.b65 <= 1)
m.c174 = Constraint(expr= m.b29 - m.b32 + m.b66 <= 1)
m.c175 = Constraint(expr= m.b29 - m.b33 + m.b67 <= 1)
m.c176 = Constraint(expr= m.b29 - m.b34 + m.b68 <= 1)
m.c177 = Constraint(expr= m.b29 - m.b35 + m.b69 <= 1)
m.c178 = Constraint(expr= m.b29 - m.b36 + m.b70 <= 1)
m.c179 = Constraint(expr= m.b30 - m.b31 + m.b71 <= 1)
m.c180 = Constraint(expr= m.b30 - m.b32 + m.b72 <= 1)
m.c181 = Constraint(expr= m.b30 - m.b33 + m.b73 <= 1)
m.c182 = Constraint(expr= m.b30 - m.b34 + m.b74 <= 1)
m.c183 = Constraint(expr= m.b30 - m.b35 + m.b75 <= 1)
m.c184 = Constraint(expr= m.b30 - m.b36 + m.b76 <= 1)
m.c185 = Constraint(expr= m.b31 - m.b32 + m.b77 <= 1)
m.c186 = Constraint(expr= m.b31 - m.b33 + m.b78 <= 1)
m.c187 = Constraint(expr= m.b31 - m.b34 + m.b79 <= 1)
m.c188 = Constraint(expr= m.b31 - m.b35 + m.b80 <= 1)
m.c189 = Constraint(expr= m.b31 - m.b36 + m.b81 <= 1)
m.c190 = Constraint(expr= m.b32 - m.b33 + m.b82 <= 1)
m.c191 = Constraint(expr= m.b32 - m.b34 + m.b83 <= 1)
m.c192 = Constraint(expr= m.b32 - m.b35 + m.b84 <= 1)
m.c193 = Constraint(expr= m.b32 - m.b36 + m.b85 <= 1)
m.c194 = Constraint(expr= m.b33 - m.b34 + m.b86 <= 1)
m.c195 = Constraint(expr= m.b33 - m.b35 + m.b87 <= 1)
m.c196 = Constraint(expr= m.b33 - m.b36 + m.b88 <= 1)
m.c197 = Constraint(expr= m.b34 - m.b35 + m.b89 <= 1)
m.c198 = Constraint(expr= m.b34 - m.b36 + m.b90 <= 1)
m.c199 = Constraint(expr= m.b35 - m.b36 + m.b91 <= 1)
m.c200 = Constraint(expr= m.b37 - m.b38 + m.b47 <= 1)
m.c201 = Constraint(expr= m.b37 - m.b39 + m.b48 <= 1)
m.c202 = Constraint(expr= m.b37 - m.b40 + m.b49 <= 1)
m.c203 = Constraint(expr= m.b37 - m.b41 + m.b50 <= 1)
m.c204 = Constraint(expr= m.b37 - m.b42 + m.b51 <= 1)
m.c205 = Constraint(expr= m.b37 - m.b43 + m.b52 <= 1)
m.c206 = Constraint(expr= m.b37 - m.b44 + m.b53 <= 1)
m.c207 = Constraint(expr= m.b37 - m.b45 + m.b54 <= 1)
m.c208 = Constraint(expr= m.b37 - m.b46 + m.b55 <= 1)
m.c209 = Constraint(expr= m.b38 - m.b39 + m.b56 <= 1)
m.c210 = Constraint(expr= m.b38 - m.b40 + m.b57 <= 1)
m.c211 = Constraint(expr= m.b38 - m.b41 + m.b58 <= 1)
m.c212 = Constraint(expr= m.b38 - m.b42 + m.b59 <= 1)
m.c213 = Constraint(expr= m.b38 - m.b43 + m.b60 <= 1)
m.c214 = Constraint(expr= m.b38 - m.b44 + m.b61 <= 1)
m.c215 = Constraint(expr= m.b38 - m.b45 + m.b62 <= 1)
m.c216 = Constraint(expr= m.b38 - m.b46 + m.b63 <= 1)
m.c217 = Constraint(expr= m.b39 - m.b40 + m.b64 <= 1)
m.c218 = Constraint(expr= m.b39 - m.b41 + m.b65 <= 1)
m.c219 = Constraint(expr= m.b39 - m.b42 + m.b66 <= 1)
m.c220 = Constraint(expr= m.b39 - m.b43 + m.b67 <= 1)
m.c221 = Constraint(expr= m.b39 - m.b44 + m.b68 <= 1)
m.c222 = Constraint(expr= m.b39 - m.b45 + m.b69 <= 1)
m.c223 = Constraint(expr= m.b39 - m.b46 + m.b70 <= 1)
m.c224 = Constraint(expr= m.b40 - m.b41 + m.b71 <= 1)
m.c225 = Constraint(expr= m.b40 - m.b42 + m.b72 <= 1)
m.c226 = Constraint(expr= m.b40 - m.b43 + m.b73 <= 1)
m.c227 = Constraint(expr= m.b40 - m.b44 + m.b74 <= 1)
m.c228 = Constraint(expr= m.b40 - m.b45 + m.b75 <= 1)
m.c229 = Constraint(expr= m.b40 - m.b46 + m.b76 <= 1)
m.c230 = Constraint(expr= m.b41 - m.b42 + m.b77 <= 1)
m.c231 = Constraint(expr= m.b41 - m.b43 + m.b78 <= 1)
m.c232 = Constraint(expr= m.b41 - m.b44 + m.b79 <= 1)
m.c233 = Constraint(expr= m.b41 - m.b45 + m.b80 <= 1)
m.c234 = Constraint(expr= m.b41 - m.b46 + m.b81 <= 1)
m.c235 = Constraint(expr= m.b42 | |
out = _responds(RESULT_FAILURE, msg="No such cmd")
return out
class CMD_ComingEpisodes(ApiCall):
    # Help metadata rendered by the API "help" command; not used by run().
    _help = {"desc": "display the coming episodes",
             "optionalParameters": {"sort": {"desc": "change the sort order"},
                                    "type": {"desc": "one or more of allowedValues separated by |"},
                                    "paused": {"desc": "0 to exclude paused shows, 1 to include them, or omitted to use the SB default"},
                                    }
             }
    def __init__(self, args, kwargs):
        """Parse sort/type/paused parameters; validation is delegated to check_params."""
        # required
        # optional
        self.sort, args = self.check_params(args, kwargs, "sort", "date", False, "string", ["date", "show", "network"])
        self.type, args = self.check_params(args, kwargs, "type", "today|missed|soon|later", False, "list", ["missed", "later", "today", "soon"])
        self.paused, args = self.check_params(args, kwargs, "paused", sickbeard.COMING_EPS_DISPLAY_PAUSED, False, "int", [0, 1])
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)
    def run(self):
        """ display the coming episodes """
        # ordinal day numbers bounding the buckets (missed / today / soon / later)
        today = datetime.date.today().toordinal()
        next_week = (datetime.date.today() + datetime.timedelta(days=7)).toordinal()
        recently = (datetime.date.today() - datetime.timedelta(days=3)).toordinal()
        done_show_list = []
        # composite statuses meaning "already handled": such episodes are excluded
        qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
        myDB = db.DBConnection(row_type="dict")
        # query 1: episodes airing from today up to (but excluding) next week
        sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join(['?'] * len(qualList)) + ")", [today, next_week] + qualList)
        for cur_result in sql_results:
            done_show_list.append(int(cur_result["tvdbid"]))
        # query 2: for shows not matched above, their next upcoming episode ("later")
        more_sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN (" + ','.join(['?'] * len(done_show_list)) + ") AND tv_shows.tvdb_id = outer_eps.showid AND airdate = (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? ORDER BY inner_eps.airdate ASC LIMIT 1) AND outer_eps.status NOT IN (" + ','.join(['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED)) + ")", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
        sql_results += more_sql_results
        # query 3: recently aired episodes that are still WANTED ("missed")
        more_sql_results = myDB.select("SELECT airdate, airs, episode, name AS 'ep_name', description AS 'ep_plot', network, season, showid AS 'tvdbid', show_name, tv_shows.quality AS quality, tv_shows.status AS 'show_status', tv_shows.paused AS 'paused' FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join(['?'] * len(qualList)) + ")", [today, recently, WANTED] + qualList)
        sql_results += more_sql_results
        # sort by air date
        # NOTE: Python 2 cmp-style comparators; list.sort(cmp) does not exist on Python 3
        sorts = {
            'date': (lambda x, y: cmp(int(x["airdate"]), int(y["airdate"]))),
            'show': (lambda a, b: cmp(a["show_name"], b["show_name"])),
            'network': (lambda a, b: cmp(a["network"], b["network"])),
        }
        sql_results.sort(sorts[self.sort])
        finalEpResults = {}
        # add all requested types or all
        for curType in self.type:
            finalEpResults[curType] = []
        for ep in sql_results:
            """
            Missed: yesterday... (less than 1week)
            Today: today
            Soon: tomorrow till next week
            Later: later than next week
            """
            # honour the paused filter (0 excludes paused shows)
            if ep["paused"] and not self.paused:
                continue
            # classify the episode into one of the four buckets
            status = "soon"
            if ep["airdate"] < today:
                status = "missed"
            elif ep["airdate"] >= next_week:
                status = "later"
            elif ep["airdate"] >= today and ep["airdate"] < next_week:
                if ep["airdate"] == today:
                    status = "today"
                else:
                    status = "soon"
            # skip unwanted
            if self.type != None and not status in self.type:
                continue
            ordinalAirdate = int(ep["airdate"])
            if not ep["network"]:
                ep["network"] = ""
            # convert to human-readable forms for the JSON response
            ep["airdate"] = _ordinal_to_dateForm(ordinalAirdate)
            ep["quality"] = _get_quality_string(ep["quality"])
            # clean up tvdb horrible airs field
            ep["airs"] = str(ep["airs"]).replace('am', ' AM').replace('pm', ' PM').replace(' ', ' ')
            # start day of the week on 1 (monday)
            ep["weekday"] = 1 + datetime.date.fromordinal(ordinalAirdate).weekday()
            # TODO: check if this obsolete
            if not status in finalEpResults:
                finalEpResults[status] = []
            finalEpResults[status].append(ep)
        myDB.connection.close()
        return _responds(RESULT_SUCCESS, finalEpResults)
class CMD_Episode(ApiCall):
    # Help metadata rendered by the API "help" command.
    _help = {"desc": "display detailed info about an episode",
             "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
                                    "season": {"desc": "the season number"},
                                    "episode": {"desc": "the episode number"}
                                    },
             "optionalParameters": {"full_path": {"desc": "show the full absolute path (if valid) instead of a relative path for the episode location"}
                                    }
             }
    def __init__(self, args, kwargs):
        """Parse the tvdbid/season/episode identifiers and the full_path flag."""
        # required
        self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
        self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
        self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", [])
        # optional
        self.fullPath, args = self.check_params(args, kwargs, "full_path", 0, False, "bool", [])
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)
    def run(self):
        """ display detailed info about an episode """
        showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
        if not showObj:
            return _responds(RESULT_FAILURE, msg="Show not found")
        myDB = db.DBConnection(row_type="dict")
        sqlResults = myDB.select("SELECT name, description, airdate, status, location, file_size, release_name FROM tv_episodes WHERE showid = ? AND episode = ? AND season = ?", [self.tvdbid, self.e, self.s])
        if len(sqlResults) != 1:
            # NOTE(review): raising (instead of returning RESULT_FAILURE like the
            # sibling commands) is preserved intentionally; callers may catch ApiError.
            raise ApiError("Episode not found")
        episode = sqlResults[0]
        # handle path options: absolute vs relative vs broken show dir
        showPath = None
        try:
            showPath = showObj.location
        except sickbeard.exceptions.ShowDirNotFoundException:
            # show dir is missing on disk; leave showPath as None
            pass
        if not showPath:  # show dir is broken ... episode path will be empty
            episode["location"] = ""
        elif not self.fullPath:
            # strip the show dir prefix to get a relative path;
            # using the length because lstrip removes too much
            showPathLength = len(showPath) + 1  # +1 for the / or \ separator
            episode["location"] = episode["location"][showPathLength:]
        # else: full_path requested and show dir valid -> keep the absolute path
        # convert stuff to human-readable form
        episode["airdate"] = _ordinal_to_dateForm(episode["airdate"])
        status, quality = Quality.splitCompositeStatus(int(episode["status"]))
        episode["status"] = _get_status_Strings(status)
        episode["quality"] = _get_quality_string(quality)
        episode["file_size_human"] = _sizeof_fmt(episode["file_size"])
        myDB.connection.close()
        return _responds(RESULT_SUCCESS, episode)
class CMD_EpisodeSearch(ApiCall):
    # Help metadata rendered by the API "help" command.
    _help = {"desc": "search for an episode. the response might take some time",
             "requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
                                    "season": {"desc": "the season number"},
                                    "episode": {"desc": "the episode number"}
                                    }
             }
    def __init__(self, args, kwargs):
        """Parse the tvdbid/season/episode identifiers of the episode to search for."""
        # required
        self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
        self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
        self.e, args = self.check_params(args, kwargs, "episode", None, True, "int", [])
        # optional
        # super, missing, help
        ApiCall.__init__(self, args, kwargs)
    def run(self):
        """ search for an episode """
        showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
        if not showObj:
            return _responds(RESULT_FAILURE, msg="Show not found")
        # retrieve the episode object and fail if we can't get one
        epObj = showObj.getEpisode(int(self.s), int(self.e))
        if isinstance(epObj, str):
            return _responds(RESULT_FAILURE, msg="Episode not found")
        # make a queue item for it and put it on the queue
        ep_queue_item = search_queue.ManualSearchQueueItem(epObj)
        sickbeard.searchQueueScheduler.action.add_item(ep_queue_item)  # @UndefinedVariable
        # wait until the queue item tells us whether it worked or not
        # (blocking poll: the API request is held open while the search runs)
        while ep_queue_item.success is None:  # @UndefinedVariable
            time.sleep(1)
        # return the correct json value
        if ep_queue_item.success:
            status, quality = Quality.splitCompositeStatus(epObj.status)  # @UnusedVariable
            # TODO: split quality and status?
            return _responds(RESULT_SUCCESS, {"quality": _get_quality_string(quality)}, "Snatched (" + _get_quality_string(quality) + ")")
        return _responds(RESULT_FAILURE, msg='Unable to find episode')
class CMD_EpisodeSetStatus(ApiCall):
_help = {"desc": "set status of an episode or season (when no ep is provided)",
"requiredParameters": {"tvdbid": {"desc": "thetvdb.com unique id of a show"},
"season": {"desc": "the season number"},
"status": {"desc": "the status values: wanted, skipped, archived, ignored"}
},
"optionalParameters": {"episode": {"desc": "the episode number"},
"force": {"desc": "should we replace existing (downloaded) episodes or not"}
}
}
def __init__(self, args, kwargs):
# required
self.tvdbid, args = self.check_params(args, kwargs, "tvdbid", None, True, "int", [])
self.s, args = self.check_params(args, kwargs, "season", None, True, "int", [])
self.status, args = self.check_params(args, kwargs, "status", None, True, "string", ["wanted", "skipped", "archived", "ignored"])
# optional
self.e, args = self.check_params(args, kwargs, "episode", None, False, "int", [])
self.force, args = self.check_params(args, kwargs, "force", 0, False, "bool", [])
# super, missing, help
ApiCall.__init__(self, args, kwargs)
def run(self):
""" set status of an episode or a season (when no ep is provided) """
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(self.tvdbid))
if not showObj:
return _responds(RESULT_FAILURE, msg="Show not found")
# convert the string status to a int
for status in statusStrings.statusStrings:
if str(statusStrings[status]).lower() == str(self.status).lower():
self.status = status
break
else: # if we dont break out of the for loop we got here.
# the allowed values has at least one item that could not be matched against the internal status strings
raise ApiError("The status string could not be matched to a status. Report to Devs!")
ep_list = []
if self.e:
epObj = showObj.getEpisode(self.s, self.e)
if epObj == None:
return _responds(RESULT_FAILURE, msg="Episode not found")
ep_list = [epObj]
else:
# get all episode numbers frome self,season
ep_list = | |
# Authors: <NAME> <<EMAIL>>
"""
Support recovery on simulated data (2D)
=======================================
This example shows the advantages of spatially relaxed inference when
dealing with high-dimensional spatial data. To do so, we compare several
statistical methods that aim at recovering the support, i.e., predictive
features. Among those methods some leverage the spatial structure of the
data. For more details about the inference algorithms presented in this
example or about the generative process used to simulate the data,
please refer to Chevalier et al. (2021) [1]_.
This example corresponds to the experiment described in details in
Chevalier et al. (2021) [1]_. Shortly, to simulate the data, we draw
``n_samples`` i.i.d Gaussian vectors of size ``n_features`` and reshape them
into squares (edges are equal to ``n_features ** (1/2)``). Then, to introduce
some spatial structure, we apply a Gaussian filter that correlates features
that are nearby. The 2D data are then flattened into a design matrix ``X`` to
represent it as a regression setting and to ease the computation of the
simulated target ``y`` (see below). Then, we construct the weight map ``w``
which has the same shape as the 2D data, as it contains four predictive
regions in every corner of the square. Similarly as for the construction
of ``X``, the map ``w`` is finally flattened into a vector ``beta``. Lastly,
to derive the target ``y``, we draw a white Gaussian noise ``epsilon`` and
use a linear generative model: ``y = X beta + epsilon``.
The results of this experiment show that the methods that leverage the spatial
structure of the data are relevant. More precisely, we show that clustered
inference algorithms (e.g., CluDL) and ensembled clustered inference algorithms
(e.g., EnCluDL) are more powerful than the standard inference methods (see also
Chevalier et al. (2021) [1]_). Indeed, when the number of features is much
greater than the number of samples, standard statistical methods are
unlikely to recover the support. Then, the idea of clustered inference is to
compress the data without breaking the spatial structure, leading to a
compressed problem close to the original problem. This leads to a
powerful spatially relaxed inference. Indeed, thanks to the dimension reduction
the support recovery is feasible. However, due to the spatial compression,
there is a limited (and quantifiable) spatial uncertainty concerning the shape
of the estimated support. Finally, by considering several choices of
spatial compression, ensembled clustered inference algorithms reduce
significantly the spatial uncertainty compared to clustered inference
algorithms which consider only one spatial compression.
.. _References:
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2021).
Spatially relaxed inference on high-dimensional linear models.
arXiv preprint arXiv:2106.02590.
"""
#############################################################################
# Imports needed for this script
# ------------------------------
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import FeatureAgglomeration
from hidimstat.scenario import multivariate_simulation
from hidimstat.stat_tools import zscore_from_pval, pval_from_cb
from hidimstat.desparsified_lasso import desparsified_lasso
from hidimstat.clustered_inference import clustered_inference
from hidimstat.ensemble_clustered_inference import ensemble_clustered_inference
#############################################################################
# Specific plotting functions
# ---------------------------
# The functions below are used to plot the results and illustrate the concept
# of spatial tolerance. If you are reading this example for the first time,
# you can skip this section.
#
# The following function builds a 2D map with four active regions that are
# enfolded by thin tolerance regions.
def weight_map_2D_extended(shape, roi_size, delta):
    '''Build weight map with visible tolerance region.

    Pixels inside the four corner ROIs end up with weight 1.0, pixels that
    belong only to the surrounding tolerance band get 0.5.  The per-corner
    channels are summed and the 2D map is flattened into a coefficient vector.
    '''
    ext = roi_size + delta
    w = np.zeros(shape + (5,))
    # base weight (0.5) on the four true corner ROIs, one channel per corner
    w[:roi_size, :roi_size, 0] = 0.5
    w[-roi_size:, -roi_size:, 1] = 0.5
    w[:roi_size, -roi_size:, 2] = 0.5
    w[-roi_size:, :roi_size, 3] = 0.5
    # extra weight (0.5) on the enlarged squares that include the tolerance band
    w[:ext, :ext, 0] += 0.5
    w[-ext:, -ext:, 1] += 0.5
    w[:ext, -ext:, 2] += 0.5
    w[-ext:, :ext, 3] += 0.5
    # carve each enlarged square back to a diagonal band around its ROI
    # (same predicate as (row - roi_size) + (col - roi_size) >= delta)
    for row in range(ext):
        for col in range(ext):
            if row + col >= 2 * roi_size + delta:
                w[row, col, 0] = 0
                w[-row - 1, -col - 1, 1] = 0
                w[row, -col - 1, 2] = 0
                w[-row - 1, col, 3] = 0
    # collapse the corner channels and flatten to a 1D vector
    return w.sum(-1).ravel()
##############################################################################
# To generate a plot that exhibits the true support and the estimated
# supports for every method, we define the two following functions:
def add_one_subplot(ax, map, title):
    '''Add one subplot into the summary plot'''
    if map is None:
        # no estimate available for this slot: render a blank panel
        ax.axis('off')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        return
    # draw the support map on a fixed [-1, 1] color scale, without ticks
    handle = ax.imshow(map)
    handle.set_clim(-1, 1)
    ax.tick_params(axis='both',
                   which='both',
                   bottom=False,
                   top=False,
                   left=False,
                   labelbottom=False,
                   labelleft=False)
    ax.set_title(title)
def plot(maps, titles, save_fig=False):
    '''Make a summary plot from estimated supports'''
    fig, axes = plt.subplots(3, 2, figsize=(4, 6))
    # fill the 3x2 grid in row-major order (panel k -> row k//2, column k%2)
    for k in range(6):
        add_one_subplot(axes[k // 2][k % 2], maps[k], titles[k])
    fig.tight_layout()
    if save_fig:
        figname = 'figures/simu_2D.png'
        plt.savefig(figname)
        print(f'Save figure to {figname}')
    plt.show()
##############################################################################
# Generating the data
# -------------------
#
# After setting the simulation parameters, we run the function that generates
# the 2D scenario that we have briefly described in the first section of this
# example.
# simulation parameters
n_samples = 100  # number of i.i.d. observations
shape = (40, 40)  # 2D image grid; features are the flattened pixels
n_features = shape[1] * shape[0]
roi_size = 4 # size of the edge of the four predictive regions
sigma = 2.0 # noise standard deviation
smooth_X = 1.0 # level of spatial smoothing introduced by the Gaussian filter
# generating the data
X_init, y, beta, epsilon, _, _ = \
    multivariate_simulation(n_samples, shape, roi_size, sigma, smooth_X,
                            seed=1)
##############################################################################
# Choosing inference parameters
# -----------------------------
#
# The choice of the number of clusters depends on several parameters, such as:
# the structure of the data (a higher correlation between neighboring features
# enable a greater dimension reduction, i.e. a smaller number of clusters),
# the number of samples (small datasets require more dimension reduction) and
# the required spatial tolerance (small clusters lead to limited spatial
# uncertainty). Formally, "spatial tolerance" is defined by the largest
# distance from the true support for which the occurrence of a false discovery
# is not statistically controlled (c.f. :ref:`References`).
# Theoretically, the spatial tolerance ``delta`` is equal to the largest
# cluster diameter. However this choice is conservative, notably in the case
# of ensembled clustered inference. For these algorithms, we recommend to take
# the average cluster radius. In this example, we choose ``n_clusters = 200``,
# leading to a theoretical spatial tolerance ``delta = 6``. However, it
# turns out that ``delta = 2``, the average cluster radius, would have been
# sufficient for ensembled clustered inference algorithms (see Results).
# hyper-parameters
n_clusters = 200  # number of feature clusters used for dimension reduction
# inference parameters
fwer_target = 0.1  # targeted family-wise error rate
delta = 6  # theoretical spatial tolerance (largest cluster diameter, see above)
# computation parameter
n_jobs = 1  # number of parallel workers
##############################################################################
# Computing z-score thresholds for support estimation
# ---------------------------------------------------
#
# Below, we translate the FWER target into z-score targets.
# To compute the z-score targets we also take into account for the multiple
# testing correction. To do so, we consider the Bonferroni correction.
# For methods that do not reduce the feature space, the correction
# consists in dividing the FWER target by the number of features.
# For methods that group features into clusters, the correction
# consists in dividing by the number of clusters.
# computing the z-score thresholds for feature selection
# Bonferroni factors: divide the FWER target by the number of tests
# (features for non-clustered methods, clusters for clustered methods)
correction_no_cluster = 1. / n_features
correction_cluster = 1. / n_clusters
# two-sided tests: each tail receives half of the corrected FWER target
thr_c = zscore_from_pval((fwer_target / 2) * correction_cluster)
thr_nc = zscore_from_pval((fwer_target / 2) * correction_no_cluster)
#############################################################################
# Inference with several algorithms
# ---------------------------------
#
# First, we compute a reference map that exhibits the true support and
# the theoretical tolerance region.
# compute true support with visible spatial tolerance
beta_extended = weight_map_2D_extended(shape, roi_size, delta)
#############################################################################
# Now, we compute the support estimated by a high-dimensional statistical
# inference method that does not leverage the data structure. This method
# was introduced by <NAME>. et al. (2014), <NAME>. et al. (2014)
# and <NAME>. et al. (2014) (full references are available at
# https://ja-che.github.io/hidimstat/).
# and referred to as Desparsified Lasso.
# compute desparsified lasso (returns the estimate and confidence-band bounds)
beta_hat, cb_min, cb_max = desparsified_lasso(X_init, y, n_jobs=n_jobs)
pval, pval_corr, one_minus_pval, one_minus_pval_corr = \
    pval_from_cb(cb_min, cb_max)
# compute estimated support (first method)
zscore = zscore_from_pval(pval, one_minus_pval)
selected_dl = zscore > thr_nc # use the "no clustering threshold"
# compute estimated support (second method)
# NOTE: this deliberately overwrites the first estimate; both variants are
# shown for illustration and should agree
selected_dl = np.logical_or(pval_corr < fwer_target / 2,
                            one_minus_pval_corr < fwer_target / 2)
#############################################################################
# Now, we compute the support estimated using a clustered inference algorithm
# (c.f. :ref:`References`) called Clustered Desparsified Lasso (CluDL) since it
# uses the Desparsified Lasso technique after clustering the data.
# Define the FeatureAgglomeration object that performs the clustering.
# This object is necessary to run the current algorithm and the following one.
# The connectivity graph restricts merging to spatially adjacent pixels, so
# clusters stay spatially contiguous.
connectivity = image.grid_to_graph(n_x=shape[0],
                                   n_y=shape[1])
ward = FeatureAgglomeration(n_clusters=n_clusters,
                            connectivity=connectivity,
                            linkage='ward')
# clustered desparsified lasso (CluDL)
beta_hat, pval, pval_corr, one_minus_pval, one_minus_pval_corr = \
    clustered_inference(X_init, y, ward, n_clusters)
# compute estimated support (first method)
zscore = zscore_from_pval(pval, one_minus_pval)
selected_cdl = zscore > thr_c # use the "clustering threshold"
# compute estimated support (second method)
selected_cdl = np.logical_or(pval_corr < fwer_target / 2,
| |
<filename>hiseq/utils/argsParser.py
# -*- coding: utf-8 -*-
"""
Parse arguments from command line
- trimmer
- align
"""
import argparse
import pathlib
def add_sheet_args():
    """Build the argument parser for `hiseq sheet`.

    Prepare sample sheet for Demx
    output:
    1. sample_name,i7,i5,barcode
    2. i7_name,i7,reads
    3. sample_name,NULL,NULL,barcode (bc only)

    Returns:
        argparse.ArgumentParser: the configured parser (not yet parsed).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Prepare sample sheet for demx/demx2',
        epilog='''Description:
YY00.xlsx : required columns, ['Sample_name*', 'P7_index_id*', 'Barcode_id*', 'Reads, M']
Output:
1. sample_name,i7,i5,barcode
2. i7_name,i7,reads
3. sample_name,NULL,NULL,barcode
Example:
hiseq sheet -s YY00.xlsx -o data'''
    )
    parser.add_argument('-s', '--xlsx-table', dest='x', required=True,
                        help='sample table in xlsx format, eg: YY00.xlsx')
    parser.add_argument('-o', '--outdir', dest='outdir',
                        help='directory to save the results')  # typo fix: "reulsts"
    return parser
def add_demx_args():
    """Build the argument parser for `hiseq demx` (demultiplexing).

    Splits the input fastq file(s) into per-sample files according to an
    index table (i7/i5/barcode).

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(description='hiseq demx')
    parser.add_argument('-1', '--fq1', required=True,
                        help='read1 in fastq format, gzipped')
    parser.add_argument('-2', '--fq2',
                        help='read2 in fastq format, gzipped, (optional)')
    parser.add_argument('-o', '--outdir', required=True,
                        help='directory to save the results')  # typo fix: "reulsts"
    parser.add_argument('-s', '--index-table', dest='index_table', required=True,
                        help='index list in csv format, [filename,i7,i5,barcode]')
    parser.add_argument('--demo', action='store_true',
                        help='run demo (1M reads) for demonstration, default: off')
    parser.add_argument('-m', '--mismatch', type=int, default=0,
                        help='mismatches allowed to search index, default: [0]')
    parser.add_argument('-x', '--barcode-in-read2', action='store_true',
                        help='barcode in read2')
    parser.add_argument('-l', '--barcode-n-left', type=int, dest='barcode_n_left',
                        default=0, help='bases locate on the left of barcode')
    parser.add_argument('-r', '--barcode-n-right', type=int, dest='barcode_n_right',
                        default=0, help='bases locate on the right of barcode')
    parser.add_argument('-p', '--threads', type=int, default=1,
                        help='number of threads, default: [1]')
    parser.add_argument('-j', '--parallel-jobs', type=int, dest='parallel_jobs',
                        default=1, help='number of jobs run in parallel, default: [1]')
    parser.add_argument('-w', '--overwrite', action='store_true',
                        help='Overwrite exists files, default: off')
    return parser
def add_demx2_args():
    """Build the argument parser for `hiseq demx2`.

    Demultiplexing with multiple barcode files; sample metadata is read
    from an xlsx/csv table and the raw fastq files are found in --datadir.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(description='hiseq demx2')
    parser.add_argument('-s', '--xlsx-table', dest='x', required=True,
                        help="Sample table in (xlsx|csv) format; xlsx: require the columns\
        ['Sample_name*', 'P7_index_id*', 'Barcode_id*', 'Reads, M']; \
        csv: require the columns: ['name', 'i7', 'i5', 'bc', 'reads'] \
        the csv file could be `hiseq sheet -s a.xlsx -o data` output: *.demx.csv")
    parser.add_argument('-d', '--datadir', dest='datadir', required=True,
                        help='Directory saving the fastq files')
    parser.add_argument('-o', '--outdir', dest='outdir',
                        help='directory to save the results')  # typo fix: "reulsts"
    parser.add_argument('--demo', action='store_true',
                        help='run demo (1M reads) for demonstration, default: off')
    parser.add_argument('-m', '--mismatch', type=int, default=0,
                        help='mismatches allowed to search index, default: [0]')
    parser.add_argument('-x', '--barcode-in-read2', dest='barcode_in_read2',
                        action='store_true', help='barcode in read2')
    parser.add_argument('-l', '--barcode-n-left', type=int, dest='barcode_n_left',
                        default=0, help='bases locate on the left of barcode')
    parser.add_argument('-r', '--barcode-n-right', type=int, dest='barcode_n_right',
                        default=0, help='bases locate on the right of barcode')
    parser.add_argument('-w', '--overwrite', action='store_true',
                        help='Overwrite exists files, default: off')
    return parser
def add_qc_args():
    """Build the argument parser for `hiseq qc` (FastQC wrapper).

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(
        description='hiseq qc, fastqc')
    parser.add_argument('-i', '--fq', nargs='+', required=True,
                        help='reads in FASTQ files, or directory contains fastq files')
    parser.add_argument('-o', '--outdir', default=None,
                        help='The directory to save results.')
    parser.add_argument('--fastqc', default='fastqc',
                        help='The path to the fastqc command, default: [fastqc]')
    parser.add_argument('-f', '--overwrite', action='store_true',
                        help='if specified, overwrite exists file')  # typo fix: "spcified"
    parser.add_argument('-p', '--threads', default=1, type=int,
                        help='Number of threads for each job, default: [1]')
    parser.add_argument('-j', '--parallel-jobs', default=1, type=int,
                        dest='parallel_jobs',
                        help='Number of jobs run in parallel, only for multiple fastq files, default: [1]')
    return parser
def add_p7_args():
    """Build the argument parser for `hiseq p7`.

    Inspects fastq files for the P7/i7 index sequence (fixed docstring:
    this builder is for `hiseq p7`, not fastqc).

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(
        description='hiseq p7')
    parser.add_argument('-i', '--fq', nargs='+', required=True,
                        help='reads in FASTQ files, or directory contains fastq files')
    parser.add_argument('-o', '--outdir', default=None,
                        help='The directory to save results.')
    parser.add_argument('-f', '--overwrite', action='store_true',
                        help='if specified, overwrite exists file')  # typo fix: "spcified"
    parser.add_argument('-j', '--parallel-jobs', default=1, type=int,
                        dest='parallel_jobs',
                        help='Number of jobs run in parallel, default: [1]')
    parser.add_argument('-s', '--save-seq', dest='save_seq',
                        action='store_true',
                        help='Save the i7 sequence to file')
    return parser
def add_trim_args():
    """Build the argument parser for `hiseq trim`.

    - remove 3' adapter(s) (default: TruSeq RNA-Seq)
    - trim low-quality bases on both 5 and 3 end
    - trim N reads
    - cut N-bases at either end of read

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(
        description='hiseq qc, trim adapters and qc')
    parser.add_argument('-1', '--fq1', nargs='+', required=True,
                        help='reads in FASTQ files, support (*.gz), 1-4 files.')
    parser.add_argument('-2', '--fq2', nargs='+', default=None,
                        help='The read2 of pair-end reads')
    parser.add_argument('-o', '--outdir', default=None,
                        help='The directory to save results.')
    parser.add_argument('--library-type', dest='library_type', default=None,
                        type=str, choices=['TruSeq', 'Nextera', 'smallRNA'],
                        help='Type of the library structure, \
        TruSeq, TruSeq standard library \
        Nextera, Tn5 standard library, \
        smallRNA, small RNA library')
    parser.add_argument('-m', '--len_min', default=15, metavar='len_min',
                        type=int, help='Minimum length of reads after trimming, default [15]')  # typo fix: "defualt"
    parser.add_argument('--cut-to-length', default=0, dest='cut_to_length',
                        type=int,
                        help='cut reads from right to this length, default: [0], full length')
    parser.add_argument('--recursive', action='store_true',
                        help='trim adapter recursively')
    parser.add_argument('-p', '--threads', default=1, type=int,
                        help='Number of threads to launch, default [1]')
    parser.add_argument('-j', '--parallel-jobs', dest='parallel_jobs',
                        default=1,
                        type=int, help='Number of jobs to run in parallel, default [1]')
    parser.add_argument('--overwrite', action='store_true',
                        help='if specified, overwrite exists file')  # typo fix: "spcified"
    ## global arguments
    parser.add_argument('-q', '--qual-min', default=20, type=int,
                        dest='qual_min',
                        help='The cutoff of base quality, default [20]')
    parser.add_argument('-e', '--error-rate', default=0.1, type=float,
                        dest='error_rate',
                        help='Maximum allowed error rate, default [0.1]')
    ## specific
    parser.add_argument('--rm-untrim', action='store_true',
                        dest='rm_untrim',
                        help='discard reads without adapter')
    parser.add_argument('--save-untrim', action='store_true',
                        dest='save_untrim',
                        help='Save untrim reads to file')
    parser.add_argument('--save-too-short', action='store_true',
                        dest='save_too_short',
                        help='Save too short reads to file')
    parser.add_argument('--save-too-long', action='store_true',
                        dest='save_too_long',
                        help='Save too long reads to file')
    parser.add_argument('--cut-before-trim', default='0',
                        dest='cut_before_trim',
                        help='cut n-bases before trimming adapter; positive value, \
        cut from left; minus value, cut from right, eg: 3 or -4 or 3,-4, \
        default [0]')
    parser.add_argument('--cut-after-trim', default='0',
                        dest='cut_after_trim',
                        help='cut n-bases after trimming adapter; positive value, \
        cut from left; minus value, cut from right, eg: 3 or -4 or 3,-4, \
        default [0]')
    parser.add_argument('-a', '--adapter3', default=None,
                        help='3-Adapter sequence, default [].')
    parser.add_argument('-g', '--adapter5', default='',
                        help="5-Adapter, default: ''")
    ## PE arguments
    parser.add_argument('-A', '--Adapter3', default=None,
                        help='The 3 adapter of read2, default []')
    parser.add_argument('-G', '--Adapter5', default=None,
                        help='The 5 adapter of read1, default: None')
    return parser
def add_align_args():
    """Build the argument parser for `hiseq align`.

    Mapping SE read or one of PE reads to reference genome
    using bowtie, STAR, ... (universal)

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(
        description='Align short reads to reference sequence')
    parser.add_argument('-1', '--fq1', nargs='+', required=True,
                        help='path to HiSeq reads in FASTQ format, support multiple \
        files, separated by white spaces.')
    parser.add_argument('-2', '--fq2', nargs='+', default=None,
                        help='path to HiSeq read2 of pair-end reads, optional, support \
        multiple files separated by white spaces.')
    parser.add_argument('-o', '--outdir', default=None,
                        help='The directory to save results, default, \
        current working directory.')
    parser.add_argument('-g', '--genome', required=True, default=None,
                        choices=[None, 'dm6', 'dm3', 'hg38', 'hg19', 'mm10', 'mm9'],
                        help='Reference genome : dm6, dm3, hg38, hg19, mm10, mm9, default: dm6')
    parser.add_argument('-k', '--spikein', default=None,
                        choices=[None, 'dm6', 'dm3', 'hg38', 'hg19', 'mm10', 'mm9'],
                        help='Spike-in genome : dm6, dm3, hg38, hg19, mm10, mm9, default: None')
    parser.add_argument('--aligner', default='bowtie',
                        choices=['bowtie', 'bowtie2', 'STAR', 'hisat2', 'bwa', 'kalisto', 'salmon'],
                        help='Choose which aligner to use. default: bowtie')
    ## extra: index
    parser.add_argument('--index-list', nargs='+', dest='index_list', default=None,
                        help='ignore genome/spikein, add index directly, default: []')
    parser.add_argument('--index-name', nargs='+', dest='index_name', default=None,
                        help='names for the input index list')
    parser.add_argument('-x', '--extra-index', nargs='+', dest="extra_index", default=None,
                        help='Extra index for alignment, default: []')
    parser.add_argument('-n', '--smp-name', dest='smp_name', required=False,
                        help='Name of the experiment')
    parser.add_argument('--index-list-equal', action='store_true',
                        help='Align reads to each index list in parallel, if specified')
    ## extra: para
    parser.add_argument('--unique-only', action='store_true',
                        dest='unique_only',
                        help='if specified, keep unique mapped reads only')
    parser.add_argument('--extra-para', dest='extra_para', default=None, type=str,
                        help='Extra parameters for aligner, eg: -X 2000 for bowtie2. default: [None]')
    parser.add_argument('--n-map', dest='n_map', type=int, default=0,
                        help='Report up to N alignments per read. use -k for bowtie and \
        bowtie2 (default 1), --outFilterMultimapNmax for STAR \
        (default 20).')
    parser.add_argument('--repeat-masked-genome', dest='repeat_masked_genome',
                        action='store_true',
                        help='map to repeat masked reference genome, data from EnsEMBL')
    parser.add_argument('--path_data',
                        help='The directory of genome files, default: \
        [$HOME/data/genome/]')
    ## extra rRNA
    parser.add_argument('--align-to-chrM', dest='align_to_chrM',
                        action='store_true',
                        help='if specified, align to Mitochondrial DNA before genome, for supported genomes')
    parser.add_argument('--align-to-rRNA', dest='align_to_rRNA',
                        action='store_true',
                        help='if specified, align to rRNA before genome, for supported genomes')
    parser.add_argument('--align-to-MT-trRNA', dest='align_to_MT_trRNA',
                        action='store_true',
                        help='if specified, align to Mito, tRNA and rRNA before genome, for supported genomes')
    # fixed: removed duplicated 'NoSharedMemory' from choices and the stray
    # trailing comma that turned this statement into a tuple expression
    parser.add_argument('--genomeLoad', dest='genomeLoad',
                        default='LoadAndRemove',
                        choices=['NoSharedMemory', 'LoadAndKeep', 'LoadAndRemove', 'LoadAndExit', 'Remove'],
                        help='--genomeLoad for STAR, default: [LoadAndRemove]')
    parser.add_argument('--overwrite', action='store_true',
                        help='if specified, overwrite exists file')  # typo fix: "spcified"
    parser.add_argument('-p', '--threads', default=1, type=int,
                        help='Number of threads for each job, default: [1]')
    parser.add_argument('-j', '--parallel-jobs', default=1, type=int,
                        dest='parallel_jobs',
                        help='Number of jobs run in parallel, only for multiple fastq files, default: [1]')
    return parser
def add_quant_args():
    """
    Build the argument parser for feature quantification (featureCounts).

    Supported input: BAM + GTF/BED/GFF annotation.

    Returns
    -------
    argparse.ArgumentParser
        Parser with -i/--fq1 (input BAM files), --overwrite and --threads.
    """
    parser = argparse.ArgumentParser(
        description='quant reads')
    # NOTE: the destination is named 'fq1' for consistency with the other
    # sub-command parsers, although the inputs here are BAM files.
    parser.add_argument('-i', '--fq1', nargs='+', required=True,
        help='BAM files, support multiple files separated by white spaces')
    # fixed help-text typos: "spcified" -> "specified", "exists file" -> "existing files"
    parser.add_argument('--overwrite', action='store_true',
        help='if specified, overwrite existing files')
    parser.add_argument('--threads', default=8, type=int,
        help='Number of threads to launch, default: 8.')
    return parser
def add_peak_args():
"""
quantify features using featureCounts
support input file: BAM + GTF/BED/GFF ...
"""
parser = argparse.ArgumentParser(
description='call peaks')
parser.add_argument('-i', '--bam', nargs='+', required=True,
help='BAM files, from IP sample')
parser.add_argument('-c', '--control', nargs='+', required=False,
help='BAM files for control sample, optional')
parser.add_argument('-o', '--outdir', default=None,
help='The directory to save results, default, \
current working directory.')
parser.add_argument('-n', '--name', default=None,
help='The prefix of output files, default: None')
| |
f = [0] + list(self._get_cpo_expr(f).value)
invf = [0] + list(self._get_cpo_expr(invf).value)
self._add_to_model(CpoFunctionCall(Oper_inverse, Type_Constraint, (build_cpo_expr(f), build_cpo_expr(invf))))
def _compile_pred_bool_clause(self, fc):
""" Implementation of bool_clause predicate
Args:
a: First array of booleans
b: Second array of booleans
"""
a, b = fc.args
# Default implementation
exprs = list(self._get_cpo_expr(a).value)
for x in self._get_cpo_expr(b).value:
exprs.append(1 - x)
self._add_to_model(modeler.sum_of(exprs) >= 1)
# Alternative implementation
# self._add_to_model( (modeler.max_of(a) > 0) | (modeler.min_of(b) == 0) )
# Other alternative implementation
# expr = None
# for x in _get_value(a):
# x = x > 0
# expr = x if expr is None else modeler.logical_or(expr, x)
# for x in _get_value(b):
# x = x == 0
# expr = x if expr is None else modeler.logical_or(expr, x)
# self._add_to_model(expr)
def _compile_pred_table_int(self, fc):
""" Implement custom predicate table_int
Args:
vars: Array of variables
values: List of values
"""
vars, values = fc.args
# Split value array in tuples
vars = self._get_cpo_expr(vars).value
tsize = len(vars)
if tsize != 0:
values = self._get_cpo_expr(values).value
tuples = [values[i: i + tsize] for i in range(0, len(values), tsize)]
# Build allowed assignment expression
self._add_to_model(modeler.allowed_assignments(vars, tuples))
def _compile_pred_lex_less_bool(self, fc):
""" Requires that the array vars1 is strictly lexicographically less than array vars2
Args:
vars1: First array of variables
vars2: Second array of variables
"""
vars1, vars2 = fc.args
# Add 0 and 1 at the end of arrays to force inequality
vars1 = list(self._get_cpo_expr(vars1).value) + [1]
vars2 = list(self._get_cpo_expr(vars2).value) + [0]
self._add_to_model(modeler.lexicographic(vars1, vars2))
    def _compile_pred_lex_less_int(self, fc):
        # Integer version is identical to the boolean one: strictness is
        # enforced by the padding added in the boolean implementation.
        self._compile_pred_lex_less_bool(fc)
def _get_domain_bounds(self, x):
""" Get min and max bounds of an expression, integer variable or integer
Args:
expr: CPO integer variable or expression
Returns:
Tuple (min, max)
"""
# Case of variable or variable replaced by an expression
if isinstance(x, CpoIntVar):
return (x.get_domain_min(), x.get_domain_max())
if isinstance(x, CpoFunctionCall):
if x.operation is Oper_start_of:
return x.children[0].get_start()
if x.operation is Oper_size_of:
return x.children[0].get_size()
# if x.is_kind_of(Type_BoolExpr):
# return (0, 1)
cpov = self.cpo_exprs.get(x.name)
if isinstance(cpov, CpoIntVar):
return (cpov.get_domain_min(), cpov.get_domain_max())
raise FznParserException("Unknow expression to take bounds from: {}".format(x))
if isinstance(x, CpoValue):
x = x.value
if is_int(x):
return (x, x)
return x
    def _assign_to_var(self, var, expr):
        """ Set an identifier with an expression.

        When expression reduction is enabled and the name is still free, the
        expression simply takes over the variable's name; otherwise an
        equality constraint is posted.

        Args:
            var: Target FZN variable
            expr: CPO expression to assign
        Returns:
            None. If needed, changes are done in reader context.
        """
        # Retrieve existing expression registered under this name, if any
        vname = var.name
        vexpr = self.cpo_exprs.get(vname)
        # Check if reduction applies: it is skipped when disabled, when the
        # name is already a model variable, or when the name is bound to a
        # non-variable expression.
        if (not self.reduce) or (vname in self.cpo_variables) or (vexpr is not None and not isinstance(vexpr, CpoIntVar)):
            # NOTE(review): if vexpr is None here (name never registered while
            # reduce is disabled), equal(None, expr) looks suspicious -- confirm.
            self._add_to_model(modeler.equal(vexpr, expr))
        else:
            # Assign new name to expression
            self.cpo_exprs[vname] = expr
            expr.set_name(vname)
            # Constrain expression to variable domain
            self._constrain_expr_domain(expr, var)
    def _make_equal(self, var, expr, dvar):
        """ Make equal two FZN expressions.

        Args:
            var: FZN variable or value
            expr: CPO expression to be equal with
            dvar: Defined variable of the constraint, None if none
        Returns:
            None. If needed, changes are done in reader context.
        """
        # Check if var is not a variable: post a plain equality with its value
        if not isinstance(var, FznVariable):
            self._add_to_model(modeler.equal(expr, self._get_cpo_expr(var)))
            return
        # Check no reduction: reduction only applies when var is the
        # constraint's own defined variable
        if not self.reduce or (dvar is not var):
            self._add_to_model(modeler.equal(self._get_cpo_expr(var), expr))
            return
        # Assign to variable (replaces the variable by the expression)
        self._assign_to_var(var, expr)
    def _constrain_expr_domain(self, expr, var):
        """ Constrain the domain of an expression to the domain of a variable.

        Args:
            expr: CPO expression to constrain
            var: CPO or FZN integer var to take domain from
        """
        dom = var.domain
        dmin = expression._domain_min(dom)
        dmax = expression._domain_max(dom)
        # Check boolean expression: a 0..1 domain needs no extra constraint
        if dmin == 0 and dmax == 1:
            return
        # A boolean expression already lies in [0, 1]; a domain that covers
        # [0, 1] therefore adds nothing
        if expr.is_kind_of(Type_BoolExpr) and (dmin <= 0) and (dmax >= 1):
            return
        # Add appropriate constraint
        if len(dom) == 1: # Single segment
            # Use range (or a plain equality for a singleton domain)
            if dmin == dmax:
                self._add_to_model(modeler.equal(expr, dmin))
            else:
                self._add_to_model(modeler.range(expr, dmin, dmax))
        else:
            # Multiple segments: use allowed assignment over each domain value
            self._add_to_model(modeler.allowed_assignments(expr, expression._domain_iterator(dom)))
def _compile_op_assign_arg_1(self, fc, op):
""" Compile operation with single argument equal to a result
Args:
fc: Constraint descriptor
op: CPO operation to apply to first argument
"""
# Access constraint arguments
a, r = fc.args
# Assign to expression
self._make_equal(r, op(self._get_cpo_expr(a)), fc.defvar)
def _compile_op_assign_arg_2(self, fc, op):
""" Compile operation with two arguments equal to a result
Args:
fc: Constraint descriptor
op: CPO operation to apply to arguments
"""
# Access constraint arguments
a, b, r = fc.args
# Assign to expression
self._make_equal(r, op(self._get_cpo_expr(a), self._get_cpo_expr(b)), fc.defvar)
def _compile_op_arg_1(self, fc, op):
""" Compile operation with one arguments and no result
Args:
fc: Constraint descriptor
op: CPO operation to apply to argument
"""
self._add_to_model(op(self._get_cpo_expr(fc.args[0])))
def _compile_op_arg_2(self, fc, op):
""" Compile operation with two arguments and no result
Args:
fc: Constraint descriptor
op: CPO operation to apply to arguments
"""
a, b = fc.args
self._add_to_model(op(self._get_cpo_expr(a), self._get_cpo_expr(b)))
def _compile_array_xxx_element(self, fc):
""" Compile access to array element """
x, t, r = fc.args
if not is_int(x):
x = self._get_cpo_expr(x)
self._make_equal(r, modeler.element(self._get_cpo_expr(t), x - 1), fc.defvar)
    def _compile_xxx_eq(self, fc):
        """ Compile all equality predicates.

        When expression reduction is enabled and one side is the constraint's
        defined variable, the equality is turned into a direct assignment
        instead of an equality constraint.

        Args:
            fc: Constraint descriptor whose args are the two sides (a, b)
        """
        # Access constraint arguments
        a, b = fc.args
        # Check default case: no reduction, post a plain equality
        if not self.reduce:
            self._add_to_model(modeler.equal(self._get_cpo_expr(a), self._get_cpo_expr(b)))
            return
        # Process trivial cases: x == x is always true
        if a is b:
            return None
        # Retrieve defined variable; without one, post a plain equality
        defvar = fc.defvar
        if defvar is None:
            self._add_to_model(modeler.equal(self._get_cpo_expr(a), self._get_cpo_expr(b)))
            return
        # Assign the non-defined side to the defined variable
        if defvar is a:
            self._assign_to_var(a, self._get_cpo_expr(b))
        else:
            self._assign_to_var(b, self._get_cpo_expr(a))
    def _compile_scal_prod(self, fc, op, reif=False):
        """ Compile a scalar product constraint: coefs . vars <op> res.

        When reduction is enabled and the constraint defines a variable, the
        scalar product is algebraically rearranged so that the defined
        variable is isolated and directly assigned.

        Args:
            fc: Constraint descriptor
            op: Comparison operation
            reif: Whether the constraint is reified (extra boolean argument)
        """
        # Access constraint arguments
        if reif:
            coefs, vars, res, reif = fc.args
        else:
            coefs, vars, res = fc.args
        # Check no reduction: post the constraint as-is
        defvar = fc.defvar
        if not self.reduce or not defvar:
            expr = op(modeler.scal_prod(self._get_cpo_expr(coefs), self._get_cpo_expr(vars)), self._get_cpo_expr(res))
            if reif:
                expr = modeler.equal(self._get_cpo_expr(reif), expr)
            self._add_to_model(expr)
            return
        # Get array elements
        coefs = _get_fzn_array(coefs)
        vars = _get_fzn_array(vars)
        # Check if defined variable is the result (or the reification boolean)
        if defvar is res or defvar is reif:
            expr = self._build_scal_prod_expr(coefs, vars, 0)
        else:
            # Arrange expression to have defined variable on the left
            vx = vars.index(defvar)
            vcoef = coefs[vx]
            vars = vars[:vx] + vars[vx + 1:]
            coefs = coefs[:vx] + coefs[vx + 1:]
            expr = res if is_int(res) else self._get_cpo_expr(res)
            if vcoef < 0:
                # Negative coefficient on defvar: negate the result side instead
                vcoef = -vcoef
                expr = -expr
            else:
                # Positive coefficient: move the remaining terms to the right side
                coefs = list([-c for c in coefs])
            # Build result
            expr = self._build_scal_prod_expr(coefs, vars, expr)
            if vcoef != 1:
                # Divide out the defined variable's coefficient
                expr = expr / vcoef
        # Check reif: the defined variable gets the truth value of the comparison
        if reif:
            expr = op(expr, self._get_cpo_expr(res))
            self._assign_to_var(defvar, expr)
        else:
            # Check equality with a variable
            if op is modeler.equal:
                self._assign_to_var(defvar, expr)
            else:
                self._add_to_model(op(expr, self._get_cpo_expr(defvar)))
    def _build_scal_prod_expr(self, coefs, vars, res):
        """ Build a scal_prod expression: coefs . vars + res.

        Args:
            coefs: Array of coefficients (integers)
            vars: Array of FZN variables
            res: Initial result value (integer, or expression accumulated so far)
        Returns:
            New CPO scal_prod (or developed sum) expression
        """
        # Build array of CPO variables
        vars = [self._get_cpo_expr(v) for v in vars]
        # Check developed scal_prod: for short arrays or all unit coefficients,
        # a plain sum of (negated) terms is used instead of a scal_prod node
        if len(coefs) <= 2 or (all(c == 1 or c == -1 for c in coefs)):
            for c, v in zip(coefs, vars):
                if c != 0:
                    if is_int_value(res, 0):
                        # First non-zero term replaces the zero accumulator
                        res = _mutl_by_int(v, c)
                    elif c < 0:
                        res = res - _mutl_by_int(v, -c)
                    else:
                        res = res + _mutl_by_int(v, c)
            return res
        # Build normal scal_prod; only add res when it is not the integer 0
        expr = modeler.scal_prod(coefs, vars)
        if not is_int_value(res, 0):
            expr = res + expr
        return expr
def _add_to_model(self, expr):
""" Add an expression to the CPO model
Args:
expr: CPO expression to add
"""
#print("_add_to_model({})".format(expr))
self.model.add(expr)
# Scan expression to identify used variables
estack = [expr]
doneset = set() # Set of expressions already processed
while estack:
e = estack.pop()
eid = id(e)
if not eid in doneset:
doneset.add(eid)
if e.type.is_variable:
#print(" add CPO variable {}".format(e))
self.cpo_variables.add(e.name)
# Stack children expressions
estack.extend(e.children)
def _write(self, out=None):
""" Write current parser status
Args:
out (optional): Write output. sys.stdout if not given
"""
if out is None:
out = sys.stdout
out.write("Reader status:\n")
out.write(" CPO expressions:\n")
for k in sorted(self.cpo_exprs.keys()):
v = self.cpo_exprs[k]
out.write(" {}: {} ({})\n".format(k, v, type(v)))
out.write(" Model expressions:\n")
for x in self.model.get_all_expressions():
out.write(" {}\n".format(x[0]))
###############################################################################
## Utility functions
###############################################################################
def _get_fzn_array(fzo):
""" Get the list of objects of | |
"""
This module provides tools for creating kmz products for a SICD type element.
.. Note::
Creation of ground overlays (i.e. image overlay) requires the optional
Pillow dependency for image manipulation.
Examples
--------
Create a kmz overview for the contents of a sicd type reader.
.. code-block:: python
import os
from sarpy.io.complex.converter import open_complex
from sarpy.visualization.kmz_product_creation import create_kmz_view
test_root = '<root directory>'
    reader = open_complex(os.path.join(test_root, '<file name>'))
create_kmz_view(reader, test_root,
file_stem='View-<something descriptive>',
pixel_limit=2048,
inc_collection_wedge=True)
"""
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
# TODO: tidy up significantly
import logging
from typing import Union
import json
import os
import numpy
from sarpy.processing.rational_polynomial import SarpyRatPolyError
from sarpy.processing.ortho_rectify.base import FullResolutionFetcher, OrthorectificationIterator
from sarpy.processing.ortho_rectify.ortho_methods import OrthorectificationHelper, NearestNeighborMethod
from sarpy.processing.ortho_rectify.projection_helper import PGProjection, PGRatPolyProjection
from sarpy.io.kml import Document
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.utils import sicd_reader_iterator
from sarpy.geometry.geocoords import ecf_to_geodetic
from sarpy.visualization.remap import RemapFunction, NRL
try:
# noinspection PyPackageRequirements
import PIL
import PIL.Image
except ImportError:
PIL = None
logger = logging.getLogger(__name__)
def _create_sicd_styles(kmz_document):
    """
    Creates the appropriate styles for SICD usage.

    Each feature type gets a "high" (highlighted) and a "low" (normal) style,
    joined by a style map. The same dict objects are re-used and mutated
    between the "high" and "low" ``add_style`` calls.
    NOTE(review): this assumes ``add_style`` consumes/serializes its style
    dicts immediately rather than keeping references -- confirm against
    sarpy.io.kml.Document.

    Parameters
    ----------
    kmz_document : Document

    Returns
    -------
    None
    """
    # bounding box style - maybe polygon, maybe corner points, clamped to ground
    label = {'color': 'ffc0c0c0', 'scale': '1.0'}
    icon = {'scale': '1.5', 'icon_ref': 'http://maps.google.com/mapfiles/kml/pushpin/blue-pushpin.png'}
    line = {'color': 'ccff5050', 'width': '2.0'}
    poly = {'color': '30ff5050'}
    kmz_document.add_style('bounding_high', label_style=label, icon_style=icon, line_style=line, poly_style=poly)
    # tone the same styles down for the non-highlighted variant
    label['scale'] = '0.75'
    icon['scale'] = '1.0'
    line['width'] = '1.0'
    kmz_document.add_style('bounding_low', label_style=label, icon_style=icon, line_style=line, poly_style=poly)
    kmz_document.add_style_map('bounding', 'bounding_high', 'bounding_low')
    # valid data style - basic polygon, probably clamped to ground
    line = {'color': 'cc5050ff', 'width': '2.0'}
    poly = {'color': '305050ff'}
    kmz_document.add_style('valid_high', line_style=line, poly_style=poly)
    line['width'] = '1.0'
    kmz_document.add_style('valid_low', line_style=line, poly_style=poly)
    kmz_document.add_style_map('valid', 'valid_high', 'valid_low')
    # scp - intended for basic point clamped to ground
    label = {'color': 'ff50c0c0', 'scale': '1.0'}
    icon = {'color': 'ff5050c0', 'scale': '1.5',
            'icon_ref': 'http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png'}
    kmz_document.add_style('scp_high', label_style=label, icon_style=icon)
    label['scale'] = '0.75'
    icon['scale'] = '1.0'
    kmz_document.add_style('scp_low', label_style=label, icon_style=icon)
    kmz_document.add_style_map('scp', 'scp_high', 'scp_low')
    # arp position style - intended for gx track
    line = {'color': 'ff50ff50', 'width': '1.5'}
    label = {'color': 'ffc0c0c0', 'scale': '1.5'}
    icon = {'scale': '2.0', 'icon_ref': 'http://maps.google.com/mapfiles/kml/shapes/track.png'}
    poly = {'color': 'a050ff50'}
    kmz_document.add_style('arp_high', line_style=line, label_style=label, icon_style=icon, poly_style=poly)
    line['width'] = '1.0'
    label['scale'] = '1.0'
    icon['scale'] = '1.0'
    poly = {'color': '7050ff50'}
    kmz_document.add_style('arp_low', line_style=line, label_style=label, icon_style=icon, poly_style=poly)
    kmz_document.add_style_map('arp', 'arp_high', 'arp_low')
    # collection wedge style - intended as polygon
    line = {'color': 'ffa0a050', 'width': '1.5'}
    poly = {'color': 'a0a0a050'}
    kmz_document.add_style('collection_high', line_style=line, poly_style=poly)
    line['width'] = '1.0'
    poly = {'color': '70a0a050'}
    kmz_document.add_style('collection_low', line_style=line, poly_style=poly)
    kmz_document.add_style_map('collection', 'collection_high', 'collection_low')
def _get_sicd_name(sicd):
"""
Gets the kml-styled name for the provided SICD.
Parameters
----------
sicd : SICDType
Returns
-------
str
"""
return sicd.CollectionInfo.CoreName
def _get_sicd_description(sicd):
"""
Gets the kml-styled description for the provided SICD.
Parameters
----------
sicd : SICDType
Returns
-------
str
"""
o_sicd = sicd.copy()
# junk the WgtFunct, it's huge and probably not interesting
try:
o_sicd.Grid.Row.WgtFunct = None
o_sicd.Grid.Col.WgtFunct = None
except AttributeError:
pass
return json.dumps(o_sicd.to_dict(), indent=1)
def _get_orthoiterator_description(ortho_iterator):
    """
    Get a description for the ortho_iterator details.

    Parameters
    ----------
    ortho_iterator : OrthorectificationIterator

    Returns
    -------
    str
        HTML fragment describing the ortho-rectified image.
    """
    proj_helper = ortho_iterator.ortho_helper.proj_helper
    sicd_name = _get_sicd_name(ortho_iterator.sicd)
    return 'ortho-rectified image for {0:s}<br>' \
           'row resolution - {1:0.2f} meters<br>' \
           'column resolution - {2:0.2f} meters<br>' \
           'remap function - {3:s}'.format(
               sicd_name,
               proj_helper.row_spacing,
               proj_helper.col_spacing,
               ortho_iterator.remap_function.name)
def _get_sicd_time_args(sicd, subdivisions=24):
# type: (SICDType, Union[int, None]) -> (dict, Union[None, numpy.ndarray])
"""
Fetch the SICD time arguments and array.
Parameters
----------
sicd : SICDType
subdivisions : int|None
Returns
-------
(dict, None|numpy.ndarray)
"""
if sicd.Timeline is None or sicd.Timeline.CollectStart is None:
return {}, None
beg_time = sicd.Timeline.CollectStart.astype('datetime64[us]')
if sicd.Timeline.CollectDuration is None:
return {'when': str(beg_time)+'Z',}, None
end_time = beg_time + int(sicd.Timeline.CollectDuration*1e6)
if not isinstance(subdivisions, int) or subdivisions < 2:
time_array = None
else:
time_array = numpy.linspace(0, sicd.Timeline.CollectDuration, subdivisions)
return {'beginTime': str(beg_time)+'Z', 'endTime': str(end_time)+'Z'}, time_array
def _write_image_corners(kmz_document, sicd, time_args, folder, write_points=True):
    """
    Write the image corner.

    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    folder : minidom.Element
    write_points : bool
        Write points, or a polygon?

    Returns
    -------
    None
    """
    # Nothing to do without corner coordinates
    if sicd.GeoData is None or sicd.GeoData.ImageCorners is None:
        return
    # KML wants "lon,lat,alt" -- index 1 (lon) comes first, assuming the
    # corner array rows are (lat, lon)
    frm = '{1:0.8f},{0:0.8f},0'
    corners = sicd.GeoData.ImageCorners.get_array(dtype='float64')
    if numpy.any(~numpy.isfinite(corners)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the corner locations array.')
    if write_points:
        # One placemark per corner; non-finite corners are skipped
        names = ['FRFC', 'FRLC', 'LRLC', 'LRFC']
        for nam, corner in zip(names, corners):
            if numpy.any(~numpy.isfinite(corner)):
                continue
            coords = frm.format(*corner)
            placemark = kmz_document.add_container(par=folder, description='{} for {}'.format(nam, _get_sicd_name(sicd)),
                                                   styleUrl='#bounding')
            kmz_document.add_point(coords, par=placemark, altitudeMode='clampToGround', **time_args)
    else:
        # write the polygon (only the finite corners)
        coords = ' '.join(frm.format(*el) for el in corners if not numpy.any(~numpy.isfinite(el)))
        placemark = kmz_document.add_container(par=folder, description='image corners for {}'.format(_get_sicd_name(sicd)), styleUrl='#bounding')
        kmz_document.add_polygon(coords, par=placemark, altitudeMode='clampToGround', **time_args)
def _write_valid_area(kmz_document, sicd, time_args, folder):
    """
    Write the valid area polygon.

    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    folder : minidom.Element

    Returns
    -------
    None
    """
    geo_data = sicd.GeoData
    if geo_data is None or geo_data.ValidData is None:
        return
    frm = '{1:0.8f},{0:0.8f},0'
    valid_array = geo_data.ValidData.get_array(dtype='float64')
    if numpy.any(~numpy.isfinite(valid_array)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the valid array location.')
    # Close the ring by repeating the first vertex at the end
    vertices = [frm.format(*entry) for entry in valid_array]
    vertices.append(frm.format(*valid_array[0, :]))
    coords = ' '.join(vertices)
    placemark = kmz_document.add_container(
        par=folder, description='valid data for {}'.format(_get_sicd_name(sicd)), styleUrl='#valid')
    kmz_document.add_polygon(coords, par=placemark, altitudeMode='clampToGround', **time_args)
def _write_scp(kmz_document, sicd, time_args, folder):
    """
    Write the SCP (scene center point) location.

    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    folder : minidom.Element

    Returns
    -------
    None
    """
    if sicd.GeoData is None or sicd.GeoData.SCP is None:
        return
    scp_llh = sicd.GeoData.SCP.LLH.get_array()
    if numpy.any(~numpy.isfinite(scp_llh)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the scp location.')
    # KML wants "lon,lat,alt" -- index 1 (lon) first, assuming (lat, lon, hae)
    frm = '{1:0.8f},{0:0.8f},0'
    coords = frm.format(*scp_llh)
    placemark = kmz_document.add_container(par=folder, description='SCP for {}'.format(_get_sicd_name(sicd)), styleUrl='#scp')
    kmz_document.add_point(coords, par=placemark, altitudeMode='clampToGround', **time_args)
def _write_arp_location(kmz_document, sicd, time_args, time_array, folder):
    """
    Write the aperture reference point (ARP) track as a gx track.

    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    time_array : None|numpy.ndarray

    folder : minidom.Element

    Returns
    -------
    None|numpy.ndarray
        The ARP positions in lat/lon/hae, or None if they cannot be derived.
    """
    if time_array is None:
        return None
    # Prefer the position polynomial; fall back on linear motion from SCPCOA
    if sicd.Position is not None and sicd.Position.ARPPoly is not None:
        arp_pos = sicd.Position.ARPPoly(time_array)
    elif sicd.SCPCOA.ARPPos is not None and sicd.SCPCOA.ARPVel is not None:
        arp_pos = sicd.SCPCOA.ARPPos.get_array() + numpy.outer(time_array, sicd.SCPCOA.ARPVel.get_array())
    else:
        return None
    arp_llh = ecf_to_geodetic(arp_pos)
    if numpy.any(~numpy.isfinite(arp_llh)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the aperture location.')
    # KML wants "lon,lat,alt"; one absolute timestamp per sample
    coords = ['{1:0.8f},{0:0.8f},{2:0.2f}'.format(*el) for el in arp_llh]
    whens = [str(sicd.Timeline.CollectStart.astype('datetime64[us]') + int(el*1e6)) + 'Z' for el in time_array]
    placemark = kmz_document.add_container(par=folder, description='aperture position for {}'.format(_get_sicd_name(sicd)), styleUrl='#arp', **time_args)
    kmz_document.add_gx_track(coords, whens, par=placemark, extrude=True, tesselate=True, altitudeMode='absolute')
    return arp_llh
def _write_collection_wedge(kmz_document, sicd, time_args, arp_llh, time_array, folder):
    """
    Writes the collection wedge.

    Parameters
    ----------
    kmz_document : Document
    sicd : SICDType
    time_args : dict
    arp_llh : None|numpy.ndarray
    time_array : None|numpy.ndarray
    folder : minidom.Element

    Returns
    -------
    None
    """
    if time_array is None or arp_llh is None:
        return
    # Ground reference: GRP polynomial if available, otherwise the single SCP
    if sicd.Position is not None and sicd.Position.GRPPoly is not None:
        grp = sicd.Position.GRPPoly(time_array)
    elif sicd.GeoData is not None and sicd.GeoData.SCP is not None:
        grp = numpy.reshape(sicd.GeoData.SCP.ECF.get_array(), (1, 3))
    else:
        return
    frm = '{1:0.8f},{0:0.8f},{2:0.2f}'
    grp_llh = ecf_to_geodetic(grp)
    if numpy.any(~numpy.isfinite(grp_llh)):
        logger.error('There are nonsense entries (nan or +/- infinity) in the scp/ground range locations.')
    # Polygon: ARP track forward, ground track backward, then close the ring
    coord_array = [frm.format(*el) for el in arp_llh]
    if len(grp_llh) > 1:
        coord_array.extend(frm.format(*el) for el in grp_llh[::-1, :])
    else:
        coord_array.append(frm.format(*grp_llh[0, :]))
    coord_array.append(frm.format(*arp_llh[0, :]))
    coords = ' '.join(coord_array)
    placemark = kmz_document.add_container(par=folder, description='collection wedge for {}'.format(_get_sicd_name(sicd)), styleUrl='#collection', **time_args)
    kmz_document.add_polygon(coords, par=placemark, extrude=False, tesselate=False, altitudeMode='absolute')
def _write_sicd_overlay(ortho_iterator, kmz_document, folder):
    """
    Write the orthorectified SICD ground overlay.

    Requires the optional Pillow dependency; logs an error and returns
    when it is not installed.

    Parameters
    ----------
    ortho_iterator : OrthorectificationIterator
    kmz_document : Document
    folder : minidom.Element

    Returns
    -------
    None
    """
    def reorder_corners(llh_in):
        # reverse corner order for the kml lat/lon quad convention
        return llh_in[::-1, :]

    if PIL is None:
        logger.error(
            'This functionality for writing kmz ground overlays requires the optional Pillow dependency.')
        return
    time_args, _ = _get_sicd_time_args(ortho_iterator.sicd, subdivisions=None)
    # create the output workspace
    if ortho_iterator.remap_function.bit_depth != 8:
        raise ValueError('The bit depth for the remap function must be 8, for now.')
    image_data = numpy.zeros(ortho_iterator.ortho_data_size, dtype=ortho_iterator.remap_function.output_dtype)
    # populate by iterating over the orthorectified chunks
    for data, start_indices in ortho_iterator:
        image_data[start_indices[0]:start_indices[0]+data.shape[0],
                   start_indices[1]:start_indices[1]+data.shape[1]] = data
    # create regionated overlay
    # convert image array to PIL image.
    img = PIL.Image.fromarray(image_data)  # this is to counteract the PIL treatment
    lat_lon_quad = reorder_corners(ortho_iterator.get_llh_image_corners())
    kmz_document.add_regionated_ground_overlay(
        img, folder, lat_lon_quad=lat_lon_quad[:, :2], img_format='JPEG',
        name='image overlay for {}'.format(_get_sicd_name(ortho_iterator.sicd)),
        description=_get_orthoiterator_description(ortho_iterator))
def prepare_kmz_file(file_name, **args):
    """
    Prepare a kmz document and archive for exporting.

    Parameters
    ----------
    file_name : str
    args
        Passed through to the Document constructor.

    Returns
    -------
    Document
        A Document with the SICD styles already registered.
    """
    kmz_document = Document(file_name=file_name, **args)
    _create_sicd_styles(kmz_document)
    return kmz_document
def add_sicd_geometry_elements(sicd, kmz_document, folder,
inc_image_corners=True, inc_valid_data=False,
inc_scp=False, inc_collection_wedge=True):
"""
Write the geometry elements of a SICD.
Parameters
----------
sicd : SICDType
kmz_document : Document
folder : minidom.Element
| |
# cleary-lab/CISI -- decompression/autoencoding/module/ccae.py
import numpy as np
import tensorflow as tf
import glob,os,sys
from scipy.spatial import distance
from tensorflow_probability import edward2 as ed
import tensorflow_probability as tfp
tfd = tfp.distributions
from tensorflow.core.framework.summary_pb2 import Summary
import io
from tensorflow.python.lib.io import file_io
from scipy.stats import entropy
import sys
def _parse_function(example):
    """Parse one serialized tf.Example into a {'features', 'labels'} dict.

    Decodes the raw image and validation buffers (shifting each so its
    minimum is zero) and rebuilds the sparse per-cell embedding mask as a
    dense tensor so it can flow through padded_batch.

    Args:
        example: Scalar string tensor holding a serialized tf.Example.

    Returns:
        Dict with 'features' (image data plus metadata and the dense
        embedding mask) and 'labels' (validation data and its indices).
    """
    features = {"raw_data": tf.FixedLenFeature([], tf.string),
                "height": tf.FixedLenFeature((), tf.int64),
                "width": tf.FixedLenFeature((), tf.int64),
                "channels": tf.FixedLenFeature((), tf.int64),
                "tissue": tf.FixedLenFeature([], tf.string),
                "fov": tf.FixedLenFeature((), tf.int64),
                "row_offset": tf.FixedLenFeature((), tf.int64),
                "col_offset": tf.FixedLenFeature((), tf.int64),
                "raw_validation": tf.FixedLenFeature([], tf.string),
                "validation_indices": tf.FixedLenSequenceFeature((), tf.int64,True),
                "n_cells": tf.FixedLenFeature((), tf.int64),
                "max_pixels_per_cell": tf.FixedLenFeature((), tf.int64),
                "sp_embedding_mask_row": tf.FixedLenSequenceFeature((), tf.int64,True),
                "sp_embedding_mask_col": tf.FixedLenSequenceFeature((), tf.int64,True),
                "sp_embedding_mask_values": tf.FixedLenSequenceFeature((), tf.int64,True)}
    parsed_features = tf.parse_single_example(example, features)
    # decode and zero-shift the image buffer
    raw_data = tf.decode_raw(parsed_features['raw_data'],tf.float32)
    raw_data = raw_data - tf.reduce_min(raw_data)
    # decode and zero-shift the held-out validation buffer
    raw_validation = tf.decode_raw(parsed_features['raw_validation'],tf.float32)
    raw_validation = raw_validation - tf.reduce_min(raw_validation)
    # reassemble the sparse (cell, pixel) -> value embedding mask
    indices = tf.stack([parsed_features['sp_embedding_mask_row'], parsed_features['sp_embedding_mask_col']],axis=1)
    values = parsed_features['sp_embedding_mask_values']
    # need all to be the same shape, so just using some big number as proxy for max_pixels_per_cell
    shape = [parsed_features['n_cells'], 15000]
    sparse_embedding_tensor = tf.sparse.SparseTensor(indices, values, shape)
    # tf's padded batch breaks with sparse tensors...
    sparse_embedding_tensor = tf.sparse.to_dense(sparse_embedding_tensor)
    return {'features': (raw_data,
                         parsed_features['height'],
                         parsed_features['width'],
                         parsed_features['channels'],
                         parsed_features['tissue'],
                         parsed_features['fov'],
                         parsed_features['row_offset'],
                         parsed_features['col_offset'],
                         sparse_embedding_tensor),
            'labels': (raw_validation, parsed_features['validation_indices'])}
def _parse_function_withAugment(example):
    """Parse a serialized tf.Example and apply random augmentation to the image data.

    Args:
        example: Scalar string tensor holding a serialized tf.Example.

    Returns:
        Tuple (augmented_data, height, width, channels, tissue, fov,
        row_offset, col_offset).
    """
    feature_spec = {
        "raw_data": tf.FixedLenFeature([], tf.string),
        "height": tf.FixedLenFeature((), tf.int64),
        "width": tf.FixedLenFeature((), tf.int64),
        "channels": tf.FixedLenFeature((), tf.int64),
        "tissue": tf.FixedLenFeature([], tf.string),
        "fov": tf.FixedLenFeature((), tf.string),
        "row_offset": tf.FixedLenFeature((), tf.int64),
        "col_offset": tf.FixedLenFeature((), tf.int64),
    }
    parsed = tf.parse_single_example(example, feature_spec)
    decoded = tf.decode_raw(parsed['raw_data'], tf.float32)
    # Shift so the minimum value is zero before augmenting
    decoded = decoded - tf.reduce_min(decoded)
    augmented = augment_data(decoded)
    return (augmented, parsed['height'], parsed['width'], parsed['channels'],
            parsed['tissue'], parsed['fov'], parsed['row_offset'], parsed['col_offset'])
def get_dataset(filepath_list,batch_size,epochs=1,shuffle=True,batch_repeats=0):
    """Build a batched tf.data pipeline over TFRecord files.

    Args:
        filepath_list: TFRecord path or list of paths.
        batch_size: Number of examples per batch.
        epochs: Number of passes over the data.
        shuffle: Whether to shuffle with a 50-example buffer.
        batch_repeats: If > 0, repeat each record this many times consecutively.

    Returns:
        A tf.data.Dataset yielding padded {'features', 'labels'} batches.
    """
    dataset = tf.data.TFRecordDataset(filepath_list)
    if batch_repeats > 0:
        # repeat each record in place (not whole-epoch repetition)
        dataset = dataset.flat_map(lambda x: tf.data.Dataset.from_tensors(x).repeat(batch_repeats))
    dataset = dataset.map(_parse_function)
    if shuffle:
        dataset = dataset.shuffle(buffer_size=50)
    dataset = dataset.repeat(epochs)
    # the 15000 padding width matches the proxy max_pixels_per_cell used when parsing
    dataset = dataset.padded_batch(batch_size=batch_size, padded_shapes={'features': ([None], [] ,[], [], [], [], [], [], [None,15000]), 'labels': ([None], [None])})
    # NOTE(review): prefetch(buffer_size=None) requires a TF version that
    # interprets None as autotune -- confirm against the pinned TF release.
    dataset = dataset.prefetch(buffer_size=None)
    return dataset
def augment_data(input_data, angle=5, shift=5):
    """Randomly rotate and translate a batch of images.

    Args:
        input_data: Batch of images; the first dimension is the batch size.
            NOTE(review): tf.contrib.image.rotate/transform expect image-shaped
            tensors -- confirm the rank supplied by callers.
        angle: Maximum absolute rotation, in degrees.
        shift: Maximum absolute translation, in pixels.

    Returns:
        The augmented batch, same shape as the input.
    """
    num_images_ = tf.shape(input_data)[0]
    # random rotate: uniform angle in [-angle, +angle] degrees, converted to radians
    processed_data = tf.contrib.image.rotate(input_data, tf.random_uniform([num_images_], maxval=np.pi / 180 * angle, minval=np.pi / 180 * -angle))
    # random shift via an 8-parameter projective transform; base is the identity
    base_row = tf.constant([1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32)
    base_ = tf.tile(base_row, [num_images_, 1])
    # mask selects only the two translation entries of the transform
    mask_row = tf.constant([0, 0, 1, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32)
    mask_ = tf.tile(mask_row, [num_images_, 1])
    random_shift_ = tf.random_uniform([num_images_, 8], minval=-shift, maxval=shift, dtype=tf.float32)
    transforms_ = base_ + random_shift_ * mask_
    processed_data = tf.contrib.image.transform(images=processed_data, transforms=transforms_)
    return processed_data
def remove_channel(sample, to_remove):
    """Drop the channel at index `to_remove` from axis 3 of `sample`."""
    # indices of all channels except the one being removed
    keep = tf.concat([tf.range(0, to_remove), tf.range(to_remove + 1, tf.shape(sample)[3])], 0)
    return tf.gather(sample, keep, axis=3)
def _weight_variable(shape, name='weights', regularization=False, clip_value=(),clip_norm=(),init_type=None,init_var=0.1):
    """Create (or reuse within the current variable scope) a weight variable.

    Args:
        shape: Shape of the variable.
        name: Variable name within the current scope.
        regularization: Unused; kept for interface compatibility.
        clip_value: Optional (min, max) tuple; when given and clip_norm is
            empty, the variable is constrained by value clipping.
        clip_norm: Optional norm-clipping spec; currently only disables value
            clipping when non-empty (no norm constraint is applied).
        init_type: 'truncated_normal' to initialize from a truncated normal;
            anything else uses the scope's default initializer.
        init_var: Standard deviation of the truncated-normal initializer.

    Returns:
        A tf.Variable obtained through tf.get_variable.
    """
    if init_type == 'truncated_normal':
        # Bug fix: the stddev was hard-coded to 0.1 and init_var was ignored;
        # default value of init_var (0.1) keeps previous behavior.
        initial = tf.truncated_normal_initializer(0, init_var)
    else:
        initial = None
    if (len(clip_value) > 0) and (len(clip_norm) == 0):
        var = tf.get_variable(name, shape, tf.float32, initializer=initial, constraint=lambda x: tf.clip_by_value(x, clip_value[0], clip_value[1]))
    else:
        var = tf.get_variable(name, shape, tf.float32,initializer=initial)
    return var
def shared_conv2d(input,filter_height, filter_width,num_filters,strides=[1,2,2,1],padding='SAME'):
    """Convolve every channel independently with one shared filter bank.

    The same 2-D filters are applied to each channel separately; there is no
    mixing across channels.

    Args:
        input: Tensor of rank 4 (batch x h x w x channels) or rank 5 with a
            trailing per-channel filter axis.
        filter_height: Spatial filter height.
        filter_width: Spatial filter width.
        num_filters: Number of filters in the shared bank.
        strides: Passed to tf.nn.conv2d.
        padding: Passed to tf.nn.conv2d.

    Returns:
        Tensor of shape batch x h' x w' x channels x num_filters.
    """
    # batch_size x in_w x in_h x channels x num_filters
    if len(input.get_shape()) == 4:
        input = tf.expand_dims(input,axis=-1)
    # apply one set of filters to all channels, but don't convolve across channels
    filter = _weight_variable([filter_height,filter_width,input.get_shape()[-1],num_filters])
    # map over the channel axis so each channel is convolved on its own
    x = tf.map_fn(lambda xc: tf.nn.conv2d(xc,filter,strides,padding), tf.transpose(input,perm=[3,0,1,2,4]))
    # batch_size x in_w x in_h x channels x num_filters
    x = tf.transpose(x,perm=[1,2,3,0,4])
    return x
def shared_conv2d_transpose(input,filter_height, filter_width,num_filters,strides=[1,2,2,1],padding='SAME'):
    """Per-channel transposed convolution with one shared filter bank.

    Mirror of shared_conv2d: the same transposed filters are applied to every
    channel independently, upsampling the spatial dimensions by the strides.

    Args:
        input: Rank-5 tensor, batch x h x w x channels x filters.
        filter_height: Spatial filter height.
        filter_width: Spatial filter width.
        num_filters: Number of output filter maps.
        strides: Passed to tf.nn.conv2d_transpose (spatial upsample factors).
        padding: Passed to tf.nn.conv2d_transpose.

    Returns:
        Tensor of shape batch x h*stride x w*stride x channels x num_filters.
    """
    in_shape = input.get_shape()
    filter = _weight_variable([filter_height, filter_width, num_filters, in_shape[-1]])
    # output spatial size scales with the strides
    out_shape = tf.stack([tf.shape(input)[0], tf.shape(input)[1]*strides[1], tf.shape(input)[2]*strides[2], num_filters])
    # map over the channel axis so each channel is deconvolved on its own
    x = tf.map_fn(lambda xc: tf.nn.conv2d_transpose(xc,filter,out_shape,strides,padding=padding), tf.transpose(input,perm=[3,0,1,2,4]))
    x = tf.transpose(x,perm=[1,2,3,0,4])
    return x
def _encode(x, filter_sizes, stride_factor):
    """Apply a stack of shared (per-channel) strided convolutions with ReLU.

    Args:
        x: Input tensor, batch x h x w x channels.
        filter_sizes: List of (filter_height, filter_width, num_filters).
        stride_factor: Spatial stride used by every layer.

    Returns:
        Encoded tensor after the final ReLU.
    """
    strides = [1, stride_factor, stride_factor, 1]
    for layer_idx, (f_h, f_w, n_filt) in enumerate(filter_sizes, start=1):
        with tf.variable_scope('Layer{}'.format(layer_idx), reuse=tf.AUTO_REUSE):
            x = tf.nn.relu(shared_conv2d(x, f_h, f_w, n_filt, strides=strides))
    return x
def _decode(x, filter_sizes, stride_factor):
    """Apply a stack of shared transposed convolutions with ReLU.

    The final layer keeps only the first filter map (collapsing the trailing
    filter axis) before its ReLU.

    Args:
        x: Input tensor, batch x h x w x channels x filters.
        filter_sizes: List of (filter_height, filter_width, num_filters).
        stride_factor: Spatial upsampling factor used by every layer.

    Returns:
        Decoded tensor after the final ReLU.
    """
    strides = [1, stride_factor, stride_factor, 1]
    last = len(filter_sizes) - 1
    for layer_idx, (f_h, f_w, n_filt) in enumerate(filter_sizes):
        with tf.variable_scope('Layer{}'.format(layer_idx + 1), reuse=tf.AUTO_REUSE):
            x = shared_conv2d_transpose(x, f_h, f_w, n_filt, strides=strides)
            if layer_idx == last:
                # final layer: keep only the first filter map
                x = x[:, :, :, :, 0]
            x = tf.nn.relu(x)
    return x
def encode_and_decode(encode_filters,decode_filters,stride_factor):
    """Build encode/decode closures over the shared convolutional stacks.

    Args:
        encode_filters: List of (filter_h, filter_w, num_filters) for the encoder.
        decode_filters: List of (filter_h, filter_w, num_filters) for the decoder.
        stride_factor: Spatial stride used by every layer in both stacks.

    Returns:
        Tuple (encode, decode) of closures sharing these hyper-parameters.
    """
    def encode(input,augment,to_remove=None):
        # input is the parsed 'features' tuple: (raw_data, height, width,
        # channels, ...metadata); reshape the flat buffer to NHWC
        shape = tf.stack([[-1],input[1][:1], input[2][:1], input[3][:1]],0)[:,0]
        data = tf.reshape(input[0],shape)
        if to_remove is not None:
            # optionally hold one channel out (e.g. for cross-validation)
            data = remove_channel(data,to_remove)
        # augmentation applied only when the 'augment' boolean tensor is true
        data = tf.cond(augment, lambda: augment_data(data), lambda: data)
        with tf.variable_scope('Encode', reuse=tf.AUTO_REUSE):
            encode_sample = _encode(data,encode_filters,stride_factor)
        # carry the remaining metadata fields along with the reshaped image
        sample = [data] + list(input[4:])
        return sample,encode_sample
    def decode(input):
        with tf.variable_scope('Decode', reuse=tf.AUTO_REUSE):
            decode_sample = _decode(input,decode_filters,stride_factor)
        return decode_sample
    return encode,decode
def composite_latent_encoding(x,Phi,U,image_size,patch_size,fov_start,fovs,offset_row,offset_col):
    """Fit a per-FOV latent dictionary W and project it through U and Phi.

    Learns one full-image weight tensor W per dictionary atom, slices the
    patch of W that corresponds to each example's FOV/offset, and maps it
    into channel space via U (new channels) and Phi (old channels).

    Returns (y, x_hat, W): the old-channel reconstruction, the new-channel
    latent, and the raw weight variable.
    """
    # input: batch_size x height x width x old_channels x num_filters
    # output: batch_size x height x width x new_channels x num_filters
    # fovs should be contiguous..will offset so min(fov) is index 0
    in_shape = x.get_shape()
    mod_shape = [-1, patch_size[0], patch_size[1], in_shape[4]]
    with tf.variable_scope('Decompress', reuse=tf.AUTO_REUSE):
        W = _weight_variable([U.shape[1]] + [image_size[0], image_size[1], image_size[2], in_shape[4]],init_type='truncated_normal')
    # Per-example (fov, row, col) index into W; fovs are shifted so the
    # smallest FOV lands at index 0.
    W_idx = tf.stack([fovs-fov_start,offset_row,offset_col], axis=1)
    # Slice each example's patch of W at its FOV/offset.
    W_reshape = tf.map_fn(lambda i: W[:,i[0],i[1]:i[1]+patch_size[0],i[2]:i[2]+patch_size[1],:], W_idx, dtype=tf.float32)
    # dict_size x (batch_size x height x width x num_filters)
    W_reshape = tf.reshape(tf.transpose(W_reshape,perm=[1,0,2,3,4]),[U.shape[1],-1])
    # new_channels x (batch_size x height x width x num_filters)
    x_hat = tf.matmul(U,W_reshape)
    x_hat = tf.nn.relu(x_hat)
    # old_channels x (batch_size x height x width x num_filters)
    y = tf.matmul(Phi,x_hat)
    # Restore batch-major layout for both outputs.
    x_hat = tf.reshape(x_hat,[x_hat.get_shape()[0]] + mod_shape)
    x_hat = tf.transpose(x_hat,perm=[1,2,3,0,4])
    y = tf.reshape(y,[y.get_shape()[0]] + mod_shape)
    y = tf.transpose(y,perm=[1,2,3,0,4])
    return y,x_hat,W
def latent_gamma(shape, concentration, rate):
    """Gamma prior with the given sample shape."""
    return ed.Gamma(concentration=concentration, rate=rate, sample_shape=shape)
def latent_poisson(shape, rate):
    """Poisson prior with a constant rate broadcast to `shape`."""
    rate_tensor = tf.constant(rate, shape=shape)
    return ed.Poisson(rate=rate_tensor)
def latent_normal(shape, mean, stdev):
    """Normal prior; scalar mean/stdev are broadcast to `shape`.

    NOTE(review): stdev is only wrapped in a constant when `mean` is not a
    list, so callers passing a list mean must pass a tensor-like stdev.
    """
    if isinstance(mean, list):
        return ed.Normal(loc=mean, scale=stdev)
    loc = tf.constant(mean, shape=shape)
    scale = tf.constant(stdev, shape=shape)
    return ed.Normal(loc=loc, scale=scale)
def get_entropy(W, collapse_channels=False):
    """Exponentiated Shannon entropy of |W| along axis 0.

    Normalizes |W| to a distribution over axis 0 and returns exp(entropy),
    a soft count of active rows, clipped to [0, num_rows] to guard against
    inf values.
    """
    magnitude = tf.abs(W)
    if collapse_channels:
        magnitude = tf.reduce_sum(magnitude, axis=-1)
    probs = magnitude / (tf.reduce_sum(magnitude, axis=0) + 1e-5)
    plogp = probs * tf.log(probs + 1e-5)
    entropy = tf.exp(-tf.reduce_sum(plogp, axis=0))
    # having issues with inf values...
    return tf.clip_by_value(entropy, 0, tf.cast(tf.shape(W)[0], tf.float32))
def composite_decoding(x, Phi, image_dim):
    """Project per-pixel module activations back through Phi^T."""
    num_modules = x.get_shape()[-1]
    flat = tf.reshape(x, [-1, num_modules])
    projected = tf.matmul(flat, tf.transpose(Phi))
    return tf.reshape(projected,
                      [tf.shape(x)[0], image_dim, image_dim, projected.get_shape()[-1]])
def get_total_variation(x):
    """Total variation of each channel, computed channel by channel."""
    per_channel = tf.map_fn(
        lambda xi: tf.image.total_variation(tf.expand_dims(xi, -1)),
        tf.transpose(x, perm=[3, 0, 1, 2]))
    return tf.transpose(per_channel)
def get_eval_summary(EvalImages, EvalImageIndices, DecompressedImages, image_dim):
    """Pair held-out validation images with the matching decompressed channels.

    Assumes every example in the batch shares the same validation gene set
    (generally true when all examples come from the same tissue).
    """
    eval_images = tf.reshape(
        EvalImages, [-1, image_dim, image_dim, tf.shape(EvalImageIndices)[1]])
    #Decompressed_subset = tf.gather(DecompressedImages,EvalImageIndices[0],axis=-1)
    decompressed_subset = tf.map_fn(
        lambda pair: tf.gather(pair[0], pair[1], axis=-1),
        (DecompressedImages, EvalImageIndices), dtype=tf.float32)
    return eval_images, decompressed_subset
def load_numpy_gcs(path, mode='rb'):
    """Load a numpy object from a local path, falling back to GCS via file_io.

    Args:
        path: local filesystem path or gs:// URL.
        mode: file mode used for the fallback streaming read.

    Returns:
        The loaded numpy object.
    """
    try:
        x = np.load(path)
    except Exception:  # was a bare except:, which also swallowed KeyboardInterrupt/SystemExit
        # np.load cannot open gs:// paths directly; stream the bytes instead.
        f_stream = file_io.FileIO(path, mode)
        x = np.load(io.BytesIO(f_stream.read()))
    return x
def save_numpy_gcs(path, object):
    """Save a numpy object to a local path, falling back to GCS via file_io.

    Args:
        path: local filesystem path or gs:// URL.
        object: array (or other object np.save accepts) to write.
    """
    try:
        np.save(path, object)
    except Exception:  # was a bare except:, which also swallowed KeyboardInterrupt/SystemExit
        # np.save cannot open gs:// paths directly; write through file_io.
        np.save(file_io.FileIO(path, 'w'), object)
def ssim_metric(im1, im2):
    """Streaming mean of MS-SSIM, with im2 rescaled to [0, 1] by its max."""
    im2_scaled = tf.div(im2, tf.reduce_max(im2))
    return tf.metrics.mean(tf.image.ssim_multiscale(im1, im2_scaled, 1))
def l1_metric(im1, im2):
    """Streaming mean absolute difference between the two images in log space."""
    log_im1 = tf.math.log(im1 + 1e-5)
    log_im2 = tf.math.log(im2 + 1e-5)
    return tf.metrics.mean(tf.losses.absolute_difference(log_im1, log_im2))
def mse_combined_metric(tensor_tuples):
    """Streaming mean over the per-pair mean squared errors."""
    pair_mse = [tf.reduce_mean(tf.square(first - second)) for first, second in tensor_tuples]
    return tf.metrics.mean(pair_mse)
def resize_correlation_metric(im1, im2, size, factor=4):
    """Streaming mean Pearson correlation after downsampling both images."""
    target_size = tf.constant([int(size / factor), int(size / factor)])
    small1 = tf.image.resize_images(im1, target_size)
    small2 = tf.image.resize_images(im2, target_size)
    mean1, var1 = tf.nn.moments(small1, axes=[0, 1, 2])
    mean2, var2 = tf.nn.moments(small2, axes=[0, 1, 2])
    # Mean of the centered product is the covariance.
    cov, _ = tf.nn.moments(tf.multiply(small1 - mean1, small2 - mean2), axes=[0, 1, 2])
    corr = tf.div(cov, tf.multiply(tf.sqrt(var1), tf.sqrt(var2)) + 1e-5)
    return tf.metrics.mean(corr)
def correlation_metric(im1, im2):
    """Streaming mean Pearson correlation per channel.

    Inputs are cells x channels matrices; moments are taken over cells.
    """
    mean1, var1 = tf.nn.moments(im1, axes=[0])
    mean2, var2 = tf.nn.moments(im2, axes=[0])
    # Mean of the centered product is the covariance.
    cov, _ = tf.nn.moments(tf.multiply(im1 - mean1, im2 - mean2), axes=[0])
    corr = tf.div(cov, tf.multiply(tf.sqrt(var1), tf.sqrt(var2)) + 1e-5)
    return tf.metrics.mean(corr)
def correlation_matrix(x, reshape_first=False, rescale=False):
    """Strict-upper-triangle entries of the channel correlation matrix.

    Args:
        x: batch x height x width x channels (x num_filters) tensor.
        reshape_first: if True, sum out the trailing filter axis first.
        rescale: if True, spatially downsample by 4x before correlating.

    Returns:
        1-D tensor of pairwise channel Pearson correlations (off-diagonal,
        upper triangle only).
    """
    if reshape_first:
        x = tf.reduce_sum(x, axis=4)
    if rescale:
        x = tf.image.resize_images(x, [tf.shape(x)[1] / 4, tf.shape(x)[2] / 4])
    # channels x (batch*height*width)
    x = tf.transpose(x, [3, 0, 1, 2])
    x = tf.reshape(x, [tf.shape(x)[0], -1])
    m, s = tf.nn.moments(x, axes=[1])
    mx = tf.matmul(m[..., None], m[None, ...])
    # BUG FIX: was tf.cast(tf.shape(x[1]), ...) -- the shape vector of a
    # single row -- instead of the sample count tf.shape(x)[1].
    vx = tf.matmul(x, tf.transpose(x)) / tf.cast(tf.shape(x)[1], tf.float32)
    sx = tf.matmul(tf.sqrt(s)[..., None], tf.sqrt(s)[None, ...])
    corr = tf.div(vx - mx, sx + 1e-5)
    ones = tf.ones_like(corr)
    mask_a = tf.matrix_band_part(ones, 0, -1)  # Upper triangular matrix of 0s and 1s
    mask_b = tf.matrix_band_part(ones, 0, 0)  # Diagonal matrix of 0s and 1s
    mask = tf.cast(mask_a - mask_b, dtype=tf.bool)  # Make a bool mask
    return tf.boolean_mask(corr, mask)
def sparse_embedding_single_example(images, dense_embedding_indices):
    """Sum pixel intensities over the pixel indices belonging to each cell.

    Args:
        images: height x width x channels tensor for a single example.
        dense_embedding_indices: per-cell pixel index rows -- presumably
            zero-padded by the input pipeline; TODO confirm layout.

    Returns:
        A cells x channels tensor, zero-padded back to the input row count.
    """
    # images input shape: height x width x channels
    im_shape = tf.shape(images)
    # Flatten spatial dims so pixel indices address rows directly.
    im_reshape = tf.reshape(images,[-1,im_shape[2]])
    sparse_embedding_indices = tf.contrib.layers.dense_to_sparse(dense_embedding_indices)
    # Sum the looked-up pixel rows within each cell.
    x = tf.nn.embedding_lookup_sparse(im_reshape,sparse_embedding_indices,None,combiner='sum')
    # The sparse lookup drops empty rows; pad back to the original count.
    zero_pad_size = tf.shape(dense_embedding_indices)[0] - tf.shape(x)[0]
    x = tf.concat([x,tf.zeros([zero_pad_size, tf.shape(x)[1]])], axis=0)
    return x
def batch_sparse_embedding(batch_images, batch_dense_embedding_indices):
    """Integrate pixel intensities per cell for every example in the batch.

    Returns a cells x channels tensor with padded (all-zero) cells removed.
    """
    per_example = tf.map_fn(
        lambda pair: sparse_embedding_single_example(pair[0], pair[1]),
        (batch_images, batch_dense_embedding_indices), dtype=tf.float32)
    flat = tf.reshape(per_example, [-1, tf.shape(per_example)[2]])
    # Drop rows that are entirely zero (padding cells).
    nonzero_counts = tf.count_nonzero(flat, axis=1)
    keep_idx = tf.where(tf.not_equal(nonzero_counts, 0))
    # output shape is cells x channels
    return tf.gather_nd(flat, keep_idx)
def build_estimator_ae(features, mode, params):
    """Estimator model_fn for the autoencoder training stage.

    Builds the shared encoder/decoder, the autoencoder reconstruction loss
    (selected by hparams.loss_fn), and the full decompression graph (so all
    variables exist), then returns the EstimatorSpec for `mode`. Only the
    Encode/Decode variables are trained here; the Decompress variables are
    trained by build_estimator_decompress.
    """
    next_element = features['features']
    validation_data,validation_indices = features['labels']
    hparams = params['hparams']
    # Load composition matrix
    Phi = params['Phi']
    U = params['U']
    PriorAbundance = params['PriorAbundance']
    # Training Params
    lr_auto = 1e-2
    # Network Inputs
    lr_autoencode = lr_auto#tf.placeholder(tf.float32,())
    ph_dropout = 1#tf.placeholder(tf.float32, (), 'dropout')
    phase = 0#tf.placeholder(tf.bool, name='phase')
    augment = tf.placeholder_with_default(False, shape=())
    # Network Architecture
    encode_filters = [[3,3,hparams.num_filters] for _ in range(hparams.num_layers)]
    decode_filters = [[3,3,hparams.num_filters]]*(hparams.num_layers-1) + [[3,3,1]]
    stride_factor = 2
    # Target sparsity of decoding
    sparsity_decode = hparams.pixel_sparsity*hparams.image_dim**2
    # Build AutoEncoder
    encode,decode = encode_and_decode(encode_filters,decode_filters,stride_factor)
    sample,encode_sample = encode(next_element,augment)
    decode_sample = decode(encode_sample)
    prior_decode = latent_normal([Phi.shape[0]],sparsity_decode,sparsity_decode)
    decode_entropy = get_entropy(tf.reshape(decode_sample,(-1,tf.shape(decode_sample)[-1])))
    # Build AE loss
    if hparams.loss_fn == 'ms-ssim':
        auto_loss = -tf.reduce_mean(tf.image.ssim_multiscale(sample[0],tf.div(decode_sample,tf.reduce_max(decode_sample)),1))
    elif hparams.loss_fn == 'l1':
        auto_loss = tf.reduce_mean(tf.losses.absolute_difference(tf.math.log(sample[0]+1e-5),tf.math.log(decode_sample+1e-5)))
    elif hparams.loss_fn == 'mse':
        auto_loss = tf.reduce_mean(tf.square(sample[0] - decode_sample))
    # Entropy prior and total-variation regularizers on the reconstruction.
    auto_loss += -tf.reduce_mean(prior_decode.distribution.log_prob(decode_entropy))*hparams.lambda_decode
    auto_loss += tf.reduce_mean(get_total_variation(decode_sample))*hparams.lambda_tv
    # Build graph for decompression even though we don't update in this estimator
    sample_decompress,encode_sample_decompress = encode(next_element,augment)
    decode_sample_decompress = decode(encode_sample_decompress)
    # Spatial sizes shrink by stride_factor per encoder layer.
    encode_size = int(hparams.full_dim/(stride_factor**(len(encode_filters))))
    encode_patch_size = int(hparams.image_dim/(stride_factor**(len(encode_filters))))
    offset_row = tf.cast(sample_decompress[3]/(stride_factor**(len(encode_filters))),tf.int64)
    offset_col = tf.cast(sample_decompress[4]/(stride_factor**(len(encode_filters))),tf.int64)
    encode_sample_fit, encode_latent, W = composite_latent_encoding(encode_sample_decompress, Phi, U, (params['num_fov'],encode_size,encode_size), (encode_patch_size, encode_patch_size), params['fov_start'], sample_decompress[2], offset_row, offset_col)
    prior_W = latent_poisson(W.get_shape()[1:-1],hparams.sparsity_k)
    W_entropy = get_entropy(W,collapse_channels=True)
    decode_latent = decode(encode_latent)
    compose_latent = composite_decoding(decode_latent,Phi,hparams.image_dim)
    cell_intensities = batch_sparse_embedding(decode_latent, next_element[8])
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions={'tissue': sample_decompress[1], 'fov': sample_decompress[2], 'offset_r': sample_decompress[3], 'offset_col': sample_decompress[4],'decompressed_data': decode_latent, 'encoded_data': encode_latent})
    prior_decode_latent = latent_normal(None,PriorAbundance*sparsity_decode/hparams.abundance_factor, sparsity_decode/hparams.abundance_factor)
    # Guard the entropy/correlation terms against an all-zero batch.
    decode_latent_entropy = tf.cond(tf.reduce_sum(cell_intensities) > 0, true_fn= lambda: get_entropy(cell_intensities), false_fn= lambda: tf.zeros([tf.shape(cell_intensities)[1]]))
    gene_correlation = tf.cond(tf.reduce_sum(cell_intensities) > 0, true_fn= lambda: correlation_matrix(encode_latent,True), false_fn= lambda: tf.zeros([len(params['gene_correlation'])]))
    #gene_correlation = tf.cond(tf.reduce_sum(cell_intensities) > 0, true_fn= lambda: correlation_matrix(tf.transpose(cell_intensities)), false_fn= lambda: tf.zeros([len(params['gene_correlation'])]))
    encode_loss = tf.reduce_mean(tf.square(encode_sample_decompress - encode_sample_fit))
    if hparams.loss_fn == 'ms-ssim':
        decode_loss = -tf.reduce_mean(tf.image.ssim_multiscale(sample_decompress[0],tf.div(compose_latent,tf.reduce_max(compose_latent)),1))
    elif hparams.loss_fn == 'l1':
        decode_loss = tf.reduce_mean(tf.losses.absolute_difference(tf.math.log(decode_sample_decompress+1e-5),tf.math.log(compose_latent+1e-5)))
    elif hparams.loss_fn == 'mse':
        decode_loss = tf.reduce_mean(tf.square(decode_sample_decompress - compose_latent))
    # Decompression loss
    reg_loss = -tf.reduce_mean(prior_W.distribution.log_prob(W_entropy))*hparams.lambdaW
    reg_loss += -tf.reduce_mean(prior_decode_latent.distribution.log_prob(decode_latent_entropy))*hparams.lambda_decode*hparams.lambda_abundance_factor
    reg_loss += tf.reduce_mean(get_total_variation(decode_latent))*hparams.lambda_tv
    reg_loss += tf.reduce_mean(tf.square(gene_correlation - params['gene_correlation']))*hparams.lambda_gene_correlation
    decompress_loss = encode_loss + decode_loss + reg_loss
    summary_im1,summary_im2 = get_eval_summary(validation_data,validation_indices,decode_latent,hparams.image_dim)
    cell_intensities_im1 = batch_sparse_embedding(summary_im1, next_element[8])
    cell_intensities_im2 = batch_sparse_embedding(summary_im2, next_element[8])
    # Build optimizers
    optimizer_decomp = tf.train.AdamOptimizer(name='Adam_decomp',learning_rate=lr_autoencode, beta1=0.9, beta2=0.999)
    decompress_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Decompress')
    # Create training operations
    train_decompress = optimizer_decomp.minimize(decompress_loss, var_list=decompress_vars, global_step=tf.train.get_global_step())
    if mode == tf.estimator.ModeKeys.EVAL:
        if hparams.loss_fn == 'ms-ssim':
            metrics = {'ms-ssim': ssim_metric(sample[0], decode_sample)}
        elif hparams.loss_fn == 'l1':
            metrics = {'l1_ae': l1_metric(sample[0], decode_sample)}
        elif hparams.loss_fn == 'mse':
            metrics = {'mse': tf.metrics.mean_squared_error(sample[0],decode_sample)}
        return tf.estimator.EstimatorSpec(mode, loss=auto_loss, eval_metric_ops=metrics)
    if mode == tf.estimator.ModeKeys.TRAIN:
        # Build optimizers
        optimizer_auto = tf.train.AdamOptimizer(name='Adam_ae',learning_rate=lr_autoencode, beta1=0.9, beta2=0.999)
        encode_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Encode')
        decode_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Decode')
        # Create training operations (for batch_norm)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_auto = optimizer_auto.minimize(auto_loss, var_list=encode_vars+decode_vars, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=auto_loss, train_op=train_auto)
def build_estimator_decompress(features, mode, params):
next_element = features['features']
validation_data,validation_indices = features['labels']
hparams = params['hparams']
# Load gene modules and composition matrix
Phi = params['Phi']
U = params['U']
PriorAbundance = params['PriorAbundance']
# Training Params
lr_auto = 2e-2
# Network Inputs
lr_autoencode = lr_auto#tf.placeholder(tf.float32,())
ph_dropout = 1#tf.placeholder(tf.float32, (), 'dropout')
phase = 0#tf.placeholder(tf.bool, name='phase')
augment = tf.placeholder_with_default(False, shape=())
# Network Architecture
encode_filters = [[3,3,hparams.num_filters] for _ in range(hparams.num_layers)]
decode_filters = [[3,3,hparams.num_filters]]*(hparams.num_layers-1) + [[3,3,1]]
stride_factor = 2
# Target sparsity of decoding
sparsity_decode = hparams.pixel_sparsity*hparams.image_dim**2
# Build AutoEncoder
encode,decode = encode_and_decode(encode_filters,decode_filters,stride_factor)
# Build decompression
sample_decompress,encode_sample_decompress = encode(next_element,augment)
decode_sample_decompress = decode(encode_sample_decompress)
encode_size = int(hparams.full_dim/(stride_factor**(len(encode_filters))))
encode_patch_size = int(hparams.image_dim/(stride_factor**(len(encode_filters))))
offset_row = tf.cast(sample_decompress[3]/(stride_factor**(len(encode_filters))),tf.int64)
offset_col = tf.cast(sample_decompress[4]/(stride_factor**(len(encode_filters))),tf.int64)
encode_sample_fit, encode_latent, W = composite_latent_encoding(encode_sample_decompress, Phi, U, (params['num_fov'],encode_size,encode_size), (encode_patch_size, encode_patch_size), params['fov_start'], sample_decompress[2], offset_row, offset_col)
prior_W = latent_poisson(W.get_shape()[1:-1],hparams.sparsity_k)
W_entropy = get_entropy(W,collapse_channels=True)
decode_latent = decode(encode_latent)
compose_latent = composite_decoding(decode_latent,Phi,hparams.image_dim)
cell_intensities = batch_sparse_embedding(decode_latent, next_element[8])
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode, predictions={'tissue': sample_decompress[1], 'fov': sample_decompress[2], 'offset_r': sample_decompress[3], 'offset_col': sample_decompress[4],'decompressed_data': decode_latent, 'encoded_data': encode_latent})
prior_decode_latent = latent_normal(None,PriorAbundance*sparsity_decode/hparams.abundance_factor, sparsity_decode/hparams.abundance_factor)
decode_latent_entropy = tf.cond(tf.reduce_sum(cell_intensities) > 0, true_fn= lambda: get_entropy(cell_intensities), | |
"""The standard runtime library."""
from runtime.env import (Datatype, Value, Function, Operator,
Signature, FunctionBinding, CastException, ANY, NULL,
RuntimeException)
# Abstract numeric supertype shared by INTEGER and FLOAT.
NUMBER = Datatype("*number", None, ANY)
def cast_integer(value):
    """Casts a value to an INTEGER."""
    if isinstance(value, Value):
        datatype = value.datatype
        if datatype in (FLOAT, INTEGER):
            return Value(INTEGER, int(value.data))
        if datatype is BOOLEAN:
            return Value(INTEGER, 1 if value.data else 0)
        if datatype is NULL:
            return Value(INTEGER, 0)
    raise CastException(value, INTEGER)
# int: rendered with %d formatting.
INTEGER = Datatype("int", cast_integer, NUMBER, lambda x: "%d" % x)
def cast_float(value):
    """Casts a value to a FLOAT."""
    if isinstance(value, Value):
        datatype = value.datatype
        if datatype in (FLOAT, INTEGER):
            return Value(FLOAT, float(value.data))
        if datatype is NULL:
            return Value(FLOAT, 0.0)
    raise CastException(value, FLOAT)
# float: rendered with %f formatting.
FLOAT = Datatype("float", cast_float, NUMBER, lambda x: "%f" % x)
def cast_string(value):
    """Casts a value to a STRING."""
    if isinstance(value, Value):
        datatype = value.datatype
        if datatype is INTEGER:
            return Value(STRING, "%d" % value.data)
        if datatype in (FLOAT, STRING):
            return Value(STRING, str(value.data))
        if datatype is BOOLEAN:
            return Value(STRING, "true" if value.data else "false")
        if datatype is NULL:
            return Value(STRING, "")
    raise CastException(value, STRING)
# string: rendered wrapped in double quotes.
STRING = Datatype("string", cast_string, ANY, lambda x: "\"" + x + "\"")
def cast_boolean(value):
    """Casts a value to a BOOLEAN."""
    if isinstance(value, Value):
        datatype = value.datatype
        if datatype is INTEGER:
            # Only strictly positive integers count as true.
            return Value(BOOLEAN, value.data > 0)
        if datatype is BOOLEAN:
            return Value(BOOLEAN, bool(value.data))
        if datatype is NULL:
            return Value(BOOLEAN, False)
    raise CastException(value, BOOLEAN)
BOOLEAN = Datatype("bool", cast_boolean, ANY, lambda x: "true" if x else "false")
def cast_function(value):
    """Casts a value to a FUNCTION."""
    if isinstance(value, Value):
        datatype = value.datatype
        if datatype is FUNCTION:
            return Value(FUNCTION, value.data)
        if datatype is NULL:
            return Value(FUNCTION, None)
    raise CastException(value, FUNCTION)
FUNCTION = Datatype("func", cast_function, ANY, lambda x: "function")
def cast_list(value):
    """Casts a value to a LIST."""
    if isinstance(value, Value):
        datatype = value.datatype
        if datatype in (LIST, STRING):
            return Value(LIST, list(value.data))
        if datatype is NULL:
            return Value(LIST, [])
    raise CastException(value, LIST)
# NOTE(review): the type name "LIST" is upper-case while every other
# datatype name is lower-case ("int", "map", ...). The name is
# runtime-visible, so confirm before renaming it.
LIST = Datatype("LIST", cast_list, ANY, lambda x: "list")
def cast_map(value):
    """Casts a value to a MAP."""
    if isinstance(value, Value):
        datatype = value.datatype
        if datatype is MAP:
            return Value(MAP, dict(value.data))
        if datatype is NULL:
            return Value(MAP, dict())
    raise CastException(value, MAP)
MAP = Datatype("map", cast_map, ANY, lambda x: "map")
def cast_set(value):
    """Casts a value to a SET."""
    if isinstance(value, Value):
        datatype = value.datatype
        if datatype in (SET, LIST):
            return Value(SET, set(value.data))
        if datatype is NULL:
            return Value(SET, set())
    raise CastException(value, SET)
SET = Datatype("set", cast_set, ANY, lambda x: "set")
def cast_object(value):
    """Casts a value to an OBJECT."""
    if not isinstance(value, Value):
        raise CastException(value, OBJECT)
    return Value(OBJECT, value.data)
OBJECT = Datatype("object", cast_object, ANY, lambda x: "object")
def _add_operation():
    """The add operation."""
    def add(context):
        """Add two number values (or append to a string)."""
        var_a = context.find("id", "a")
        # Cast b to a's datatype so the result keeps a's type.
        var_b = var_a.datatype.cast(context.find("id", "b"))
        return Value(var_a.datatype, var_a.data + var_b.data)
    add_node = FunctionBinding(add)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
        Value(NUMBER, None, "b"),
    ], add_node)
    string_signature = Signature([
        Value(STRING, None, "a"),
        Value(ANY, None, "b"),
    ], add_node)
    return Function([number_signature, string_signature], "#add")
ADD_FUNCTION = _add_operation()
PLUS_OPERATOR = Operator(ADD_FUNCTION, "+")
def _sub_function():
    """The sub operation."""
    def sub(context):
        """Subtract two number values."""
        var_a = context.find("id", "a")
        # Cast b to a's datatype so the result keeps a's type.
        var_b = var_a.datatype.cast(context.find("id", "b"))
        return Value(var_a.datatype, var_a.data - var_b.data)
    sub_node = FunctionBinding(sub)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
        Value(NUMBER, None, "b"),
    ], sub_node)
    return Function([number_signature], "#sub")
SUB_FUNCTION = _sub_function()
MINUS_OPERATOR = Operator(SUB_FUNCTION, "-")
def _mul_operation():
    """The mul operation."""
    def mul(context):
        """Multiply two numbers."""
        var_a = context.find("id", "a")
        # Cast b to a's datatype so the result keeps a's type.
        var_b = var_a.datatype.cast(context.find("id", "b"))
        return Value(var_a.datatype, var_a.data * var_b.data)
    mul_node = FunctionBinding(mul)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
        Value(NUMBER, None, "b"),
    ], mul_node)
    return Function([number_signature], "#mul")
MUL_FUNCTION = _mul_operation()
MUL_OPERATOR = Operator(MUL_FUNCTION, "*")
def _pow_operation():
    """The pow operation."""
    def power(context):
        """Calculate a to the power of b.

        Renamed from `pow`, which shadowed the builtin.
        """
        var_b = context.find("id", "b")
        var_a = context.find("id", "a")
        # Datatypes are module-level singletons; use identity comparison for
        # consistency with the other operations (was `!=`). Mixed int/float
        # operands are promoted to FLOAT before exponentiation.
        if var_b.datatype is not var_a.datatype:
            var_b = FLOAT.cast(var_b)
            var_a = FLOAT.cast(var_a)
        return Value(var_b.datatype, var_a.data**var_b.data)
    pow_node = FunctionBinding(power)
    signatures = [
        Signature([
            Value(NUMBER, None, "a"),
            Value(NUMBER, None, "b"),
        ], pow_node)
    ]
    return Function(signatures, "#pow")
POW_FUNCTION = _pow_operation()
POW_OPERATOR = Operator(POW_FUNCTION, "^")
def _div_operation():
    """The div operation."""
    def div(context):
        """Divide two numbers."""
        var_a = context.find("id", "a")
        var_b = var_a.datatype.cast(context.find("id", "b"))
        if var_b.data == 0:
            raise RuntimeException("Can not divide by 0")
        quotient = var_a.data / var_b.data
        # Integer division truncates the quotient.
        if var_a.datatype is INTEGER:
            quotient = int(quotient)
        return Value(var_a.datatype, quotient)
    div_node = FunctionBinding(div)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
        Value(NUMBER, None, "b"),
    ], div_node)
    return Function([number_signature], "#div")
DIV_FUNCTION = _div_operation()
DIV_OPERATOR = Operator(DIV_FUNCTION, "/")
def _mod_operation():
    """The mod operation."""
    def mod(context):
        """Get the modulo of two numbers."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        if var_b.data == 0:
            raise RuntimeException("Can not divide by 0")
        return Value(INTEGER, var_a.data % var_b.data)
    mod_node = FunctionBinding(mod)
    integer_signature = Signature([
        Value(INTEGER, None, "a"),
        Value(INTEGER, None, "b"),
    ], mod_node)
    return Function([integer_signature], "#mod")
MOD_FUNCTION = _mod_operation()
MOD_OPERATOR = Operator(MOD_FUNCTION, "%")
def _equ_operation():
    """The equ (equality) operation."""
    def equ(context):
        """Checks if two values are equal."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        if var_a.datatype is not var_b.datatype:
            raise RuntimeException("Two values of different types may not be compared.")
        return Value(BOOLEAN, var_a == var_b)
    equ_node = FunctionBinding(equ)
    any_signature = Signature([
        Value(ANY, None, "a"),
        Value(ANY, None, "b"),
    ], equ_node)
    return Function([any_signature], "#equ")
EQU_FUNCTION = _equ_operation()
EQU_OPERATOR = Operator(EQU_FUNCTION, "==")
def _and_operation():
    """The and operation."""
    def and_o(context):
        """Returns true if both values are true."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        return Value(BOOLEAN, var_a.data and var_b.data)
    and_node = FunctionBinding(and_o)
    boolean_signature = Signature([
        Value(BOOLEAN, None, "a"),
        Value(BOOLEAN, None, "b"),
    ], and_node)
    return Function([boolean_signature], "#and")
AND_FUNCTION = _and_operation()
AND_OPERATOR = Operator(AND_FUNCTION, "&&")
def _or_operation():
    """The or operation."""
    def or_o(context):
        """Returns true if one value is true."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        return Value(BOOLEAN, var_a.data or var_b.data)
    or_node = FunctionBinding(or_o)
    boolean_signature = Signature([
        Value(BOOLEAN, None, "a"),
        Value(BOOLEAN, None, "b"),
    ], or_node)
    return Function([boolean_signature], "#or")
OR_FUNCTION = _or_operation()
OR_OPERATOR = Operator(OR_FUNCTION, "||")
def _xor_operation():
    """The xor operation."""
    def xor(context):
        """Returns true if exactly one of the two values is true."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        # "at least one truthy" combined with "unequal" yields exclusive or.
        return Value(BOOLEAN, (var_a.data or var_b.data) and var_a.data != var_b.data)
    xor_node = FunctionBinding(xor)
    boolean_signature = Signature([
        Value(BOOLEAN, None, "a"),
        Value(BOOLEAN, None, "b"),
    ], xor_node)
    return Function([boolean_signature], "#xor")
XOR_FUNCTION = _xor_operation()
XOR_OPERATOR = Operator(XOR_FUNCTION, "^|")
def _neq_operation():
    """The neq (inequality) operation."""
    def neq(context):
        """Returns true if both values are unequal."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        if var_a.datatype is not var_b.datatype:
            raise RuntimeException("Two values of different types may not be compared.")
        return Value(BOOLEAN, var_a != var_b)
    neq_node = FunctionBinding(neq)
    any_signature = Signature([
        Value(ANY, None, "a"),
        Value(ANY, None, "b"),
    ], neq_node)
    return Function([any_signature], "#neq")
NEQ_FUNCTION = _neq_operation()
NEQ_OPERATOR = Operator(NEQ_FUNCTION, "!=")
def _sm_operation():
    """The sm (smaller-than) operation."""
    def smaller(context):
        """Returns true if one value is smaller than the other."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        return Value(BOOLEAN, var_a.data < var_b.data)
    sm_node = FunctionBinding(smaller)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
        Value(NUMBER, None, "b"),
    ], sm_node)
    string_signature = Signature([
        Value(STRING, None, "a"),
        Value(STRING, None, "b"),
    ], sm_node)
    return Function([number_signature, string_signature], "#sm")
SM_FUNCTION = _sm_operation()
SM_OPERATOR = Operator(SM_FUNCTION, "<")
def _lg_operation():
    """The lg (larger-than) operation."""
    def larger(context):
        """Returns true if a is larger than b."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        return Value(BOOLEAN, var_a.data > var_b.data)
    lg_node = FunctionBinding(larger)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
        Value(NUMBER, None, "b"),
    ], lg_node)
    string_signature = Signature([
        Value(STRING, None, "a"),
        Value(STRING, None, "b"),
    ], lg_node)
    return Function([number_signature, string_signature], "#lg")
LG_FUNCTION = _lg_operation()
LG_OPERATOR = Operator(LG_FUNCTION, ">")
def _sme_operation():
    """The sme (smaller-or-equal) operation."""
    def sme(context):
        """Returns true if a is smaller or equal to b."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        return Value(BOOLEAN, var_a.data <= var_b.data)
    sme_node = FunctionBinding(sme)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
        Value(NUMBER, None, "b"),
    ], sme_node)
    string_signature = Signature([
        Value(STRING, None, "a"),
        Value(STRING, None, "b"),
    ], sme_node)
    return Function([number_signature, string_signature], "#sme")
SME_FUNCTION = _sme_operation()
SME_OPERATOR = Operator(SME_FUNCTION, "<=")
def _lge_operation():
    """The lge (larger-or-equal) operation."""
    def lge(context):
        """Returns true if a is larger or equal to b."""
        var_a = context.find("id", "a")
        var_b = context.find("id", "b")
        return Value(BOOLEAN, var_a.data >= var_b.data)
    lge_node = FunctionBinding(lge)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
        Value(NUMBER, None, "b"),
    ], lge_node)
    string_signature = Signature([
        Value(STRING, None, "a"),
        Value(STRING, None, "b"),
    ], lge_node)
    return Function([number_signature, string_signature], "#lge")
LGE_FUNCTION = _lge_operation()
LGE_OPERATOR = Operator(LGE_FUNCTION, ">=")
def _unmi_operation():
    """The unary minus operation."""
    def unmi(context):
        """Inverts the sign of the numeric value."""
        var_a = context.find("id", "a")
        return Value(var_a.datatype, -var_a.data)
    unmi_node = FunctionBinding(unmi)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
    ], unmi_node)
    return Function([number_signature], "#unmi")
UNMI_FUNCTION = _unmi_operation()
# Unary minus shares the "-" operator with binary subtraction.
MINUS_OPERATOR.add_function(UNMI_FUNCTION)
def _unpl_operation():
    """The unary plus operation."""
    def unpl(context):
        """Does nothing special. Added for code consistency."""
        var_a = context.find("id", "a")
        return Value(var_a.datatype, var_a.data)
    unpl_node = FunctionBinding(unpl)
    number_signature = Signature([
        Value(NUMBER, None, "a"),
    ], unpl_node)
    return Function([number_signature], "#unpl")
UNPL_FUNCTION = _unpl_operation()
# Unary plus shares the "+" operator with binary addition.
PLUS_OPERATOR.add_function(UNPL_FUNCTION)
def _uninv_operation():
def uninv(context):
"""Inverts a bool value."""
var_a = context.find("id", "a")
return Value(var_a.datatype, | |
<filename>core/extension/android.py
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
# Mapping from gyp variable names to their Android build-system expansions.
# Values containing $(...) are expanded later by make, not by Python.
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
  # Boolean to declare that this target does not want its name mangled.
  'android_unmangled_name',
  # Map of android build system variables to set.
  'aosp_build_settings',
]
# No extra path sections or rule sources are needed for the Android backend.
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# Appended once to the top-level GypAndroid.mk after all sub-makefiles.
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
# Written at the top of every generated .mk file.
header = """\
# This file is generated by gyp; do not edit.
"""
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Return True when |ext| is compiled as C++ by the make backend."""
  language = make.COMPILABLE_EXTENSIONS.get(ext)
  return language == 'cxx'
def Sourceify(path):
  """Convert a path to its source directory form.

  The Android backend does not support options.generator_output, so this
  is an identity function.
  """
  return path
# Module-level state shared across all AndroidMkWriter instances.
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
  def __init__(self, android_top_dir):
    # Store the Android top-level directory on the writer for later use.
    self.android_top_dir = android_top_dir
  def Write(self, qualified_target, relative_target, base_path, output_filename,
            spec, configs, part_of_all, write_alias_target, sdk_version):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      relative_target: qualified target name relative to the root
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
      write_alias_target: flag indicating whether to create short aliases for
                          this target
      sdk_version: what to emit for LOCAL_SDK_VERSION in output

    Returns:
      The Android module name of the written target.
    """
    gyp.common.EnsureDirExists(output_filename)
    # The open file handle is kept on self so the WriteLn/WriteList helpers
    # can write to it; closed at the end of this method.
    self.fp = open(output_filename, 'w')
    self.fp.write(header)

    self.qualified_target = qualified_target
    self.relative_target = relative_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []

    # Unknown target types fall back to the generic 'GYP' module class.
    self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
    self.android_module = self.ComputeAndroidModule(spec)
    (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
    self.output = self.output_binary = self.ComputeOutput(spec)

    # Standard header.
    self.WriteLn('include $(CLEAR_VARS)\n')

    # Module class and name.
    self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
    self.WriteLn('LOCAL_MODULE := ' + self.android_module)
    # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
    # The library module classes fail if the stem is set. ComputeOutputParts
    # makes sure that stem == modulename in these cases.
    if self.android_stem != self.android_module:
      self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
    self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
    if self.toolset == 'host':
      self.WriteLn('LOCAL_IS_HOST_MODULE := true')
      self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
    else:
      self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
                   '$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
      self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version)

    # Grab output directories; needed for Actions and Rules.
    if self.toolset == 'host':
      self.WriteLn('gyp_intermediate_dir := '
                   '$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
    else:
      self.WriteLn('gyp_intermediate_dir := '
                   '$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
    self.WriteLn('gyp_shared_intermediate_dir := '
                 '$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
    self.WriteLn()

    # List files this target depends on so that actions/rules/copies/sources
    # can depend on the list.
    # TODO: doesn't pull in things through transitive link deps; needed?
    target_dependencies = [x[1] for x in deps if x[0] == 'path']
    self.WriteLn('# Make sure our deps are built first.')
    self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                   local_pathify=True)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs)

    # GYP generated outputs.
    self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)

    # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
    # on both our dependency targets and our generated files.
    self.WriteLn('# Make sure our deps and generated files are built first.')
    self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
                 '$(GYP_GENERATED_OUTPUTS)')
    self.WriteLn()

    # Sources.
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)

    self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
                     write_alias_target)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)

    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)

    self.fp.close()
    return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
"""
for action in actions:
name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
if not out.startswith('$'):
print ('WARNING: Action for target "%s" writes output to local path '
'"%s".' % (self.target, out))
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
# Prepare the actual command.
command = gyp.common.EncodePOSIXShellList(action['action'])
if 'message' in action:
quiet_cmd = 'Gyp action: %s ($@)' % action['message']
else:
quiet_cmd = 'Gyp action: %s ($@)' % name
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
command = cd_action + command
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the gyp_*
# variables for the action rule with an absolute version so that the
# output goes in the right place.
# Only write the gyp_* rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
self.WriteLn('%s: gyp_intermediate_dir := '
'$(abspath $(gyp_intermediate_dir))' % main_output)
self.WriteLn('%s: gyp_shared_intermediate_dir := '
'$(abspath $(gyp_shared_intermediate_dir))' % main_output)
# Android's envsetup.sh adds a number of directories to the path including
# the built host binary directory. This causes actions/rules invoked by
# gyp to sometimes use these instead of system versions, e.g. bison.
# The built host binaries may not be suitable, and can cause errors.
# So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
# set by envsetup.
self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
% main_output)
# Don't allow spaces in input/output filenames, but make an exception for
# filenames which start with '$(' since it's okay for there to be spaces
# inside of make function/macro invocations.
for input in inputs:
if not input.startswith('$(') and ' ' in input:
| |
the conjugate
# as well
i_times = range(0, n_time_points-NFFT+1, NFFT-n_overlap)
n_slices = len(i_times)
FFT_slices = {}
FFT_conj_slices = {}
Pxx = {}
for i_channel in all_channels:
#dbg:
#print i_channel
Slices = np.zeros( (n_slices,n_freqs), dtype=np.complex)
for iSlice in xrange(n_slices):
thisSlice = time_series[i_channel,
i_times[iSlice]:i_times[iSlice]+NFFT]
#Windowing:
thisSlice = window_vals*thisSlice #No detrending
#Derive the fft for that slice:
Slices[iSlice,:] = (np.fft.fft(thisSlice)[lb_idx:ub_idx])
FFT_slices[i_channel] = Slices
if prefer_speed_over_memory:
FFT_conj_slices[i_channel] = np.conjugate(Slices)
cache = {'FFT_slices':FFT_slices,'FFT_conj_slices':FFT_conj_slices,
'norm_val':norm_val}
return freqs,cache
def cache_to_psd(cache, ij):
    """
    From a set of cached windowed ffts, calculate the psd.

    Parameters
    ----------
    cache : dict
        Return value from :func:`cache_fft`

    ij : list
        A list of tuples of the form (i, j).

    Returns
    -------
    Pxx : dict
        The psds of the time-series. The keys are the union of the i, j
        values appearing in the parameter ij, so Pxx[i] is the psd of
        time_series[i] in the input to :func:`cache_fft`.
    """
    # This is the way it is saved by cache_fft:
    FFT_slices = cache['FFT_slices']
    FFT_conj_slices = cache['FFT_conj_slices']
    norm_val = cache['norm_val']

    # Every channel mentioned on either side of a pair gets a psd:
    all_channels = set()
    for i, j in ij:
        all_channels.add(i)
        all_channels.add(j)

    Pxx = {}
    for i in all_channels:
        # Use the precomputed conjugates when cache_fft stored them
        # (prefer_speed_over_memory); otherwise conjugate on the fly:
        if FFT_conj_slices:
            Pxx[i] = FFT_slices[i] * FFT_conj_slices[i]
        else:
            Pxx[i] = FFT_slices[i] * np.conjugate(FFT_slices[i])
        # If there is more than one window, average over windows:
        if FFT_slices[i].shape[0] > 1:
            Pxx[i] = np.mean(Pxx[i], 0)
        Pxx[i] /= norm_val
    return Pxx
def cache_to_phase(cache, ij):
    """From a set of cached windowed ffts, calculate the
    frequency-band dependent phase for each of the channels in ij.

    Note that this returns the absolute phases of the time-series, not the
    relative phases between them. In order to get relative phases, use
    :func:`cache_to_relative_phase`.

    Parameters
    ----------
    cache : dict
        The return value of :func:`cache_fft`

    ij : list
        A list of tuples of the form (i, j) for all the indices for which to
        calculate the phases

    Returns
    -------
    Phase : dict
        The individual phases, keys are all the i and j in ij, such that
        Phase[i] gives you the phase for the time-series i in the input to
        :func:`cache_fft`
    """
    FFT_slices = cache['FFT_slices']

    # Every channel mentioned on either side of a pair gets a phase:
    all_channels = set()
    for i, j in ij:
        all_channels.add(i)
        all_channels.add(j)

    Phase = {}
    for i in all_channels:
        Phase[i] = np.angle(FFT_slices[i])
        # If there is more than one window, average over all the windows:
        if FFT_slices[i].shape[0] > 1:
            Phase[i] = np.mean(Phase[i], 0)
    return Phase
def cache_to_relative_phase(cache, ij):
    """From a set of cached windowed ffts, calculate the
    frequency-band dependent relative phase for the combinations ij.

    Parameters
    ----------
    cache : dict
        The return value from :func:`cache_fft`

    ij : list
        A list of tuples of the form (i, j), all the pairs of indices for
        which to calculate the relative phases

    Returns
    -------
    Phi_xy : ndarray
        The relative phases between the time-series i and j. Such that
        Phi_xy[i, j] is the phase from time_series[i] to time_series[j].

    Note
    ----
    This function will give you a different result than using
    :func:`coherency_phase_spectrum`. This is because
    :func:`coherency_phase_spectrum` calculates the angle based on the average
    psd, whereas this function calculates the average of the angles calculated
    on individual windows.
    """
    # This is the way it is saved by cache_fft:
    FFT_slices = cache['FFT_slices']
    FFT_conj_slices = cache['FFT_conj_slices']

    freqs = cache['FFT_slices'][ij[0][0]].shape[-1]

    ij_array = np.array(ij)
    channels_i = max(1, max(ij_array[:, 0]) + 1)
    channels_j = max(1, max(ij_array[:, 1]) + 1)

    # Pre-allocate for speed (np.complex was removed in NumPy >= 1.24, so use
    # the builtin complex; only the (i, j) pairs in ij are ever filled in):
    Phi_xy = np.empty((channels_i, channels_j, freqs), dtype=complex)

    # These checks take time, so do them up front, not in every iteration.
    # NOTE: dict.items() is not indexable on Python 3; grab the first value
    # via an iterator instead (all slices share the same window count):
    multiple_windows = next(iter(FFT_slices.values())).shape[0] > 1
    use_conj_cache = bool(FFT_conj_slices)

    for i, j in ij:
        if use_conj_cache:
            phi = np.angle(FFT_slices[i] * FFT_conj_slices[j])
        else:
            phi = np.angle(FFT_slices[i] * np.conjugate(FFT_slices[j]))
        # Average the per-window angles when there is more than one window:
        Phi_xy[i, j] = np.mean(phi, 0) if multiple_windows else phi
    return Phi_xy
def cache_to_coherency(cache, ij):
    """From a set of cached spectra, calculate the coherency relationships.

    Parameters
    ----------
    cache : dict
        the return value from :func:`cache_fft`

    ij : list
        a list of (i, j) tuples, the pairs of indices for which the
        cross-coherency is to be calculated

    Returns
    -------
    Cxy : ndarray
        coherence values between the time-series ij. Indexing into this array
        takes the form Cxy[i, j] in order to extract the coherency between
        time-series i and time-series j in the original input to
        :func:`cache_fft`
    """
    # This is the way it is saved by cache_fft:
    FFT_slices = cache['FFT_slices']
    FFT_conj_slices = cache['FFT_conj_slices']
    norm_val = cache['norm_val']

    freqs = cache['FFT_slices'][ij[0][0]].shape[-1]

    ij_array = np.array(ij)
    channels_i = max(1, max(ij_array[:, 0]) + 1)
    channels_j = max(1, max(ij_array[:, 1]) + 1)
    # np.complex was removed in NumPy >= 1.24; use the builtin complex.
    # Only the (i, j) pairs in ij are ever filled in:
    Cxy = np.empty((channels_i, channels_j, freqs), dtype=complex)

    # These checks take time, so do them up front, not in every iteration.
    # NOTE: dict.items() is not indexable on Python 3; grab the first value
    # via an iterator instead (all slices share the same window count):
    multiple_windows = next(iter(FFT_slices.values())).shape[0] > 1
    use_conj_cache = bool(FFT_conj_slices)

    for i, j in ij:
        # Cross- and auto-spectra; use the cached conjugates when available:
        if use_conj_cache:
            Pxy = FFT_slices[i] * FFT_conj_slices[j]
            Pxx = FFT_slices[i] * FFT_conj_slices[i]
            Pyy = FFT_slices[j] * FFT_conj_slices[j]
        else:
            Pxy = FFT_slices[i] * np.conjugate(FFT_slices[j])
            Pxx = FFT_slices[i] * np.conjugate(FFT_slices[i])
            Pyy = FFT_slices[j] * np.conjugate(FFT_slices[j])
        # Average over windows when there is more than one:
        if multiple_windows:
            Pxy = np.mean(Pxy, 0)
            Pxx = np.mean(Pxx, 0)
            Pyy = np.mean(Pyy, 0)
        Pxy /= norm_val
        Pxx /= norm_val
        Pyy /= norm_val
        Cxy[i, j] = Pxy / np.sqrt(Pxx * Pyy)
    return Cxy
#-----------------------------------------------------------------------------
# Wavelets
#-----------------------------------------------------------------------------
#TODO:
# * Write tests for various morlet wavelets
# * Possibly write 'full morlet wavelet' function
def wfmorlet_fft(f0, sd, sampling_rate, ns=5, nt=None):
    """
    Returns a complex Morlet wavelet in the frequency domain.

    Parameters
    ----------
    f0 : float
        center frequency
    sd : float
        standard deviation of the center frequency
    sampling_rate : float
        sampling rate
    ns : int, optional
        window length in number of standard deviations
    nt : int, optional
        window length in number of sample points; if None (default), it is
        derived from ns and the wavelet's time-domain standard deviation
    """
    # Use `is None` (identity), not `== None`, per standard Python practice:
    if nt is None:
        # Time-domain standard deviation corresponding to sd:
        st = 1. / (2. * np.pi * sd)
        nt = 2 * int(ns * st * sampling_rate) + 1
    f = np.fft.fftfreq(nt, 1. / sampling_rate)
    wf = 2 * np.exp(-(f - f0) ** 2 / (2 * sd ** 2)) * np.sqrt(
        sampling_rate / (np.sqrt(np.pi) * sd))
    # The analytic wavelet has no negative-frequency content; the DC bin is
    # shared between the positive and negative halves, hence the factor 1/2:
    wf[f < 0] = 0
    wf[f == 0] /= 2
    return wf
def wmorlet(f0, sd, sampling_rate, ns=5, normed='area'):
    """
    Returns a complex Morlet wavelet in the time domain.

    Parameters
    ----------
    f0 : float
        center frequency
    sd : float
        standard deviation of frequency
    sampling_rate : float
        sampling rate
    ns : int, optional
        window length in number of standard deviations
    normed : {'area', 'max'}, optional
        normalization of the wavelet

    Raises
    ------
    ValueError
        If `normed` is not one of the recognized normalizations.
    """
    st = 1. / (2. * np.pi * sd)
    w_sz = float(int(ns * st * sampling_rate))  # half time window size
    t = np.arange(-w_sz, w_sz + 1, dtype=float) / sampling_rate
    if normed == 'area':
        w = np.exp(-t ** 2 / (2. * st ** 2)) * np.exp(
            2j * np.pi * f0 * t) / np.sqrt(np.sqrt(np.pi) * st * sampling_rate)
    elif normed == 'max':
        w = np.exp(-t ** 2 / (2. * st ** 2)) * np.exp(
            2j * np.pi * f0 * t) * 2 * sd * np.sqrt(2 * np.pi) / sampling_rate
    else:
        # Raise instead of `assert 0`: asserts are stripped under `python -O`.
        raise ValueError('unknown norm %s' % normed)
    return w
def wlogmorlet_fft(f0,sd,sampling_rate,ns=5,nt=None):
"""
returns a complex log morlet wavelet in the frequency domain
Parameters
----------
f0 : center frequency
sd : standard deviation
sampling_rate : samplingrate
ns : window length in number of stanard deviations
nt : window length in number of sample points
"""
if nt==None:
st = 1./(2.*np.pi*sd)
nt = 2*int(ns*st*sampling_rate)+1
f = np.fft.fftfreq(nt,1./sampling_rate)
sfl = np.log(1+1.*sd/f0)
wf = 2*np.exp(-(np.log(f)-np.log(f0))**2/(2*sfl**2))*np.sqrt(sampling_rate/(np.sqrt(np.pi)*sd))
wf[f<0] = 0
wf[f==0] /= 2
return wf
def wlogmorlet(f0, sd, sampling_rate, ns=5, normed='area'):
    """
    Returns a complex log-Morlet wavelet in the time domain.

    Parameters
    ----------
    f0 : float
        center frequency
    sd : float
        standard deviation of frequency
    sampling_rate : float
        sampling rate
    ns : int, optional
        window length in number of standard deviations
    normed : {'area', 'max', 'energy'}, optional
        normalization of the wavelet

    Raises
    ------
    ValueError
        If `normed` is not one of the recognized normalizations.
    """
    st = 1. / (2. * np.pi * sd)
    w_sz = int(ns * st * sampling_rate)  # half time window size
    # Build in the frequency domain and transform back:
    wf = wlogmorlet_fft(f0, sd, sampling_rate=sampling_rate, nt=2 * w_sz + 1)
    w = np.fft.fftshift(np.fft.ifft(wf))
    if normed == 'area':
        w /= w.real.sum()
    elif normed == 'max':
        w /= w.real.max()
    elif normed == 'energy':
        w /= np.sqrt((w ** 2).sum())
    else:
        # Raise instead of `assert 0`: asserts are stripped under `python -O`.
        raise ValueError('unknown norm %s' % normed)
    return w
#-----------------------------------------------------------------------------
# Granger causality analysis
#-----------------------------------------------------------------------------
def transfer_function_xy(a, Nfreqs=1024):
"""Helper routine to compute the transfer function H(w) based
on sequence of coefficient matrices A(i). The z transforms
follow from this definition:
X[t] + sum_{k=1}^P a[k]X[t-k] = Err[t]
Parameters
----------
a : ndarray, shape (P, 2, 2)
sequence of coef matrices describing an mAR process
Nfreqs : int, optional
number of frequencies to compute in range [0,PI]
Returns
-------
Hw : ndarray
The transfer function from innovations process vector to
mAR process X
"""
# these concatenations follow from the observation that A(0) is
# implicitly the | |
5:
return 'aerddpssm'
if table2Version == 215 and indicatorOfParameter == 4:
return 'aerddpsss'
if table2Version == 215 and indicatorOfParameter == 3:
return 'aersrcssl'
if table2Version == 215 and indicatorOfParameter == 2:
return 'aersrcssm'
if table2Version == 215 and indicatorOfParameter == 1:
return 'aersrcsss'
if table2Version == 214 and indicatorOfParameter == 52:
return 'aot340'
if table2Version == 214 and indicatorOfParameter == 51:
return 'uvsflxcs395400'
if table2Version == 214 and indicatorOfParameter == 50:
return 'uvsflxcs390395'
if table2Version == 214 and indicatorOfParameter == 49:
return 'uvsflxcs385390'
if table2Version == 214 and indicatorOfParameter == 48:
return 'uvsflxcs380385'
if table2Version == 214 and indicatorOfParameter == 47:
return 'uvsflxcs375380'
if table2Version == 214 and indicatorOfParameter == 46:
return 'uvsflxcs370375'
if table2Version == 214 and indicatorOfParameter == 45:
return 'uvsflxcs365370'
if table2Version == 214 and indicatorOfParameter == 44:
return 'uvsflxcs360365'
if table2Version == 214 and indicatorOfParameter == 43:
return 'uvsflxcs355360'
if table2Version == 214 and indicatorOfParameter == 42:
return 'uvsflxcs350355'
if table2Version == 214 and indicatorOfParameter == 41:
return 'uvsflxcs345350'
if table2Version == 214 and indicatorOfParameter == 40:
return 'uvsflxcs340345'
if table2Version == 214 and indicatorOfParameter == 39:
return 'uvsflxcs335340'
if table2Version == 214 and indicatorOfParameter == 38:
return 'uvsflxcs330335'
if table2Version == 214 and indicatorOfParameter == 37:
return 'uvsflxcs325330'
if table2Version == 214 and indicatorOfParameter == 36:
return 'uvsflxcs320325'
if table2Version == 214 and indicatorOfParameter == 35:
return 'uvsflxcs315320'
if table2Version == 214 and indicatorOfParameter == 34:
return 'uvsflxcs310315'
if table2Version == 214 and indicatorOfParameter == 33:
return 'uvsflxcs305310'
if table2Version == 214 and indicatorOfParameter == 32:
return 'uvsflxcs300305'
if table2Version == 214 and indicatorOfParameter == 31:
return 'uvsflxcs295300'
if table2Version == 214 and indicatorOfParameter == 30:
return 'uvsflxcs290295'
if table2Version == 214 and indicatorOfParameter == 29:
return 'uvsflxcs285290'
if table2Version == 214 and indicatorOfParameter == 28:
return 'uvsflxcs280285'
if table2Version == 214 and indicatorOfParameter == 27:
return 'uvsflxt395400'
if table2Version == 214 and indicatorOfParameter == 26:
return 'uvsflxt390395'
if table2Version == 214 and indicatorOfParameter == 25:
return 'uvsflxt385390'
if table2Version == 214 and indicatorOfParameter == 24:
return 'uvsflxt380385'
if table2Version == 214 and indicatorOfParameter == 23:
return 'uvsflxt375380'
if table2Version == 214 and indicatorOfParameter == 22:
return 'uvsflxt370375'
if table2Version == 214 and indicatorOfParameter == 21:
return 'uvsflxt365370'
if table2Version == 214 and indicatorOfParameter == 20:
return 'uvsflxt360365'
if table2Version == 214 and indicatorOfParameter == 19:
return 'uvsflxt355360'
if table2Version == 214 and indicatorOfParameter == 18:
return 'uvsflxt350355'
if table2Version == 214 and indicatorOfParameter == 17:
return 'uvsflxt345350'
if table2Version == 214 and indicatorOfParameter == 16:
return 'uvsflxt340345'
if table2Version == 214 and indicatorOfParameter == 15:
return 'uvsflxt335340'
if table2Version == 214 and indicatorOfParameter == 14:
return 'uvsflxt330335'
if table2Version == 214 and indicatorOfParameter == 13:
return 'uvsflxt325330'
if table2Version == 214 and indicatorOfParameter == 12:
return 'uvsflxt320325'
if table2Version == 214 and indicatorOfParameter == 11:
return 'uvsflxt315320'
if table2Version == 214 and indicatorOfParameter == 10:
return 'uvsflxt310315'
if table2Version == 214 and indicatorOfParameter == 9:
return 'uvsflxt305310'
if table2Version == 214 and indicatorOfParameter == 8:
return 'uvsflxt300305'
if table2Version == 214 and indicatorOfParameter == 7:
return 'uvsflxt295300'
if table2Version == 214 and indicatorOfParameter == 6:
return 'uvsflxt290295'
if table2Version == 214 and indicatorOfParameter == 5:
return 'uvsflxt285290'
if table2Version == 214 and indicatorOfParameter == 4:
return 'uvsflxt280285'
if table2Version == 214 and indicatorOfParameter == 3:
return 'uvbedcs'
if table2Version == 214 and indicatorOfParameter == 2:
return 'uvbed'
if table2Version == 214 and indicatorOfParameter == 1:
return 'uvcossza'
if table2Version == 213 and indicatorOfParameter == 150:
return 'spp50'
if table2Version == 213 and indicatorOfParameter == 149:
return 'spp49'
if table2Version == 213 and indicatorOfParameter == 148:
return 'spp48'
if table2Version == 213 and indicatorOfParameter == 147:
return 'spp47'
if table2Version == 213 and indicatorOfParameter == 146:
return 'spp46'
if table2Version == 213 and indicatorOfParameter == 145:
return 'spp45'
if table2Version == 213 and indicatorOfParameter == 144:
return 'spp44'
if table2Version == 213 and indicatorOfParameter == 143:
return 'spp43'
if table2Version == 213 and indicatorOfParameter == 142:
return 'spp42'
if table2Version == 213 and indicatorOfParameter == 141:
return 'spp41'
if table2Version == 213 and indicatorOfParameter == 140:
return 'spp40'
if table2Version == 213 and indicatorOfParameter == 139:
return 'spp39'
if table2Version == 213 and indicatorOfParameter == 138:
return 'spp38'
if table2Version == 213 and indicatorOfParameter == 137:
return 'spp37'
if table2Version == 213 and indicatorOfParameter == 136:
return 'spp36'
if table2Version == 213 and indicatorOfParameter == 135:
return 'spp35'
if table2Version == 213 and indicatorOfParameter == 134:
return 'spp34'
if table2Version == 213 and indicatorOfParameter == 133:
return 'spp33'
if table2Version == 213 and indicatorOfParameter == 132:
return 'spp32'
if table2Version == 213 and indicatorOfParameter == 131:
return 'spp31'
if table2Version == 213 and indicatorOfParameter == 130:
return 'spp30'
if table2Version == 213 and indicatorOfParameter == 129:
return 'spp29'
if table2Version == 213 and indicatorOfParameter == 128:
return 'spp28'
if table2Version == 213 and indicatorOfParameter == 127:
return 'spp27'
if table2Version == 213 and indicatorOfParameter == 126:
return 'spp26'
if table2Version == 213 and indicatorOfParameter == 125:
return 'spp25'
if table2Version == 213 and indicatorOfParameter == 124:
return 'spp24'
if table2Version == 213 and indicatorOfParameter == 123:
return 'spp23'
if table2Version == 213 and indicatorOfParameter == 122:
return 'spp22'
if table2Version == 213 and indicatorOfParameter == 121:
return 'spp21'
if table2Version == 213 and indicatorOfParameter == 120:
return 'spp20'
if table2Version == 213 and indicatorOfParameter == 119:
return 'spp19'
if table2Version == 213 and indicatorOfParameter == 118:
return 'spp18'
if table2Version == 213 and indicatorOfParameter == 117:
return 'spp17'
if table2Version == 213 and indicatorOfParameter == 116:
return 'spp16'
if table2Version == 213 and indicatorOfParameter == 115:
return 'spp15'
if table2Version == 213 and indicatorOfParameter == 114:
return 'spp14'
if table2Version == 213 and indicatorOfParameter == 113:
return 'spp13'
if table2Version == 213 and indicatorOfParameter == 112:
return 'spp12'
if table2Version == 213 and indicatorOfParameter == 111:
return 'spp11'
if table2Version == 213 and indicatorOfParameter == 110:
return 'spp10'
if table2Version == 213 and indicatorOfParameter == 109:
return 'spp9'
if table2Version == 213 and indicatorOfParameter == 108:
return 'spp8'
if table2Version == 213 and indicatorOfParameter == 107:
return 'spp7'
if table2Version == 213 and indicatorOfParameter == 106:
return 'spp6'
if table2Version == 213 and indicatorOfParameter == 105:
return 'spp5'
if table2Version == 213 and indicatorOfParameter == 104:
return 'spp4'
if table2Version == 213 and indicatorOfParameter == 103:
return 'spp3'
if table2Version == 213 and indicatorOfParameter == 102:
return 'spp2'
if table2Version == 213 and indicatorOfParameter == 101:
return 'spp1'
if table2Version == 213 and indicatorOfParameter == 5:
return 'sppt5'
if table2Version == 213 and indicatorOfParameter == 4:
return 'sppt4'
if table2Version == 213 and indicatorOfParameter == 3:
return 'sppt3'
if table2Version == 213 and indicatorOfParameter == 2:
return 'sppt2'
if table2Version == 213 and indicatorOfParameter == 1:
return 'sppt1'
if table2Version == 212 and indicatorOfParameter == 255:
return '~'
if table2Version == 212 and indicatorOfParameter == 254:
return '~'
if table2Version == 212 and indicatorOfParameter == 253:
return '~'
if table2Version == 212 and indicatorOfParameter == 252:
return '~'
if table2Version == 212 and indicatorOfParameter == 251:
return '~'
if table2Version == 212 and indicatorOfParameter == 250:
return '~'
if table2Version == 212 and indicatorOfParameter == 249:
return '~'
if table2Version == 212 and indicatorOfParameter == 248:
return '~'
if table2Version == 212 and indicatorOfParameter == 247:
return '~'
if table2Version == 212 and indicatorOfParameter == 246:
return '~'
if table2Version == 212 and indicatorOfParameter == 245:
return '~'
if table2Version == 212 and indicatorOfParameter == 244:
return '~'
if table2Version == 212 and indicatorOfParameter == 243:
return '~'
if table2Version == 212 and indicatorOfParameter == 242:
return '~'
if table2Version == 212 and indicatorOfParameter == 241:
return '~'
if table2Version == 212 and indicatorOfParameter == 240:
return '~'
if table2Version == 212 and indicatorOfParameter == | |
"""
Functions for processing input commands.
All global functions in this module whose names do not start with "_"
are considered inputfuncs. Each function must have the following
callsign (where the inputfunc name is always lower-case, no matter what the
OOB input name looked like):
inputfunc(session, *args, **kwargs)
Where "options" is always one of the kwargs, containing eventual
protocol-options.
There is one special function, the "default" function, which is called
on a no-match. It has this callsign:
default(session, cmdname, *args, **kwargs)
Evennia knows which modules to use for inputfuncs by
settings.INPUT_FUNC_MODULES.
"""
import importlib
from codecs import lookup as codecs_lookup
from django.conf import settings
from evennia.commands.cmdhandler import cmdhandler
from evennia.accounts.models import AccountDB
from evennia.utils.logger import log_err
from evennia.utils.utils import to_str
# Session store of the web-session engine configured in Django settings
# (imported dynamically since the module path comes from settings).
BrowserSessionStore = importlib.import_module(settings.SESSION_ENGINE).SessionStore

# always let "idle" work since we use this in the webclient
_IDLE_COMMAND = settings.IDLE_COMMAND
# normalize to a tuple of accepted idle aliases, without duplicating "idle"
_IDLE_COMMAND = (_IDLE_COMMAND,) if _IDLE_COMMAND == "idle" else (_IDLE_COMMAND, "idle")
# direct attribute access, bypassing __getattribute__/__setattr__ overloads
_GA = object.__getattribute__
_SA = object.__setattr__


def _NA(o):
    """Return the placeholder string "N/A" (ignores its argument)."""
    return "N/A"


# message template for reporting unrecognized input to an inputfunc
_ERROR_INPUT = "Inputfunc {name}({session}): Wrong/unrecognized input: {inp}"
# All global functions are inputfuncs available to process inputs
def text(session, *args, **kwargs):
    """
    Main text input from the client. This will execute a command
    string on the server.

    Args:
        session (Session): The active Session to receive the input.
        text (str): First arg is used as text-command input. Other
            arguments are ignored.

    """
    txt = args[0] if args else None
    # explicitly check for None since text can be an empty string, which is
    # also valid
    if txt is None:
        return
    # this is treated as a command input
    # handle the 'idle' command: only bump the idle counters, don't execute
    if txt.strip() in _IDLE_COMMAND:
        session.update_session_counters(idle=True)
        return
    # NOTE: nick replacement of the input line used to happen here but is
    # disabled; the dead commented-out block was removed (see VCS history).
    kwargs.pop("options", None)
    # hand the line to the command handler in the session's name
    cmdhandler(session, txt, callertype="session", session=session, **kwargs)
    session.update_session_counters()
def bot_data_in(session, *args, **kwargs):
    """
    Text input from the IRC and RSS bots.
    This will trigger the execute_cmd method on the bots in-game counterpart.

    Args:
        session (Session): The active Session to receive the input.
        text (str): First arg is text input. Other arguments are ignored.

    """
    if not args:
        return
    txt = args[0]
    # None is skipped explicitly; an empty string is still valid input.
    if txt is None:
        return
    # the 'idle' command only bumps the idle counters, nothing is executed
    if txt.strip() in _IDLE_COMMAND:
        session.update_session_counters(idle=True)
        return
    kwargs.pop("options", None)
    # Trigger the execute_cmd method of the corresponding bot.
    session.account.execute_cmd(session=session, txt=txt, **kwargs)
    session.update_session_counters()
def echo(session, *args, **kwargs):
    """
    Echo test function: sends the received positional args back to the client.

    Args:
        session (Session): The active Session to echo back to.
    """
    # BUGFIX: `"%s" % args` treats the args tuple as the full argument list of
    # the format string, raising TypeError whenever len(args) != 1. Wrapping
    # it in a 1-tuple formats the tuple itself.
    session.data_out(text="Echo returns: %s" % (args,))
def default(session, cmdname, *args, **kwargs):
    """
    Default catch-function. This is like all other input functions except
    it will get `cmdname` as the first argument.
    """
    template = (
        "Session {sessid}: Input command not recognized:\n"
        " name: '{cmdname}'\n"
        " args, kwargs: {args}, {kwargs}"
    )
    err = template.format(
        sessid=session.sessid, cmdname=cmdname, args=args, kwargs=kwargs
    )
    # echo the problem to the client only if it asked for input debugging
    if session.protocol_flags.get("INPUTDEBUG", False):
        session.msg(err)
    log_err(err)
# Client-capability flag names recognized (and reported back) by
# client_options below; stored upper-case in session.protocol_flags.
_CLIENT_OPTIONS = (
    "ANSI",
    "XTERM256",
    "MXP",
    "UTF-8",
    "SCREENREADER",
    "ENCODING",
    "MCCP",
    "SCREENHEIGHT",
    "SCREENWIDTH",
    "INPUTDEBUG",
    "RAW",
    "NOCOLOR",
    "NOGOAHEAD",
)
def client_options(session, *args, **kwargs):
    """
    This allows the client an OOB way to inform us about its name and
    capabilities. This will be integrated into the session settings.

    Kwargs:
        get (bool): If this is true, return the settings as a dict
            (ignore all other kwargs).
        client (str): A client identifier, like "mushclient".
        version (str): A client version
        ansi (bool): Supports ansi colors
        xterm256 (bool): Supports xterm256 colors or not
        mxp (bool): Supports MXP or not
        utf-8 (bool): Supports UTF-8 or not
        screenreader (bool): Screen-reader mode on/off
        mccp (bool): MCCP compression on/off
        screenheight (int): Screen height in lines
        screenwidth (int): Screen width in characters
        inputdebug (bool): Debug input functions
        nocolor (bool): Strip color
        raw (bool): Turn off parsing

    """
    old_flags = session.protocol_flags
    if not kwargs or kwargs.get("get", False):
        # return current settings
        options = dict(
            (key, old_flags[key]) for key in old_flags if key.upper() in _CLIENT_OPTIONS
        )
        session.msg(client_options=options)
        return

    def validate_encoding(val):
        # helper: verify the encoding is known to Python before accepting it
        try:
            codecs_lookup(val)
        except LookupError:
            raise RuntimeError("The encoding '|w%s|n' is invalid. " % val)
        return val

    def validate_size(val):
        # sizes are stored as a dict keyed by window-pane id (0 = main pane)
        return {0: int(val)}

    def validate_bool(val):
        if isinstance(val, str):
            return True if val.lower() in ("true", "on", "1") else False
        return bool(val)

    flags = {}
    for key, value in kwargs.items():
        key = key.lower()
        if key == "client":
            flags["CLIENTNAME"] = to_str(value)
        elif key == "version":
            if "CLIENTNAME" in flags:
                flags["CLIENTNAME"] = "%s %s" % (flags["CLIENTNAME"], to_str(value))
        elif key == "encoding":
            # BUGFIX: `key` is lower-cased above, so the old comparison with
            # the literal "ENCODING" could never match and the encoding
            # option was always rejected as unrecognized input.
            flags["ENCODING"] = validate_encoding(value)
        elif key == "ansi":
            flags["ANSI"] = validate_bool(value)
        elif key == "xterm256":
            flags["XTERM256"] = validate_bool(value)
        elif key == "mxp":
            flags["MXP"] = validate_bool(value)
        elif key == "utf-8":
            flags["UTF-8"] = validate_bool(value)
        elif key == "screenreader":
            flags["SCREENREADER"] = validate_bool(value)
        elif key == "mccp":
            flags["MCCP"] = validate_bool(value)
        elif key == "screenheight":
            flags["SCREENHEIGHT"] = validate_size(value)
        elif key == "screenwidth":
            flags["SCREENWIDTH"] = validate_size(value)
        elif key == "inputdebug":
            flags["INPUTDEBUG"] = validate_bool(value)
        elif key == "nocolor":
            flags["NOCOLOR"] = validate_bool(value)
        elif key == "raw":
            flags["RAW"] = validate_bool(value)
        elif key == "nogoahead":
            flags["NOGOAHEAD"] = validate_bool(value)
        elif key in (
            # BUGFIX: compare lower-case, since `key` was lower-cased above;
            # the original mixed-case literals ("Char 1", ...) never matched,
            # so mudlet's default sends were wrongly reported as errors.
            "char 1",
            "char.skills 1",
            "char.items 1",
            "room 1",
            "ire.rift 1",
            "ire.composer 1",
        ):
            # ignore mudlet's default send (aimed at IRE games)
            pass
        elif key not in ("options", "cmdid"):
            err = _ERROR_INPUT.format(name="client_settings", session=session, inp=key)
            session.msg(text=err)

    session.protocol_flags.update(flags)
    # we must update the protocol flags on the portal session copy as well
    session.sessionhandler.session_portal_partial_sync({session.sessid: {"protocol_flags": flags}})
def get_client_options(session, *args, **kwargs):
    """
    Alias wrapper for getting options.

    Equivalent to calling `client_options` with get=True; any extra
    args/kwargs are accepted but ignored.
    """
    client_options(session, get=True)
def get_inputfuncs(session, *args, **kwargs):
    """
    Report the names and docstrings of all available inputfuncs.

    The registry is fetched from the sessionhandler rather than from this
    module alone, since inputfuncs can be contributed by multiple modules.
    """
    registered = session.sessionhandler.get_inputfuncs()
    docs = {name: func.__doc__ for name, func in registered.items()}
    session.msg(get_inputfuncs=docs)
def login(session, *args, **kwargs):
    """
    Perform a login. This only works if session is currently not logged
    in. This will also automatically throttle too quick attempts.

    Kwargs:
        name (str): Account name
        password (str): Account password
    """
    # only attempt login when both credentials are supplied and the
    # session is not already authenticated
    if not session.logged_in and "name" in kwargs and "password" in kwargs:
        # deferred import to avoid a circular import at module load time
        from evennia.commands.default.unloggedin import create_normal_account
        account = create_normal_account(session, kwargs["name"], kwargs["password"])
        if account:
            session.sessionhandler.login(session, account)
# Whitelist of value names the `get_value` inputfunc may report, mapped to
# getter callables that take the puppet/account object to inspect.
_gettable = {
    "name": lambda obj: obj.key,
    "key": lambda obj: obj.key,
    "location": lambda obj: obj.location.key if obj.location else "None",
    "servername": lambda obj: settings.SERVERNAME,
}
def get_value(session, *args, **kwargs):
    """
    Return the value of a given attribute or db_property on the
    session's current account or character.

    Kwargs:
        name (str): Name of info value to return. Only names
            in the _gettable dictionary earlier in this module
            are accepted.
    """
    target = session.puppet or session.account
    requested = kwargs.get("name", "")
    getter = _gettable.get(requested)
    # silently ignore names outside the whitelist
    if getter is not None:
        session.msg(get_value={"name": requested, "value": getter(target)})
def _testrepeat(**kwargs):
"""
This is a test function for using with the repeat
inputfunc.
Kwargs:
session (Session): Session to return to.
"""
import time
kwargs["session"].msg(repeat="Repeat called: %s" % time.time())
_repeatable = {"test1": _testrepeat, "test2": _testrepeat} # example only # "
def repeat(session, *args, **kwargs):
    """
    Call a named function repeatedly. Note that
    this is meant as an example of limiting the number of
    possible call functions.

    Kwargs:
        callback (str): The function to call. Only functions
            from the _repeatable dictionary earlier in this
            module are available.
        interval (int): How often to call function (s).
            Defaults to once every 60 seconds with a minimum
            of 5 seconds.
        stop (bool): Stop a previously assigned ticker with
            the above settings.
    """
    from evennia.scripts.tickerhandler import TICKER_HANDLER

    callback_name = kwargs.get("callback", "")
    # clamp the requested interval to a 5-second minimum
    tick_interval = max(5, int(kwargs.get("interval", 60)))
    func = _repeatable.get(callback_name)
    if func is None:
        # not in the whitelist - tell the client what is allowed
        session.msg("Allowed repeating functions are: %s" % (", ".join(_repeatable)))
        return
    if kwargs.get("stop", False):
        TICKER_HANDLER.remove(
            tick_interval, func, idstring=session.sessid, persistent=False
        )
    else:
        TICKER_HANDLER.add(
            tick_interval,
            func,
            idstring=session.sessid,
            persistent=False,
            session=session,
        )
def unrepeat(session, *args, **kwargs):
"Wrapper for OOB use"
kwargs["stop"] = True
repeat(session, *args, **kwargs)
_monitorable = {"name": "db_key", "location": "db_location", "desc": "desc"}
def _on_monitor_change(**kwargs):
    """Callback fired when a monitored field/attribute changes; relays the
    new value to the monitoring session."""
    session = kwargs["session"]
    # the session may be None if the char quits and someone
    # else then edits the object
    if not session:
        return
    obj = kwargs["obj"]
    payload = {
        kwargs["outputfunc_name"]: {
            "name": kwargs["name"],
            "value": _GA(obj, kwargs["fieldname"]),
        }
    }
    session.msg(**payload)
def monitor(session, *args, **kwargs):
"""
Adds monitoring to a given property or Attribute.
Kwargs:
name (str): The name of the | |
# Source repository: OpenMDAO-Plugins/flops_wrapper
"""
OpenMDAO Wrapper for Flops
Automatically generated from flops.scriptWrapper with parse_phoenixwrapper.
This wrapper is based on the ModelCenter Java wrapper, version 2.00 Beta
"""
# pylint: disable-msg=E0611,F0401,E1101
from numpy import int64 as numpy_int64
from numpy import float64 as numpy_float64
from numpy import str as numpy_str
from numpy import zeros, array
from openmdao.util.filewrap import FileParser
from openmdao.util.namelist_util import Namelist
from openmdao.main.api import VariableTree, FileMetadata
from openmdao.lib.datatypes.api import Str, Bool, Int, Array, Enum, Float, \
File, List, VarTree
from openmdao.lib.components.api import ExternalCode
# pylint: disable-msg=C0301,C0324,C0103,R0903
class FlopsWrapper_output_Weight_Wing(VariableTree):
    """Container for output.Weight.Wing

    Read-only results of the FLOPS wing-weight breakdown (three additive
    terms plus the bending and engine-relief factors).
    """

    # OpenMDAO Public Variables
    w = Float(0.0, desc='Bending material factor. For detailed wing definition, this factor is calculated by numerical integration along the specified load path to determine the amount of bending material required to support an elliptical load distribution. The wing is treated as an idealized beam with dimensions proportional to the wing local chord and thickness. The bending factor is modified for aeroelastic penalties (flutter, divergence, and aeroelastic loads) depending on wing sweep (including forward), aspect ratio, degree of aeroelastic tailoring, and strut bracing, if any. These modifications are based on a curve fit of the results of a study performed using the Aeroelastic Tailoring and Structural Optimization (ATSO) code to structurally optimize a large matrix of wings.\n\nIf the detailed wing definition is not used, an equivalent bending factor is computed assuming a trapezoidal wing with constant t/c.')
    ew = Float(0.0, desc='Engine inertia relief factor.')
    w1 = Float(0.0, desc='The first term in the wing weight is the bending factor. It is adjusted for inertia relief for the wing itself and for any engines on the wing.')
    w2 = Float(0.0, desc='The second term represents control surfaces and shear material. According to structural and statistical studies conducted during weight module development, the weight of spars and ribs depends almost entirely on control surfaces. The amount of shear material required to carry structural loads is not critical.')
    w3 = Float(0.0, desc='The third term depends entirely on wing area and covers multitude of miscellaneous items.')
class FlopsWrapper_output_Weight_Inertia(VariableTree):
    """Container for output.Weight.Inertia

    Arrays parsed from the FLOPS inertia output. Presumably cg* are
    center-of-gravity coordinates and ixx*/ixz are moments/products of
    inertia about the roll/pitch/yaw axes -- TODO confirm against the
    FLOPS output specification.
    """

    # OpenMDAO Public Variables
    cgx = Array(dtype=numpy_float64)
    cgy = Array(dtype=numpy_float64)
    cgz = Array(dtype=numpy_float64)
    ixxroll = Array(dtype=numpy_float64)
    ixxptch = Array(dtype=numpy_float64)
    ixxyaw = Array(dtype=numpy_float64)
    ixz = Array(dtype=numpy_float64)
class FlopsWrapper_output_Weight(VariableTree):
    """Container for output.Weight

    Scalar weight-statement results parsed from the FLOPS output. Field
    names follow the FLOPS internal variable naming; individual meanings
    and units are not recorded here -- consult the FLOPS user's guide.
    """

    # OpenMDAO Public Variables
    dowe = Float(0.0)
    paylod = Float(0.0)
    fuel = Float(0.0)
    rampwt = Float(0.0)
    wsr = Float(0.0)
    thrso = Float(0.0)
    esf = Float(0.0)
    twr = Float(0.0)
    wldg = Float(0.0)
    fultot = Float(0.0)
    exsful = Float(0.0)
    frwi = Float(0.0)
    frht = Float(0.0)
    frvt = Float(0.0)
    frfin = Float(0.0)
    frcan = Float(0.0)
    frfu = Float(0.0)
    wlg = Float(0.0)
    frna = Float(0.0)
    wengt = Float(0.0)
    wthr = Float(0.0)
    wpmisc = Float(0.0)
    wfsys = Float(0.0)
    frsc = Float(0.0)
    wapu = Float(0.0)
    win = Float(0.0)
    whyd = Float(0.0)
    welec = Float(0.0)
    wavonc = Float(0.0)
    wfurn = Float(0.0)
    wac = Float(0.0)
    wai = Float(0.0)
    wempty = Float(0.0)
    wflcrbw = Float(0.0)
    wwstuab = Float(0.0)
    wuf = Float(0.0)
    woil = Float(0.0)
    wsrv = Float(0.0)
    zfw = Float(0.0)
    wbomb = Float(0.0)

    # VariableTrees
    Inertia = VarTree(FlopsWrapper_output_Weight_Inertia())
    Wing = VarTree(FlopsWrapper_output_Weight_Wing())
class FlopsWrapper_output_Plot_Files(VariableTree):
    """Container for output.Plot_Files

    Intentionally empty: the plot-file outputs below are disabled to avoid
    re-reading them on every execution.
    """

    # OpenMDAO Public Variables
    # TODO - Do we really need to read these in every time? Let's not for now.
    #cnfile = File(iotype='out', desc='Contour or thumbprint plot data file')
    #msfile = File(iotype='out', desc='Mission summary data file')
    #crfile = File(iotype='out', desc='Cruise schedule summary data file')
    #tofile = File(iotype='out', desc='Takeoff and landing aerodynamic and thrust data file')
    #nofile = File(iotype='out', desc='Takeoff and climb profile data file')
    #apfile = File(iotype='out', desc='Drag polar plot data file')
    #thfile = File(iotype='out', desc='Engine plot data file name')
    #hsfile = File(iotype='out', desc='Design history plot file')
    #psfile = File(iotype='out', desc='Excess power and load factor plot data file')
class FlopsWrapper_output_Performance_Segments(VariableTree):
    """Container for output.Performance.Segments

    Per-mission-segment arrays parsed from the FLOPS performance output.
    NOTE(review): the parallel '...s'/'...e' suffixed arrays appear to be
    segment start vs. end values -- confirm against the output parser.
    """

    # OpenMDAO Public Variables
    segment = Array(dtype=numpy_str)
    weights = Array(dtype=numpy_float64)
    alts = Array(dtype=numpy_float64)
    machs = Array(dtype=numpy_float64)
    thrusts = Array(dtype=numpy_float64)
    totmaxs = Array(dtype=numpy_float64)
    lods = Array(dtype=numpy_float64)
    sfcs = Array(dtype=numpy_float64)
    engparms = Array(dtype=numpy_float64)
    weighte = Array(dtype=numpy_float64)
    alte = Array(dtype=numpy_float64)
    mache = Array(dtype=numpy_float64)
    thruste = Array(dtype=numpy_float64)
    totmaxe = Array(dtype=numpy_float64)
    lode = Array(dtype=numpy_float64)
    sfce = Array(dtype=numpy_float64)
    engparme = Array(dtype=numpy_float64)
class FlopsWrapper_output_Performance_Constraints(VariableTree):
    """Container for output.Performance.Constraints

    Parallel arrays: one entry per performance constraint reported by FLOPS
    (name, value/limit, and the flight condition it was evaluated at).
    """

    # OpenMDAO Public Variables
    constraint = Array(dtype=numpy_str)
    value = Array(dtype=numpy_float64)
    units = Array(dtype=numpy_str)
    limit = Array(dtype=numpy_float64)
    weight = Array(dtype=numpy_float64)
    mach = Array(dtype=numpy_float64)
    alt = Array(dtype=numpy_float64)
    g = Array(dtype=numpy_float64)
    location = Array(dtype=numpy_str)
class FlopsWrapper_output_Performance(VariableTree):
    """Container for output.Performance

    Scalar mission-performance results plus the constraint and segment
    sub-trees. Names follow FLOPS internal variables; units unrecorded.
    """

    # OpenMDAO Public Variables
    fuel = Float(0.0)
    range = Float(0.0)
    vapp = Float(0.0)
    taxofl = Float(0.0)
    faroff = Float(0.0)
    farldg = Float(0.0)
    amfor = Float(0.0)
    ssfor = Float(0.0)
    esf = Float(0.0)
    thrso = Float(0.0)
    vmmo = Float(0.0)

    # VariableTrees
    Constraints = VarTree(FlopsWrapper_output_Performance_Constraints())
    Segments = VarTree(FlopsWrapper_output_Performance_Segments())
class FlopsWrapper_output_Payload(VariableTree):
    """Container for output.Payload

    Passenger/crew counts (Int fields) and payload weights (Float fields)
    parsed from the FLOPS output; names follow FLOPS internal variables.
    """

    # OpenMDAO Public Variables
    npf = Int(0)
    npb = Int(0)
    npt = Int(0)
    nstu = Int(0)
    ngalc = Int(0)
    nflcr = Int(0)
    nstuag = Int(0)
    wppass = Float(0.0)
    bpp = Float(0.0)
    cargow = Float(0.0)
    cargof = Float(0.0)
    wcon = Float(0.0)
class FlopsWrapper_output_Noise(VariableTree):
    """Container for output.Noise"""

    # OpenMDAO Public Variables
    # BUGFIX: the description was passed as `msg=`, a keyword used nowhere
    # else in this file; use `desc=` so the description is attached as
    # standard trait metadata like every other variable here.
    nsplot = Str('', desc='Noise output filename')
class FlopsWrapper_output_Geometry_BWB(VariableTree):
    """Container for output.Geometry.BWB

    Cabin/fuselage geometry results specific to blended-wing-body layouts.
    """

    # OpenMDAO Public Variables
    xlp = Float(0.0, units='ft', desc='Length of centerline')
    xlw = Float(0.0, units='ft', desc='Length of side wall')
    wf = Float(0.0, units='ft', desc='Width of cabin')
    acabin = Float(0.0, units='ft*ft', desc='Cabin area')
    nbaw = Int(0, desc='Number of bays')
    bayw = Float(0.0, units='ft', desc='Width of bay')
    nlava = Int(0, desc='NUMBER OF LAVATORIES')
    ngally = Int(0, desc='Number of galleys')
    nclset = Int(0, desc='Number of closets')
    xl = Float(0.0, units='ft', desc='Total fuselage length')
    df = Float(0.0, units='ft', desc='Fuselage maximum depth')
class FlopsWrapper_output_Geometry(VariableTree):
    """Container for output.Geometry

    Overall configuration geometry results (fuselage, wing, tails, nacelle,
    landing gear); names follow FLOPS internal variables, units unrecorded.
    """

    # OpenMDAO Public Variables
    xl = Float(0.0)
    wf = Float(0.0)
    df = Float(0.0)
    xlp = Float(0.0)
    ar = Float(0.0)
    sw = Float(0.0)
    tr = Float(0.0)
    sweep = Float(0.0)
    tca = Float(0.0)
    span = Float(0.0)
    glov = Float(0.0)
    sht = Float(0.0)
    svt = Float(0.0)
    xnac = Float(0.0)
    dnac = Float(0.0)
    xmlg = Float(0.0)
    xnlg = Float(0.0)

    # VariableTrees
    BWB = VarTree(FlopsWrapper_output_Geometry_BWB())
class FlopsWrapper_output_Engine(VariableTree):
    """Container for output.Engine

    String fields; presumably names of engine-related output files
    produced by FLOPS -- TODO confirm against the wrapper's file handling.
    """

    # OpenMDAO Public Variables
    ofile = Str('')
    eofile = Str('')
    anopp = Str('')
    footpr = Str('')
    pltfil = Str('')
class FlopsWrapper_output_Econ(VariableTree):
    """Container for output.Econ

    Economic-mission result arrays. Note: `range` shadows the builtin only
    inside this class namespace; it is kept because the field name is part
    of the wrapper's public interface.
    """

    # OpenMDAO Public Variables
    sl = Array(dtype=numpy_float64)
    blockt = Array(dtype=numpy_float64)
    blockf = Array(dtype=numpy_float64)
    blockNx = Array(dtype=numpy_float64)
    wpayl = Array(dtype=numpy_float64)
    wgross = Array(dtype=numpy_float64)
    range = Array(dtype=numpy_float64)
    vapp = Array(dtype=numpy_float64)
    faroff = Array(dtype=numpy_float64)
    farldg = Array(dtype=numpy_float64)
    amfor = Array(dtype=numpy_float64)
    ssfor = Array(dtype=numpy_float64)
class FlopsWrapper_output(VariableTree):
    """Container for output

    Aggregates all FLOPS output sub-trees under a single `output` tree.
    """

    # VariableTrees
    Econ = VarTree(FlopsWrapper_output_Econ())
    Engine = VarTree(FlopsWrapper_output_Engine())
    Geometry = VarTree(FlopsWrapper_output_Geometry())
    Noise = VarTree(FlopsWrapper_output_Noise())
    Payload = VarTree(FlopsWrapper_output_Payload())
    Performance = VarTree(FlopsWrapper_output_Performance())
    Plot_Files = VarTree(FlopsWrapper_output_Plot_Files())
    Weight = VarTree(FlopsWrapper_output_Weight())
class FlopsWrapper_input_wtin_Wing_Data(VariableTree):
    """Container for input.wtin.Wing_Data

    Wing weight-estimation inputs for the FLOPS &WTIN namelist.
    """

    # OpenMDAO Public Variables
    span = Float(0.0, units='ft', desc='Wing span (optional, see &CONFIN - SW and AR)')
    dih = Float(0.0, units='deg', desc='Wing dihedral (positive) or anhedral (negative) angle')
    flapr = Float(0.3330, desc='Flap ratio -- ratio of total movable wing surface area (flaps, elevators, spoilers, etc.) to wing area')
    glov = Float(0.0, units='ft*ft', desc='Total glove and bat area beyond theoretical wing')
    varswp = Float(0.0, desc='Fraction of wing variable sweep weight penalty = 0., Fixed-geometry wing = 1., Full variable-sweep wing')
    fcomp = Float(0.0, desc='Decimal fraction of amount of composites used in wing structure = 0., No composites = 1., Maximum use of composites, approximately equivalent to FRWI1=.6, FRWI2=.83, FRWI3=.7 (Not necessarily all composite) This only applies to the wing. Use override parameters for other components such as FRHT=.75, FRVT=.75, FRFU=.82, FRLGN=.85, FRLGM=.85, FRNA=.8')
    faert = Float(0.0, desc='Decimal fraction of amount of aeroelastic tailoring used in design of wing = 0., No aeroelastic tailoring = 1., Maximum aeroelastic tailoring')
    fstrt = Float(0.0, desc='Wing strut-bracing factor = 0., No wing strut = 1., Full benefit from strut bracing')
class FlopsWrapper_input_wtin_Tails_Fins(VariableTree):
"""Container for input.wtin.Tails_Fins"""
# OpenMDAO Public Variables
sht = Float(0.0, units='ft*ft', desc='Horizontal tail theoretical area')
swpht = Float(-100.0, units='deg', desc='Horizontal tail 25% chord sweep angle (Default = SWEEP, Namelist &CONFIN)')
arht = Float(-100.0, desc='Horizontal tail theoretical aspect ratio (Default = AR/2, Namelist &CONFIN)')
trht = Float(-100.0, desc='Horizontal tail theoretical taper ratio (Default = TR, Namelist &CONFIN)')
tcht = Float(0.0, desc='Thickness-chord ratio for the horizontal tail (Default = TCA, Namelist &CONFIN)')
hht = Float(-100.0, desc='Decimal | |
in AGI
ymod1 = (e00200 + e00700 + e00800 + e01400 + e01700 +
invinc - invinc_agi_ec + e02100 + e02300 +
max(e00900 + e02000, -ALD_BusinessLosses_c[MARS - 1]))
if CG_nodiff:
# apply QDIV+CG exclusion if QDIV+LTCG receive no special tax treatment
qdcg_pos = max(0., e00650 + c01000)
qdcg_exclusion = (min(CG_ec, qdcg_pos) +
CG_reinvest_ec_rt * max(0., qdcg_pos - CG_ec))
ymod1 = max(0., ymod1 - qdcg_exclusion)
invinc_agi_ec += qdcg_exclusion
# compute ymod variable that is used in OASDI benefit taxation logic
ymod2 = e00400 + (0.50 * e02400) - c02900
ymod3 = (1. - ALD_StudentLoan_hc) * e03210 + e03230 + e03240
ymod = ymod1 + ymod2 + ymod3
return (c01000, c23650, ymod, ymod1, invinc_agi_ec,
gains_at_death, taxable_gains_at_death)
@iterate_jit(nopython=True)
def SSBenefits(MARS, ymod, e02400, SS_thd50, SS_thd85,
               SS_percentage1, SS_percentage2, c02500):
    """
    Calculates OASDI benefits included in AGI, c02500.

    Below the first threshold no benefits are taxable; between the two
    thresholds up to SS_percentage1 of benefits is included; above the
    second threshold the inclusion is capped at SS_percentage2 of benefits.
    """
    thd50 = SS_thd50[MARS - 1]
    thd85 = SS_thd85[MARS - 1]
    if ymod < thd50:
        c02500 = 0.
    elif ymod < thd85:
        c02500 = SS_percentage1 * min(ymod - thd50, e02400)
    else:
        amount = (SS_percentage2 * (ymod - thd85) +
                  SS_percentage1 * min(e02400, thd85 - thd50))
        c02500 = min(amount, SS_percentage2 * e02400)
    return c02500
@iterate_jit(nopython=True)
def UBI(nu18, n1820, n21, UBI_u18, UBI_1820, UBI_21, UBI_ecrt,
        ubi, taxable_ubi, nontaxable_ubi):
    """
    Calculates total and taxable Universal Basic Income (UBI) amount.

    Parameters
    ----------
    nu18: Number of people in the tax unit under 18
    n1820: Number of people in the tax unit age 18-20
    n21: Number of people in the tax unit age 21+
    UBI_u18: UBI benefit for those under 18
    UBI_1820: UBI benefit for those between 18 to 20
    UBI_21: UBI benefit for those 21 or more
    UBI_ecrt: Fraction of UBI benefits that are not included in AGI

    Returns
    -------
    ubi: total UBI received by the tax unit (is included in expanded_income)
    taxable_ubi: amount of UBI that is taxable (is added to AGI)
    nontaxable_ubi: amount of UBI that is nontaxable
    """
    # per-head benefit for each of the three age groups
    benefit_u18 = nu18 * UBI_u18
    benefit_1820 = n1820 * UBI_1820
    benefit_21 = n21 * UBI_21
    ubi = benefit_u18 + benefit_1820 + benefit_21
    # split total into taxable and nontaxable portions
    taxable_ubi = ubi * (1. - UBI_ecrt)
    nontaxable_ubi = ubi - taxable_ubi
    return ubi, taxable_ubi, nontaxable_ubi
@iterate_jit(nopython=True)
def AGI(ymod1, c02500, c02900, XTOT, MARS, sep, DSI, exact, nu18, taxable_ubi,
        II_em, II_em_ps, II_prt, II_no_em_nu18,
        c00100, pre_c04600, c04600):
    """
    Computes Adjusted Gross Income (AGI), c00100, and
    compute personal exemption amount, c04600.
    """
    # AGI assuming no foreign earned income exclusion
    c00100 = ymod1 + c02500 - c02900 + taxable_ubi
    # personal exemption amount before phase-out
    if DSI:
        # dependent filers receive no personal exemption
        pre_c04600 = 0.
    elif II_no_em_nu18:
        # repeal of personal exemptions for dependents under 18
        pre_c04600 = max(0, XTOT - nu18) * II_em
    else:
        pre_c04600 = XTOT * II_em
    # phase-out personal exemption amount above the AGI start level
    phaseout_start = II_em_ps[MARS - 1]
    if exact == 1:  # exact calculation as on tax forms
        line5 = max(0., c00100 - phaseout_start)
        line6 = math.ceil(line5 / (2500. / sep))
        line7 = II_prt * line6
        c04600 = max(0., pre_c04600 * (1. - line7))
    else:  # smoothed calculation needed for sensible mtr calculation
        numer = II_prt * (c00100 - phaseout_start)
        denom = 2500. / sep
        fraction = min(1., max(0., numer / denom))
        c04600 = pre_c04600 * (1. - fraction)
    return (c00100, pre_c04600, c04600)
@iterate_jit(nopython=True)
def ItemDedCap(e17500, e18400, e18500, e19200, e19800, e20100, e20400, g20500,
               c00100, ID_AmountCap_rt, ID_AmountCap_Switch, e17500_capped,
               e18400_capped, e18500_capped, e19200_capped, e19800_capped,
               e20100_capped, e20400_capped, g20500_capped):
    """
    Applies a cap to gross itemized deductions.

    Notes
    -----
    Tax Law Parameters:
        ID_AmountCap_Switch : Indicator for which itemized deductions are
                              capped
        ID_AmountCap_rt : Cap on itemized deductions; decimal fraction of AGI

    Taxpayer Characteristics:
        e17500 : Medical expenses

        e18400 : State and local taxes

        e18500 : Real-estate taxes

        e19200 : Interest paid

        e19800 : Charity cash contributions

        e20100 : Charity noncash contributions

        e20400 : Total miscellaneous expenses

        g20500 : Gross casualty or theft loss (before disregard)

        c00100: Adjusted Gross Income

    Returns
    -------
        e17500_capped: Medical expenses, capped by ItemDedCap

        e18400_capped: State and local taxes, capped by ItemDedCap

        e18500_capped : Real-estate taxes, capped by ItemDedCap

        e19200_capped : Interest paid, capped by ItemDedCap

        e19800_capped : Charity cash contributions, capped by ItemDedCap

        e20100_capped : Charity noncash contributions, capped by ItemDedCap

        e20400_capped : Total miscellaneous expenses, capped by ItemDedCap

        g20500_capped : Gross casualty or theft loss (before disregard),
                        capped by ItemDedCap
    """
    # pylint: disable=too-many-branches
    # the cap is a fraction of (non-negative) AGI
    cap = max(0., ID_AmountCap_rt * c00100)
    # sum only the deduction components selected by the switch vector
    gross_ded_amt = 0
    if ID_AmountCap_Switch[0]:  # medical
        gross_ded_amt += e17500
    if ID_AmountCap_Switch[1]:  # statelocal
        gross_ded_amt += e18400
    if ID_AmountCap_Switch[2]:  # realestate
        gross_ded_amt += e18500
    if ID_AmountCap_Switch[3]:  # casualty
        gross_ded_amt += g20500
    if ID_AmountCap_Switch[4]:  # misc
        gross_ded_amt += e20400
    if ID_AmountCap_Switch[5]:  # interest
        gross_ded_amt += e19200
    if ID_AmountCap_Switch[6]:  # charity
        gross_ded_amt += e19800 + e20100
    overage = max(0., gross_ded_amt - cap)
    e17500_capped = e17500
    e18400_capped = e18400
    e18500_capped = e18500
    g20500_capped = g20500
    e20400_capped = e20400
    e19200_capped = e19200
    e19800_capped = e19800
    e20100_capped = e20100
    # reduce each switched-on component in proportion to its share of the
    # gross amount, so the total reduction equals the overage
    if overage > 0. and c00100 > 0.:
        if ID_AmountCap_Switch[0]:  # medical
            e17500_capped -= (e17500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[1]:  # statelocal
            e18400_capped -= (e18400 / (gross_ded_amt) * overage)
        if ID_AmountCap_Switch[2]:  # realestate
            e18500_capped -= (e18500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[3]:  # casualty
            g20500_capped -= (g20500 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[4]:  # misc
            e20400_capped -= (e20400 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[5]:  # interest
            e19200_capped -= (e19200 / gross_ded_amt) * overage
        if ID_AmountCap_Switch[6]:  # charity
            e19800_capped -= (e19800 / gross_ded_amt) * overage
            e20100_capped -= (e20100 / gross_ded_amt) * overage
    return (e17500_capped, e18400_capped, e18500_capped, g20500_capped,
            e20400_capped, e19200_capped, e19800_capped, e20100_capped)
@iterate_jit(nopython=True)
def ItemDed(e17500_capped, e18400_capped, e18500_capped, e19200_capped,
e19800_capped, e20100_capped, e20400_capped, g20500_capped,
MARS, age_head, age_spouse, c00100, c04470, c21040, c21060,
c17000, c18300, c19200, c19700, c20500, c20800,
ID_ps, ID_Medical_frt, ID_Medical_frt_add4aged, ID_Medical_hc,
ID_Casualty_frt, ID_Casualty_hc, ID_Miscellaneous_frt,
ID_Miscellaneous_hc, ID_Charity_crt_all, ID_Charity_crt_noncash,
ID_prt, ID_crt, ID_c, ID_StateLocalTax_hc, ID_Charity_frt,
ID_Charity_hc, ID_InterestPaid_hc, ID_RealEstate_hc,
ID_Medical_c, ID_StateLocalTax_c, ID_RealEstate_c,
ID_InterestPaid_c, ID_Charity_c, ID_Casualty_c,
ID_Miscellaneous_c, ID_AllTaxes_c, ID_AllTaxes_hc,
ID_StateLocalTax_crt, ID_RealEstate_crt, ID_Charity_f):
"""
Calculates itemized deductions, Form 1040, Schedule A.
Notes
-----
Tax Law Parameters:
ID_ps : Itemized deduction phaseout AGI start (Pease)
ID_crt : Itemized deduction maximum phaseout
as a decimal fraction of total itemized deduction (Pease)
ID_prt : Itemized deduction phaseout rate (Pease)
ID_c: Dollar limit on itemized deductions
ID_Medical_frt : Deduction for medical expenses;
floor as a decimal fraction of AGI
ID_Medical_frt_add4aged : Addon for medical expenses deduction for
elderly; addon as a decimal fraction of AGI
ID_Casualty_frt : Deduction for casualty loss;
floor as a decimal fraction of AGI
ID_Miscellaneous_frt : Deduction for miscellaneous expenses;
floor as a decimal fraction of AGI
ID_Charity_crt_all : Deduction for all charitable contributions;
ceiling as a decimal fraction of AGI
ID_Charity_crt_noncash : Deduction for noncash charitable
contributions; ceiling as a decimal
fraction of AGI
ID_Charity_frt : Disregard for charitable contributions;
floor as a decimal fraction of AGI
ID_Medical_c : Ceiling on medical expense deduction
ID_StateLocalTax_c : Ceiling on state and local tax deduction
ID_RealEstate_c : Ceiling on real estate tax deduction
ID_AllTaxes_c: Ceiling combined state and local income/sales and
real estate tax deductions
ID_InterestPaid_c : Ceiling on interest paid deduction
ID_Charity_c : Ceiling on charity expense deduction
ID_Charity_f: Floor on charity expense deduction
ID_Casualty_c : Ceiling on casuality expense deduction
ID_Miscellaneous_c : Ceiling on miscellaneous expense deduction
ID_StateLocalTax_crt : Deduction for state and local taxes;
ceiling as a decimal fraction of AGI
ID_RealEstate_crt : Deduction for real estate taxes;
ceiling as a decimal fraction of AGI
Taxpayer Characteristics:
e17500_capped : Medical expenses, capped by ItemDedCap
e18400_capped : State and local taxes, capped by ItemDedCap
e18500_capped : Real-estate taxes, capped by ItemDedCap
e19200_capped : Interest paid, capped by ItemDedCap
e19800_capped : Charity cash contributions, capped by ItemDedCap
e20100_capped : Charity noncash contributions, capped by ItemDedCap
e20400_capped : Total miscellaneous expenses, capped by ItemDedCap
g20500_capped : Gross casualty or theft loss (before disregard),
capped by ItemDedCap
Returns
-------
c04470 : total itemized deduction amount (and other intermediate variables)
"""
posagi = max(c00100, 0.)
# Medical
medical_frt = ID_Medical_frt
if age_head >= 65 or (MARS == 2 and age_spouse >= 65):
medical_frt += ID_Medical_frt_add4aged
c17750 = medical_frt * posagi
c17000 = max(0., e17500_capped - c17750) * (1. - ID_Medical_hc)
c17000 = min(c17000, ID_Medical_c[MARS - | |
(208, 192, 229),
"prim" : (240, 226, 236),
"primrose" : (237, 234, 153),
"provincial pink" : (254, 245, 241),
"prussian blue" : ( 0, 49, 83),
"puce" : (204, 136, 153),
"pueblo" : (125, 44, 20),
"puerto rico" : ( 63, 193, 170),
"pumice" : (194, 202, 196),
"pumpkin" : (255, 117, 24),
"pumpkin skin" : (177, 97, 11),
"punch" : (220, 67, 51),
"punga" : ( 77, 61, 20),
"purple" : (102, 0, 153),
"purple heart" : (101, 45, 193),
"purple mountain's majesty" : (150, 120, 182),
"purple pizzazz" : (255, 0, 204),
"putty" : (231, 205, 140),
"quarter pearl lusta" : (255, 253, 244),
"quarter spanish white" : (247, 242, 225),
"quicksand" : (189, 151, 142),
"quill gray" : (214, 214, 209),
"quincy" : ( 98, 63, 45),
"racing green" : ( 12, 25, 17),
"radical red" : (255, 53, 94),
"raffia" : (234, 218, 184),
"rainee" : (185, 200, 172),
"rajah" : (247, 182, 104),
"rangitoto" : ( 46, 50, 34),
"rangoon green" : ( 28, 30, 19),
"raven" : (114, 123, 137),
"raw sienna" : (210, 125, 70),
"raw umber" : (115, 74, 18),
"razzle dazzle rose" : (255, 51, 204),
"razzmatazz" : (227, 11, 92),
"rebel" : ( 60, 18, 6),
"red" : (255, 0, 0),
"red beech" : (123, 56, 1),
"red berry" : (142, 0, 0),
"red damask" : (218, 106, 65),
"red devil" : (134, 1, 17),
"red orange" : (255, 63, 52),
"red oxide" : (110, 9, 2),
"red ribbon" : (237, 10, 63),
"red robin" : (128, 52, 31),
"red stage" : (208, 95, 4),
"red violet" : (199, 21, 133),
"redwood" : ( 93, 30, 15),
"reef" : (201, 255, 162),
"reef gold" : (159, 130, 28),
"regal blue" : ( 1, 63, 106),
"regent gray" : (134, 148, 159),
"regent st blue" : (170, 214, 230),
"remy" : (254, 235, 243),
"reno sand" : (168, 101, 21),
"resolution blue" : ( 0, 35, 135),
"revolver" : ( 44, 22, 50),
"rhino" : ( 46, 63, 98),
"rice cake" : (255, 254, 240),
"rice flower" : (238, 255, 226),
"<NAME>" : (168, 83, 7),
"rio grande" : (187, 208, 9),
"<NAME>" : (244, 216, 28),
"<NAME>" : ( 65, 0, 86),
"riptide" : (139, 230, 216),
"river bed" : ( 67, 76, 89),
"<NAME>" : (234, 198, 116),
"robin's egg blue" : ( 0, 204, 204),
"rock" : ( 77, 56, 51),
"rock blue" : (158, 177, 205),
"rock spray" : (186, 69, 12),
"rodeo dust" : (201, 178, 155),
"rolling stone" : (116, 125, 131),
"roman" : (222, 99, 96),
"roman coffee" : (121, 93, 76),
"romance" : (255, 254, 253),
"romantic" : (255, 210, 183),
"ronchi" : (236, 197, 78),
"roof terracotta" : (166, 47, 32),
"rope" : (142, 77, 30),
"rose" : (255, 0, 127),
"rose bud" : (251, 178, 163),
"rose bud cherry" : (128, 11, 71),
"rose fog" : (231, 188, 180),
"rose white" : (255, 246, 245),
"rose of sharon" : (191, 85, 0),
"rosewood" : (101, 0, 11),
"roti" : (198, 168, 75),
"rouge" : (162, 59, 108),
"royal blue" : ( 65, 105, 225),
"royal heath" : (171, 52, 114),
"royal purple" : (107, 63, 160),
"rum" : (121, 105, 137),
"rum swizzle" : (249, 248, 228),
"russet" : (128, 70, 27),
"russett" : (117, 90, 87),
"rust" : (183, 65, 14),
"rustic red" : ( 72, 4, 4),
"rusty nail" : (134, 86, 10),
"saddle" : ( 76, 48, 36),
"saddle brown" : ( 88, 52, 1),
"saffron" : (244, 196, 48),
"saffron mango" : (249, 191, 88),
"sage" : (158, 165, 135),
"sahara" : (183, 162, 20),
"sahara sand" : (241, 231, 136),
"sail" : (184, 224, 249),
"salem" : ( 9, 127, 75),
"salmon" : (255, 140, 105),
"salomie" : (254, 219, 141),
"salt box" : (104, 94, 110),
"saltpan" : (241, 247, 242),
"sambuca" : ( 58, 32, 16),
"<NAME>" : ( 11, 98, 7),
"<NAME>" : ( 48, 75, 106),
"<NAME>" : ( 69, 108, 172),
"sand dune" : (130, 111, 101),
"sandal" : (170, 141, 111),
"sandrift" : (171, 145, 122),
"sandstone" : (121, 109, 98),
"sandwisp" : (245, 231, 162),
"sandy beach" : (255, 234, 200),
"sandy brown" : (244, 164, 96),
"sangria" : (146, 0, 10),
"sanguine brown" : (141, 61, 56),
"santa fe" : (177, 109, 82),
"santas gray" : (159, 160, 177),
"sapling" : (222, 212, 164),
"sapphire" : ( 47, 81, 158),
"saratoga" : ( 85, 91, 16),
"satin linen" : (230, 228, 212),
"sauvignon" : (255, 245, 243),
"sazerac" : (255, 244, 224),
"scampi" : (103, 95, 166),
"scandal" : (207, 250, 244),
"scarlet" : (255, 36, 0),
"scarlet gum" : ( 67, 21, 96),
"scarlett" : (149, 0, 21),
"scarpa flow" : ( 88, 85, 98),
"schist" : (169, 180, 151),
"school bus yellow" : (255, 216, 0),
"schooner" : (139, 132, 126),
"science blue" : ( 0, 102, 204),
"scooter" : ( 46, 191, 212),
"scorpion" : (105, 95, 98),
"scotch mist" : (255, 251, 220),
"screamin' green" : (102, 255, 102),
"sea buckthorn" : (251, 161, 41),
"sea green" : ( 46, 139, 87),
"sea mist" : (197, 219, 202),
"sea nymph" : (120, 163, 156),
"sea pink" : (237, 152, 158),
"seagull" : (128, 204, 234),
"seance" : (115, 30, 143),
"seashell" : (241, 241, 241),
"seashell peach" : (255, 245, 238),
"seaweed" : ( 27, 47, 17),
"selago" : (240, 238, 253),
"selective yellow" : (255, 186, 0),
"sepia" : (112, 66, 20),
"sepia black" : ( 43, 2, 2),
"sepia skin" : (158, 91, 64),
"serenade" : (255, 244, 232),
"shadow" : (131, 112, 80),
"shadow green" : (154, 194, 184),
"shady lady" : (170, 165, 169),
"shakespeare" : ( 78, 171, 209),
"shalimar" : (251, 255, 186),
"shamrock" : ( 51, 204, 153),
"shark" : ( 37, 39, 44),
"sherpa blue" : ( 0, 73, 80),
"sherwood green" : ( 2, 64, 44),
"shilo" : (232, 185, 179),
"shingle fawn" : (107, 78, 49),
"ship cove" : (120, 139, 186),
"ship gray" : ( 62, 58, 68),
"shiraz" : (178, 9, 49),
"shocking" : (226, 146, 192),
"shocking pink" : (252, 15, 192),
"shuttle gray" : ( 95, 102, 114),
"siam" : (100, 106, 84),
"sidecar" : (243, 231, 187),
"silk" : (189, 177, 168),
"silver" : (192, 192, 192),
"silver chalice" : (172, 172, 172),
"silver rust" : (201, 192, 187),
"silver sand" : (191, 193, 194),
"silver tree" : (102, 181, 143),
"sinbad" : (159, 215, 211),
"siren" : (122, 1, 58),
"sirocco" : (113, 128, 128),
"sisal" : (211, 203, 186),
"skeptic" : (202, 230, 218),
"sky blue" : (118, 215, 234),
"slate gray" : (112, 128, 144),
"smalt" : ( 0, 51, 153),
"smalt blue" : ( 81, 128, 143),
"smoky" : ( 96, 91, 115),
"snow drift" : (247, 250, 247),
"snow flurry" : (228, 255, 209),
"snowy mint" : (214, 255, 219),
"snuff" : (226, 216, 237),
"soapstone" : (255, 251, 249),
"soft amber" : (209, 198, 180),
"soft peach" : (245, 237, 239),
"solid pink" : (137, 56, 67),
"solitaire" : (254, 248, 226),
"solitude" : (234, 246, 255),
"sorbus" : (253, 124, 7),
"sorrell brown" : (206, 185, 143),
"soya bean" : (106, 96, 81),
"spanish green" : (129, 152, 133),
"spectra" : ( 47, 90, 87),
"spice" : (106, 68, 46),
"spicy mix" : (136, 83, 66),
"spicy mustard" : (116, 100, 13),
"spicy pink" : (129, 110, 113),
"spindle" : (182, 209, 234),
"spray" : (121, 222, 236),
"spring green" : ( 0, 255, 127),
"spring leaves" : ( 87, 131, 99),
"spring rain" : (172, 203, 177),
"spring sun" : (246, | |
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import (
MinValueValidator,
MaxValueValidator,
validate_ipv6_address,
validate_ipv4_address,
)
from django.forms import (
CharField,
IntegerField,
BooleanField,
NullBooleanField,
)
from django.urls import reverse_lazy
from extras.forms import AddRemoveTagsForm
from extras.models.tags import Tag
from utilities.forms import (
CSVModelForm,
BootstrapMixin,
BulkEditNullBooleanSelect,
DynamicModelMultipleChoiceField,
TagFilterField,
StaticSelect,
CSVChoiceField,
CSVModelChoiceField,
DynamicModelChoiceField,
APISelect,
StaticSelectMultiple,
add_blank_choice,
)
from .fields import CustomDynamicModelMultipleChoiceField
from .models import NameServer, Record, Zone
class BulkEditForm(forms.Form):
    """Base form for editing multiple objects in bulk."""

    def __init__(self, model, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.model = model
        # Subclasses may declare fields that can be blanked out in bulk on
        # an inner ``Meta`` class as ``nullable_fields``.
        self.nullable_fields = getattr(self.Meta, "nullable_fields", [])
class ZoneForm(BootstrapMixin, forms.ModelForm):
    """Form for creating a new Zone object.

    TTL and SOA fields left blank by the caller are pre-populated from the
    ``netbox_dns`` plugin configuration (``zone_*`` settings).
    """

    def __init__(self, *args, **kwargs):
        """Override the __init__ method in order to provide the initial value for the default fields"""
        super().__init__(*args, **kwargs)

        # Fall back to an empty dict so a missing "netbox_dns" key in
        # PLUGINS_CONFIG does not raise AttributeError below.
        defaults = settings.PLUGINS_CONFIG.get("netbox_dns", {})

        def _initialize(initial, setting):
            # Fill in the configured default only when no value was supplied.
            if initial.get(setting, None) in (None, ""):
                initial[setting] = defaults.get(f"zone_{setting}", None)

        for setting in (
            "default_ttl",
            "soa_ttl",
            "soa_rname",
            "soa_serial_auto",
            "soa_refresh",
            "soa_retry",
            "soa_expire",
            "soa_minimum",
        ):
            _initialize(self.initial, setting)

        # The SOA TTL falls back to the zone's default TTL.
        if self.initial.get("soa_ttl", None) is None:
            self.initial["soa_ttl"] = self.initial.get("default_ttl", None)

        # With automatic serial generation, a preset serial is meaningless.
        if self.initial.get("soa_serial_auto"):
            self.initial["soa_serial"] = None

        # Resolve the configured default primary nameserver by name.
        if self.initial.get("soa_mname", None) in (None, ""):
            default_soa_mname = defaults.get("zone_soa_mname", None)
            if default_soa_mname is not None:
                try:
                    self.initial["soa_mname"] = NameServer.objects.get(
                        name=default_soa_mname
                    )
                except NameServer.DoesNotExist:
                    pass

        # Pre-select the configured default nameservers.
        if not self.initial.get("nameservers", []):
            default_nameservers = defaults.get("zone_nameservers", [])
            if default_nameservers:
                self.initial["nameservers"] = NameServer.objects.filter(
                    name__in=default_nameservers
                )

    def clean_default_ttl(self):
        # Keep the initial (configured) default TTL when the field is empty.
        return (
            self.cleaned_data["default_ttl"]
            if self.cleaned_data["default_ttl"]
            else self.initial["default_ttl"]
        )

    nameservers = CustomDynamicModelMultipleChoiceField(
        queryset=NameServer.objects.all(),
        required=False,
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False,
    )
    default_ttl = IntegerField(
        required=False,
        label="Default TTL",
        help_text="Default TTL for new records in this zone",
        validators=[MinValueValidator(1)],
    )
    soa_ttl = IntegerField(
        required=True,
        label="SOA TTL",
        help_text="TTL for the SOA record of the zone",
        validators=[MinValueValidator(1)],
    )
    soa_rname = CharField(
        required=True,
        label="SOA Responsible",
        help_text="Mailbox of the zone's administrator",
    )
    soa_serial_auto = BooleanField(
        required=False,
        label="Generate SOA Serial",
        help_text="Automatically generate the SOA Serial",
    )
    soa_serial = IntegerField(
        required=False,
        label="SOA Serial",
        help_text="Serial number of the current zone data version",
        validators=[MinValueValidator(1)],
    )
    soa_refresh = IntegerField(
        required=True,
        label="SOA Refresh",
        help_text="Refresh interval for secondary name servers",
        validators=[MinValueValidator(1)],
    )
    soa_retry = IntegerField(
        required=True,
        label="SOA Retry",
        help_text="Retry interval for secondary name servers",
        validators=[MinValueValidator(1)],
    )
    soa_expire = IntegerField(
        required=True,
        label="SOA Expire",
        help_text="Expire time after which the zone is considered unavailable",
        validators=[MinValueValidator(1)],
    )
    soa_minimum = IntegerField(
        required=True,
        label="SOA Minimum TTL",
        help_text="Minimum TTL for negative results, e.g. NXRRSET",
        validators=[MinValueValidator(1)],
    )

    class Meta:
        model = Zone
        fields = (
            "name",
            "status",
            "nameservers",
            "default_ttl",
            "tags",
            "soa_ttl",
            "soa_mname",
            "soa_rname",
            "soa_serial_auto",
            "soa_serial",
            "soa_refresh",
            "soa_retry",
            "soa_expire",
            "soa_minimum",
        )
        widgets = {
            "status": StaticSelect(),
            "soa_mname": StaticSelect(),
        }
        help_texts = {
            "soa_mname": "Primary name server for the zone",
        }
class ZoneFilterForm(BootstrapMixin, forms.Form):
    """Form for filtering Zone instances."""

    # Model this filter form operates on (used by the NetBox filter views).
    model = Zone

    # Free-text search over zone name or status.
    q = CharField(
        required=False,
        widget=forms.TextInput(attrs={"placeholder": "Name or Status"}),
        label="Search",
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(Zone.CHOICES),
        required=False,
        widget=StaticSelect(),
    )
    name = CharField(
        required=False,
        label="Name",
    )
    # Filter by one or more assigned nameservers.
    nameservers = CustomDynamicModelMultipleChoiceField(
        queryset=NameServer.objects.all(),
        required=False,
    )
    tag = TagFilterField(Zone)
class ZoneCSVForm(CSVModelForm, BootstrapMixin, forms.ModelForm):
    """CSV import form for Zone objects.

    Fields left empty in the CSV fall back to the ``zone_*`` defaults from
    the ``netbox_dns`` plugin configuration; mandatory SOA fields without a
    value and without a configured default raise a ValidationError.
    """

    status = CSVChoiceField(
        choices=Zone.CHOICES,
        help_text="Zone status",
    )
    default_ttl = IntegerField(
        required=False,
        help_text="Default TTL",
    )
    soa_ttl = IntegerField(
        required=False,
        help_text="TTL for the SOA record of the zone",
    )
    soa_mname = CSVModelChoiceField(
        queryset=NameServer.objects.all(),
        required=False,
        to_field_name="name",
        help_text="Primary name server for the zone",
        error_messages={
            "invalid_choice": "Nameserver not found.",
        },
    )
    soa_rname = CharField(
        required=False,
        help_text="Mailbox of the zone's administrator",
    )
    soa_serial_auto = BooleanField(
        required=False,
        help_text="Generate the SOA serial",
    )
    soa_serial = IntegerField(
        required=False,
        help_text="Serial number of the current zone data version",
    )
    soa_refresh = IntegerField(
        required=False,
        help_text="Refresh interval for secondary name servers",
    )
    soa_retry = IntegerField(
        required=False,
        help_text="Retry interval for secondary name servers",
    )
    soa_expire = IntegerField(
        required=False,
        help_text="Expire time after which the zone is considered unavailable",
    )
    soa_minimum = IntegerField(
        required=False,
        help_text="Minimum TTL for negative results, e.g. NXRRSET",
    )

    def _get_default_value(self, field):
        """Return the configured default for *field*, or None.

        Reads ``PLUGINS_CONFIG["netbox_dns"]["zone_<field>"]`` without
        mutating the settings dict (the previous implementation wrote a
        fallback value back into the shared configuration dictionary).
        """
        defaults = settings.PLUGINS_CONFIG.get("netbox_dns", dict())
        value = defaults.get(f"zone_{field}", None)
        # soa_ttl falls back to the configured default TTL.
        if field == "soa_ttl" and value is None:
            value = defaults.get("zone_default_ttl", None)
        return value

    def _clean_field_with_defaults(self, field):
        """Return the CSV value for *field*, or its configured default.

        :raises ValidationError: if neither a value nor a default exists.
        """
        if self.cleaned_data[field]:
            value = self.cleaned_data[field]
        else:
            value = self._get_default_value(field)

        if value is None:
            raise ValidationError(f"{field} not set and no default value available")

        return value

    def clean_default_ttl(self):
        return self._clean_field_with_defaults("default_ttl")

    def clean_soa_ttl(self):
        return self._clean_field_with_defaults("soa_ttl")

    def clean_soa_mname(self):
        return self._clean_field_with_defaults("soa_mname")

    def clean_soa_rname(self):
        return self._clean_field_with_defaults("soa_rname")

    def clean_soa_serial_auto(self):
        try:
            return self._clean_field_with_defaults("soa_serial_auto")
        except ValidationError:
            # A missing soa_serial_auto is acceptable when a serial is
            # available. Use .get(): soa_serial is cleaned *after* this
            # field, so it may not be present in cleaned_data yet.
            if self.cleaned_data.get("soa_serial") or self._get_default_value(
                "soa_serial"
            ):
                return None
            else:
                raise

    def clean_soa_serial(self):
        try:
            return self._clean_field_with_defaults("soa_serial")
        except ValidationError:
            # A missing serial is acceptable when it is auto-generated.
            # .get() guards against soa_serial_auto having failed validation.
            if self.cleaned_data.get("soa_serial_auto") or self._get_default_value(
                "soa_serial_auto"
            ):
                return None
            else:
                raise

    def clean_soa_refresh(self):
        return self._clean_field_with_defaults("soa_refresh")

    def clean_soa_retry(self):
        return self._clean_field_with_defaults("soa_retry")

    def clean_soa_expire(self):
        return self._clean_field_with_defaults("soa_expire")

    def clean_soa_minimum(self):
        return self._clean_field_with_defaults("soa_minimum")

    class Meta:
        model = Zone
        fields = (
            "name",
            "status",
            "default_ttl",
            "soa_ttl",
            "soa_mname",
            "soa_rname",
            "soa_serial_auto",
            "soa_serial",
            "soa_refresh",
            "soa_retry",
            "soa_expire",
            "soa_minimum",
        )
class ZoneBulkEditForm(BootstrapMixin, AddRemoveTagsForm, BulkEditForm):
    """Bulk edit form for Zone objects."""

    pk = forms.ModelMultipleChoiceField(
        queryset=Zone.objects.all(),
        widget=forms.MultipleHiddenInput(),
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(Zone.CHOICES),
        required=False,
        widget=StaticSelect(),
    )
    nameservers = CustomDynamicModelMultipleChoiceField(
        queryset=NameServer.objects.all(),
        required=False,
    )
    default_ttl = IntegerField(
        required=False,
        label="Default TTL",
        validators=[MinValueValidator(1)],
    )
    soa_ttl = IntegerField(
        required=False,
        label="SOA TTL",
        validators=[MinValueValidator(1)],
    )
    soa_mname = DynamicModelChoiceField(
        queryset=NameServer.objects.all(),
        required=False,
        label="SOA Primary Nameserver",
        widget=APISelect(
            attrs={
                "data-url": reverse_lazy("plugins-api:netbox_dns-api:nameserver-list")
            }
        ),
    )
    soa_rname = CharField(
        required=False,
        label="SOA Responsible",
    )
    soa_serial_auto = NullBooleanField(
        required=False,
        widget=BulkEditNullBooleanSelect(),
        label="Generate SOA Serial",
    )
    soa_serial = IntegerField(
        required=False,
        label="SOA Serial",
        # 4294967295 == 2**32 - 1, the maximum RFC 1035 serial number
        validators=[MinValueValidator(1), MaxValueValidator(4294967295)],
    )
    soa_refresh = IntegerField(
        required=False,
        label="SOA Refresh",
        validators=[MinValueValidator(1)],
    )
    soa_retry = IntegerField(
        required=False,
        label="SOA Retry",
        validators=[MinValueValidator(1)],
    )
    soa_expire = IntegerField(
        required=False,
        label="SOA Expire",
        validators=[MinValueValidator(1)],
    )
    soa_minimum = IntegerField(
        required=False,
        label="SOA Minimum TTL",
        validators=[MinValueValidator(1)],
    )

    def clean(self):
        """
        If soa_serial_auto is True, set soa_serial to None.
        """
        cleaned_data = super().clean()

        if cleaned_data.get("soa_serial_auto"):
            cleaned_data["soa_serial"] = None

        # Return the cleaned data explicitly (Form.clean() convention).
        return cleaned_data

    class Meta:
        nullable_fields = []

        model = Zone
        fields = (
            "name",
            "status",
            "nameservers",
            "default_ttl",
            "tags",
            "soa_ttl",
            "soa_rname",
            "soa_serial_auto",
            "soa_serial",
            "soa_refresh",
            "soa_retry",
            "soa_expire",
            "soa_minimum",
        )
        widgets = {
            "status": StaticSelect(),
        }
class NameServerForm(BootstrapMixin, forms.ModelForm):
    """Form for creating a new NameServer object."""

    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False,
    )

    class Meta:
        model = NameServer
        fields = ("name", "tags")
class NameServerFilterForm(BootstrapMixin, forms.Form):
    """Form for filtering NameServer instances."""

    # Model this filter form operates on (used by the NetBox filter views).
    model = NameServer

    name = CharField(
        required=False,
        label="Name",
    )
    tag = TagFilterField(NameServer)
class NameServerCSVForm(CSVModelForm, BootstrapMixin, forms.ModelForm):
    """CSV import form for NameServer objects."""

    class Meta:
        model = NameServer
        fields = ("name",)
class NameServerBulkEditForm(BootstrapMixin, AddRemoveTagsForm, BulkEditForm):
    """Bulk edit form for NameServer objects (currently tags only)."""

    pk = forms.ModelMultipleChoiceField(
        queryset=NameServer.objects.all(),
        widget=forms.MultipleHiddenInput(),
    )

    class Meta:
        nullable_fields = []

        model = NameServer
        fields = ("name", "tags")
class RecordForm(BootstrapMixin, forms.ModelForm):
    """Form for creating a new Record object."""

    def clean(self):
        """
        For A and AAAA records, verify that a valid IPv4 or IPv6 address was
        passed as value and raise a ValidationError exception otherwise.
        Also reject a second address record with the same value unless one
        of them has PTR generation disabled.
        """
        cleaned_data = super().clean()

        type = cleaned_data.get("type")
        if type not in (Record.A, Record.AAAA):
            return

        value = cleaned_data.get("value")
        ip_version = "4" if type == Record.A else "6"
        try:
            if type == Record.A:
                validate_ipv4_address(value)
            else:
                validate_ipv6_address(value)
        except ValidationError:
            raise forms.ValidationError(
                {
                    "value": f"A valid IPv{ip_version} address is required for record type {type}."
                }
            )

        if cleaned_data.get("disable_ptr"):
            return

        conflicts = Record.objects.filter(value=value, type=type, disable_ptr=False)
        if self.instance.pk:
            # Do not let an existing record conflict with itself on edit.
            conflicts = conflicts.exclude(pk=self.instance.pk)
        # exists() avoids fetching the conflicting rows just to count them.
        if conflicts.exists():
            raise forms.ValidationError(
                {
                    "value": f"There is already an {type} record with value {value} and PTR enabled."
                }
            )

    def clean_ttl(self):
        """Validate the TTL, falling back to the zone's default TTL."""
        ttl = self.cleaned_data["ttl"]
        if ttl is not None:
            if ttl <= 0:
                raise ValidationError("TTL must be greater than zero")
            return ttl
        return self.cleaned_data["zone"].default_ttl

    disable_ptr = BooleanField(
        label="Disable PTR",
        required=False,
    )
    tags = DynamicModelMultipleChoiceField(
        queryset=Tag.objects.all(),
        required=False,
    )
    ttl = IntegerField(
        required=False,
        label="TTL",
    )

    class Meta:
        model = Record
        fields = ("zone", "type", "disable_ptr", "name", "value", "ttl", "tags")

        widgets = {
            "zone": StaticSelect(),
            "type": StaticSelect(),
        }
class RecordFilterForm(BootstrapMixin, forms.Form):
    """Form for filtering Record instances."""

    # Model this filter form operates on (used by the NetBox filter views).
    model = Record

    # Free-text search over record name, zone or value.
    q = CharField(
        required=False,
        widget=forms.TextInput(attrs={"placeholder": "Name, Zone or Value"}),
        label="Search",
    )
    type = forms.MultipleChoiceField(
        choices=add_blank_choice(Record.CHOICES),
        required=False,
        widget=StaticSelectMultiple(),
    )
    name = CharField(
        required=False,
        label="Name",
    )
    value = CharField(
        required=False,
        label="Value",
    )
    # Filter by parent zone (by primary key, as supplied by the API select).
    zone_id = CustomDynamicModelMultipleChoiceField(
        queryset=Zone.objects.all(),
        required=False,
        label="Zone",
    )
    tag = TagFilterField(Record)
class RecordCSVForm(CSVModelForm, BootstrapMixin, forms.ModelForm):
zone = CSVModelChoiceField(
queryset=Zone.objects.all(),
to_field_name="name",
required=True,
help_text="Assigned zone",
)
type = CSVChoiceField(
choices=Record.CHOICES,
required=True,
help_text="Record Type",
)
ttl = IntegerField(
required=False,
help_text="TTL",
)
disable_ptr = forms.BooleanField(
required=False,
label="Disable PTR",
help_text="Disable generation of a PTR record",
)
def clean(self):
"""
For A and AAA records, verify that a valid IPv4 or IPv6 was passed as
value and raise a ValidationError exception otherwise.
"""
cleaned_data = super().clean()
type = cleaned_data.get("type")
if type not in (Record.A, Record.AAAA):
return
value = cleaned_data.get("value")
try:
ip_version = "4" if type == Record.A else "6"
if type == Record.A:
validate_ipv4_address(value)
else:
validate_ipv6_address(value)
except ValidationError:
raise forms.ValidationError(
{
"value": | |
<gh_stars>0
"""Logic for line following.
UPDATE:(Vijay - 9/27/15) - Follower is outdated since we won't be
line-following for IEEE Hardware Competition 2016. This file has
been left here for reference;
Please do not instantiate it until it's fixed.
"""
from time import time
from time import sleep
import numpy as np
import bot.lib.lib as lib
import bot.hardware.ir_hub as ir_hub_mod
import bot.driver.mec_driver as mec_driver_mod
import pid as pid_mod
import bot.hardware.color_sensor as color_sensor_mod
from error_cases import LineLostError
class Follower(object):
"""Follows a line, detects intersections and stop conditions."""
# Class variables
# Array_Conditions
Large_Object = 17 # the array sees a large object
No_Line = 16 # the array see no line
Noise = 19 # white values not next to each other
# Variables for read_binary calls
White_Black = True # False # True= white line, False= black line
set_speed = 35
THRESH = 60
    def __init__(self):
        """Set up logging, hardware subsystems, PID controllers and state."""
        # Build logger
        self.logger = lib.get_logger()
        # Build subsystems
        self.ir_hub = ir_hub_mod.IRHub()
        self.ir_hub.thrsh = 100
        self.driver = mec_driver_mod.MecDriver()
        self.color_sensor = color_sensor_mod.ColorSensor()
        # Build PIDs (one per wheel plus strafe/rotate controllers)
        # FIXME 1 no longer in use
        self.front_right = pid_mod.PID()
        self.front_right_error = 0.0
        self.front_left = pid_mod.PID()
        self.front_left_error = 0.0
        self.back_right = pid_mod.PID()
        self.back_right_error = 0.0
        self.back_left = pid_mod.PID()
        self.back_left_error = 0.0
        self.strafe = pid_mod.PID()
        self.strafe_error = 0.0
        self.rotate_pid = pid_mod.PID()
        self.rotate_error = 0.0
        self.error = "NONE"
        # motor variables
        # FIXME 2 same as before
        self.translate_speed = 75
        self.prev_rate = 0
        # state variables: IR array conditions per side of the bot
        self.heading = None  # must be initialize by caller
        self.front_state = Follower.No_Line
        self.back_state = Follower.No_Line
        self.left_state = Follower.No_Line
        self.right_state = Follower.No_Line
        # post error vars
        self.intersection = False
        self.lost_line = False
        self.timeLastUpdated = -1.0
        self.on_x = False
        # NOTE(review): update() reads self.ir_agg and self.ir_pos, which are
        # never initialized here -- they must be added before update() works
        # (the module docstring already flags this class as broken/outdated).
    @lib.api_call
    def get_translate_speed(self):
        """Return the current translation speed."""
        return self.translate_speed

    @lib.api_call
    def set_translate_speed(self, speed):
        """Set the translation speed used when following the line."""
        self.translate_speed = speed
@lib.api_call
def update(self):
"""Read IR values, compute aggregates."""
ir_readings = self.ir_hub.read_binary(Follower.White_Black)
for name, reading in ir_readings.iteritems():
reading_arr = np.int_(reading) # convert readings to numpy array
np_reading = 4
reading_sum = np.sum(np_reading) # = no. of units lit
if reading_sum > 0:
self.ir_agg[name] = (
np.sum(self.ir_pos * reading_arr) / reading_sum)
else:
self.ir_agg[name] = None
self.timeLastUpdated = time.time()
    @lib.api_call
    def get_strafe_error(self):
        """Return the accumulated strafe PID error."""
        return self.strafe_error

    @lib.api_call
    def get_rotate_error(self):
        """Return the accumulated rotation PID error."""
        return self.rotate_error
def reset_errors(self):
self.error = "NONE"
# state variables
# self.front_state = Follower.No_Line
# self.back_state = Follower.No_Line
# self.left_state = Follower.No_Line
# self.right_state = Follower.No_Line
# post error vars
self.intersection = False
self.lost_line = False
self.timeLastUpdated = -1.0
self.strafe_error = 0.0
self.rotate_error = 0.0
# reset pids
self.strafe.clear_error()
self.rotate_pid.clear_error()
return
    @lib.api_call
    def report_states(self):
        """Read all four IR arrays and log their states (debug helper).

        Unlike assign_states(), this ignores the current heading and maps
        each physical array directly to its own side.
        """
        current_ir_reading = self.ir_hub.read_binary(Follower.White_Black)
        self.front_state = self.get_position_lr(
            current_ir_reading["front"])
        # Back is on the back side
        self.back_state = self.get_position_rl(
            current_ir_reading["back"])
        # Left is on the left
        self.left_state = self.get_position_lr(
            current_ir_reading["left"])
        # right is on the right
        self.right_state = self.get_position_rl(
            current_ir_reading["right"])
        self.logger.info("front = {}".format(self.front_state))
        self.logger.info("back = {}".format(self.back_state))
        self.logger.info("left = {}".format(self.left_state))
        self.logger.info("right = {}".format(self.right_state))
    @lib.api_call
    def oscillate(self, heading=0, osc_time=0.5):
        """Oscillate sideways, increasing in amplitude until line is found

        Moves perpendicular to *heading*, alternating direction and
        increasing speed each cycle, until a line is seen or the driver's
        maximum speed is reached.

        :param heading: The forward direction of the bot.
        :param osc_time: The initial time spent in each direction.
        """
        # Time in seconds for which bot oscillates in each direction.
        # Speed at which the bot is oscillating.
        # Increase in speed after each oscillation cycle.
        # Todo(Ahmed): Find reasonable constants.
        osc_speed = 10

        # The oscillation directions, perpendicular to parameter "heading"
        angle1 = heading + 90
        angle2 = heading - 90

        self.logger.debug(
            "Pre-correction angles: angle1: {}, angle2: {}".format(
                angle1, angle2))

        # Correct angles to fit bounds.
        angle1 %= self.driver.max_angle
        angle2 %= self.driver.max_angle

        self.logger.debug(
            "Post-correction angles: angle1: {}, angle2: {}".format(
                angle1, angle2))

        # Heading may be unecessary.
        # Test headings for valid 0,360 values.
        # assert 0 <= angle1 <= 360, "angle1 is {}".format(angle1)
        # assert 0 <= angle2 <= 360, "angle2 is {}".format(angle2)

        # Todo: Consider making this a function call.
        line_not_found = True
        while line_not_found:
            # Drives in each direction.
            self.driver.move(osc_speed, angle1)
            # Passes control to find line, which moves
            # until it finds line or runs out of time.
            # Note: watch_for_line returns "line_found"
            # (bool) and "time_elapsed" (int)
            results = self.watch_for_line(osc_time)
            self.driver.move(0, 0)
            if results["line_found"]:
                line_not_found = False

            # Search in other direction.
            self.driver.move(osc_speed, angle2)
            # "time elapsed" is used as max_time for more precise movements.
            # Doubled so the bot crosses back past its starting point.
            results = self.watch_for_line(results["time_elapsed"] * 2)
            self.logger.debug(
                "Oscillation direction 1: osc_speed: {}, heading: {}".format(
                    osc_speed, heading))
            self.driver.move(0, 0)
            if results["line_found"]:
                line_not_found = False

            # If line is not found, Continue looping until line is found.
            # For now, stop when max speed is hit.
            osc_speed += 90
            if osc_speed >= self.driver.max_speed:
                line_not_found = False
def reading_contains_pattern(self, pattern, reading):
"""Search the given reading for the given pattern.
:param pattern: Pattern to search reading for.
For example, [1, 1] for a pair of consecutive ones.
:type pattern: list
:param reading: IR array reading to search for the
given pattern. Should contain only 0s and 1s.
:type reading: list
:returns: True if the pattern is in the reading, False otherwise.
"""
return "".join(map(str, pattern)) in "".join(map(str, reading))
def watch_for_line(self, max_time):
"""Recieves time period for which to continuously watch for line.
Returns True when line is found.
Returns False if line is not found before time is hit.
"""
start_time = time()
while True:
reading = self.ir_hub.read_all()
for name, array in reading.iteritems():
if self.reading_contains_pattern([1, 1], array):
return {"line_found": True,
"time_elapsed": time() - start_time}
if time() - start_time > max_time:
return {"line_found": False,
"time_elapsed": time() - start_time}
    @lib.api_call
    def assign_states(self, current_ir_reading=None):
        """ on_x=True flag does not allow intersection errors
        once left&right arrays clear intersection, on_x = false.
        Take 4x16 bit arrays and assigns the array to proper orientations.
        Note that the proper orientations are front, back, left and right.

        Sets self.error to one of "NONE", "LOST_LINE", "FRONT_LOST",
        "BACK_LOST", "ON_INTERSECTION" or "LARGE_OBJECT" and returns the
        four per-side states.

        :param current_ir_reading: optional pre-read binary IR values;
            read from the IR hub when None.
        :returns: tuple (front_state, back_state, left_state, right_state).
        """
        # Keep prev front to ignore noise conditions
        # Keep prev back state to ignore large objects on back array
        prev_front_state = self.front_state
        prev_back_state = self.back_state
        # Get the current IR readings
        if current_ir_reading is None:
            current_ir_reading = self.ir_hub.read_binary(Follower.White_Black)
        # using heading, make front/back/left/right state assignments
        self.determine_states(current_ir_reading)
        # Clear on_x flag if off line on side arrays
        # (states > 15 are the No_Line/Large_Object/Noise conditions)
        if(self.on_x and ((self.right_state > 15) or (self.left_state > 15))):
            self.on_x = False
        # Check for error conditions
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # front>15 or back>15 or (right on line AND left on line) --
        # confirm that grouping is the intended condition.
        if((self.front_state > 15) or (self.back_state > 15) or
                (self.right_state < Follower.No_Line) and
                (self.left_state < Follower.No_Line)):
            # Lost Lines Superscede other conditions
            if((self.front_state == Follower.No_Line) and
                    (self.back_state == Follower.No_Line)):
                # Front and back lost line
                self.error = "LOST_LINE"
            elif(self.front_state == Follower.No_Line):
                # Front lost line
                self.error = "FRONT_LOST"
            elif(self.back_state == Follower.No_Line):
                # Back lost line
                self.error = "BACK_LOST"
            # Intersection preceds Large Object
            elif ((not (self.right_state == Follower.No_Line) and
                    not (self.left_state == Follower.No_Line)) and
                    not self.on_x):
                # Found Intersection because left and right lit up
                # if on_x=True, ignore this error
                self.error = "ON_INTERSECTION"
            elif((self.front_state == Follower.Large_Object)):
                # Found large object on front array.
                self.error = "LARGE_OBJECT"
            else:
                if(self.back_state == Follower.Large_Object):
                    # Ignore large objects on back array
                    # by using prev back state
                    self.back_state = prev_back_state
                if(self.front_state == Follower.Noise):
                    # Ignore Noise conditions
                    self.front_state = prev_front_state
                if(self.back_state == Follower.Noise):
                    # Ignore Noise conditions
                    self.back_state = prev_back_state
                # self.error = "NONE"
        else:  # no errors
            self.error = "NONE"
        self.logger.info("FS: {}, BS {}, LS {}, RS {}".format(
            self.front_state,
            self.back_state,
            self.left_state,
            self.right_state))
        return self.front_state, self.back_state, \
            self.left_state, self.right_state
def determine_states(self, current_ir_reading):
if self.heading is None:
self.heading = 180 # use implicit default value for testing
self.logger.info("Using Test Heading = 180")
# Heading east
if self.heading == 270:
# Forward is on the left side
self.front_state = self.get_position_lr(
current_ir_reading["left"])
# Back is on the right side
self.back_state = self.get_position_rl(
current_ir_reading["right"])
# Left is on the back
self.left_state = self.get_position_lr(
current_ir_reading["back"])
# Right is on the front
self.right_state = self.get_position_rl(
current_ir_reading["front"])
# Heading west
elif self.heading == 90:
# Forward is on the right side
self.front_state = self.get_position_lr(
current_ir_reading["right"])
# Back is on the left side
self.back_state = self.get_position_rl(
current_ir_reading["left"])
# Left is on the front
self.left_state = self.get_position_lr(
current_ir_reading["front"])
# Right is on the back
self.right_state = self.get_position_rl(
current_ir_reading["back"])
# Heading south
elif self.heading == 180:
# Forward is on the front | |
<reponame>csssuf/pulumi-kubernetes
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
__all__ = [
'PodDisruptionBudgetArgs',
'PodDisruptionBudgetSpecArgs',
'PodDisruptionBudgetStatusArgs',
]
# NOTE(review): generated by pulumigen ("do not edit by hand") -- code kept
# byte-identical; comments only.
@pulumi.input_type
class PodDisruptionBudgetArgs:
    def __init__(__self__, *,
                 api_version: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
                 spec: Optional[pulumi.Input['PodDisruptionBudgetSpecArgs']] = None,
                 status: Optional[pulumi.Input['PodDisruptionBudgetStatusArgs']] = None):
        """
        PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
        :param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        :param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        :param pulumi.Input['PodDisruptionBudgetSpecArgs'] spec: Specification of the desired behavior of the PodDisruptionBudget.
        :param pulumi.Input['PodDisruptionBudgetStatusArgs'] status: Most recently observed status of the PodDisruptionBudget.
        """
        # The generator pins api_version/kind to the known constants for this
        # resource type whenever a value is supplied.
        if api_version is not None:
            pulumi.set(__self__, "api_version", 'policy/v1')
        if kind is not None:
            pulumi.set(__self__, "kind", 'PodDisruptionBudget')
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)
        if spec is not None:
            pulumi.set(__self__, "spec", spec)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter(name="apiVersion")
    def api_version(self) -> Optional[pulumi.Input[str]]:
        """
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
        """
        return pulumi.get(self, "api_version")

    @api_version.setter
    def api_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "api_version", value)

    @property
    @pulumi.getter
    def kind(self) -> Optional[pulumi.Input[str]]:
        """
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
        """
        Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
        """
        return pulumi.get(self, "metadata")

    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
        pulumi.set(self, "metadata", value)

    @property
    @pulumi.getter
    def spec(self) -> Optional[pulumi.Input['PodDisruptionBudgetSpecArgs']]:
        """
        Specification of the desired behavior of the PodDisruptionBudget.
        """
        return pulumi.get(self, "spec")

    @spec.setter
    def spec(self, value: Optional[pulumi.Input['PodDisruptionBudgetSpecArgs']]):
        pulumi.set(self, "spec", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input['PodDisruptionBudgetStatusArgs']]:
        """
        Most recently observed status of the PodDisruptionBudget.
        """
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input['PodDisruptionBudgetStatusArgs']]):
        pulumi.set(self, "status", value)
# NOTE(review): generated by pulumigen ("do not edit by hand") -- code kept
# byte-identical; comments only.
@pulumi.input_type
class PodDisruptionBudgetSpecArgs:
    def __init__(__self__, *,
                 max_unavailable: Optional[pulumi.Input[Union[int, str]]] = None,
                 min_available: Optional[pulumi.Input[Union[int, str]]] = None,
                 selector: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None):
        """
        PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
        :param pulumi.Input[Union[int, str]] max_unavailable: An eviction is allowed if at most "maxUnavailable" pods selected by "selector" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable".
        :param pulumi.Input[Union[int, str]] min_available: An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".
        :param pulumi.Input['_meta.v1.LabelSelectorArgs'] selector: Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.
        """
        if max_unavailable is not None:
            pulumi.set(__self__, "max_unavailable", max_unavailable)
        if min_available is not None:
            pulumi.set(__self__, "min_available", min_available)
        if selector is not None:
            pulumi.set(__self__, "selector", selector)

    @property
    @pulumi.getter(name="maxUnavailable")
    def max_unavailable(self) -> Optional[pulumi.Input[Union[int, str]]]:
        """
        An eviction is allowed if at most "maxUnavailable" pods selected by "selector" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with "minAvailable".
        """
        return pulumi.get(self, "max_unavailable")

    @max_unavailable.setter
    def max_unavailable(self, value: Optional[pulumi.Input[Union[int, str]]]):
        pulumi.set(self, "max_unavailable", value)

    @property
    @pulumi.getter(name="minAvailable")
    def min_available(self) -> Optional[pulumi.Input[Union[int, str]]]:
        """
        An eviction is allowed if at least "minAvailable" pods selected by "selector" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying "100%".
        """
        return pulumi.get(self, "min_available")

    @min_available.setter
    def min_available(self, value: Optional[pulumi.Input[Union[int, str]]]):
        pulumi.set(self, "min_available", value)

    @property
    @pulumi.getter
    def selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
        """
        Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.
        """
        return pulumi.get(self, "selector")

    @selector.setter
    def selector(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
        pulumi.set(self, "selector", value)
@pulumi.input_type
class PodDisruptionBudgetStatusArgs:
def __init__(__self__, *,
current_healthy: pulumi.Input[int],
desired_healthy: pulumi.Input[int],
disruptions_allowed: pulumi.Input[int],
expected_pods: pulumi.Input[int],
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['_meta.v1.ConditionArgs']]]] = None,
disrupted_pods: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
observed_generation: Optional[pulumi.Input[int]] = None):
"""
PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.
:param pulumi.Input[int] current_healthy: current number of healthy pods
:param pulumi.Input[int] desired_healthy: minimum desired number of healthy pods
:param pulumi.Input[int] disruptions_allowed: Number of pod disruptions that are currently allowed.
:param pulumi.Input[int] expected_pods: total number of pods counted by this disruption budget
:param pulumi.Input[Sequence[pulumi.Input['_meta.v1.ConditionArgs']]] conditions: Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute
the number of allowed disruptions. Therefore no disruptions are
allowed and the status of the condition will be False.
- InsufficientPods: The number of pods are either at or below the number
required by the PodDisruptionBudget. No disruptions are
allowed and the status of the condition will be False.
- SufficientPods: There are more pods than required by the PodDisruptionBudget.
The condition will be True, and the number of allowed
disruptions are provided by the disruptionsAllowed property.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] disrupted_pods: DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.
:param pulumi.Input[int] observed_generation: Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.
"""
pulumi.set(__self__, "current_healthy", current_healthy)
pulumi.set(__self__, "desired_healthy", desired_healthy)
pulumi.set(__self__, "disruptions_allowed", disruptions_allowed)
pulumi.set(__self__, "expected_pods", expected_pods)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if disrupted_pods is not None:
pulumi.set(__self__, "disrupted_pods", disrupted_pods)
if observed_generation is not None:
pulumi.set(__self__, "observed_generation", observed_generation)
    # Boilerplate accessors: each property reads/writes its value through
    # pulumi.get/pulumi.set; the @pulumi.getter decorator maps the Python
    # snake_case attribute to the camelCase Kubernetes field name.
    @property
    @pulumi.getter(name="currentHealthy")
    def current_healthy(self) -> pulumi.Input[int]:
        """
        current number of healthy pods
        """
        return pulumi.get(self, "current_healthy")
    @current_healthy.setter
    def current_healthy(self, value: pulumi.Input[int]):
        pulumi.set(self, "current_healthy", value)
    @property
    @pulumi.getter(name="desiredHealthy")
    def desired_healthy(self) -> pulumi.Input[int]:
        """
        minimum desired number of healthy pods
        """
        return pulumi.get(self, "desired_healthy")
    @desired_healthy.setter
    def desired_healthy(self, value: pulumi.Input[int]):
        pulumi.set(self, "desired_healthy", value)
    @property
    @pulumi.getter(name="disruptionsAllowed")
    def disruptions_allowed(self) -> pulumi.Input[int]:
        """
        Number of pod disruptions that are currently allowed.
        """
        return pulumi.get(self, "disruptions_allowed")
    @disruptions_allowed.setter
    def disruptions_allowed(self, value: pulumi.Input[int]):
        pulumi.set(self, "disruptions_allowed", value)
    @property
    @pulumi.getter(name="expectedPods")
    def expected_pods(self) -> pulumi.Input[int]:
        """
        total number of pods counted by this disruption budget
        """
        return pulumi.get(self, "expected_pods")
@expected_pods.setter
def expected_pods(self, value: pulumi.Input[int]):
pulumi.set(self, | |
* 2]),
int(params[pointIndex * 2 + 1])))
expectedPoints = (self._pointCount + (self.closed() and 1 or 0))
moreToCome = len(self.points) < expectedPoints
if len(self.points) > self._pointCount:
if len(self.points) > expectedPoints:
sys.stderr.write("WARNING: read too many points?!\n")
del self.points[self._pointCount:]
return moreToCome
def _readPolylineBase(params):
    """Parse the common parameter fields of a polyline-family object
    (polyline, box, polygon, arc-box, picture bounding box).

    Returns a ``(result, subLines)`` pair: `result` is the (possibly
    re-typed) object and `subLines` is the number of continuation lines
    that still have to be fed to ``result._readSub()`` (arrow
    definitions, point coordinates and -- for pictures -- the filename
    line)."""
    result = PolylineBase()
    # retainPoints is not actually necessary for PolylineBase objects:
    result.changeType(int(params[0]), retainPoints=True)
    result.lineStyle = LineStyle.read(params[1])
    result.lineWidth = int(params[2])
    result.penColor = Color.read(params[3])
    result.fillColor = Color.read(params[4])
    result.depth = int(params[5])
    result.penStyle = int(params[6])
    result.fillStyle = FillStyle.read(params[7])
    result.styleValue = float(params[8])
    result.joinStyle = JoinStyle.read(params[9])
    result.capStyle = CapStyle.read(params[10])
    result.radius = int(params[11])
    subLines = 0
    if int(params[12]):
        result.forwardArrow = True  # placeholder, replaced by _readSub()
        subLines += 1
    if int(params[13]):
        result.backwardArrow = True  # placeholder, replaced by _readSub()
        subLines += 1
    result._pointCount = int(params[14])
    # Six (x, y) pairs are stored per continuation line.  Floor division
    # keeps the count an int under Python 3 as well (identical to the old
    # Python 2 '/' for these non-negative integers).
    subLines += (result._pointCount + 5) // 6  # sublines to read for the points
    if result.closed():
        # presumably the file repeats the first point of a closed object,
        # so one fewer point is kept
        result._pointCount -= 1
    if isinstance(result, PictureBBox):
        subLines += 1  # extra sub-line carrying the image filename
    return result, subLines
class PolyBox(PolylineBase):
    """Axis-aligned, closed rectangular box object."""
    __slots__ = ()
    def __init__(self, x1, y1, x2, y2):
        PolylineBase.__init__(self)
        # corners in order, starting at (x1, y1)
        for x, y in ((x1, y1), (x2, y1), (x2, y2), (x1, y2)):
            self.points.append(Vector(x, y))
    def polylineType(self):
        """Return the polygon type code (always PolygonType.Box for
        `PolyBox` objects), see `changeType`."""
        return PolygonType.Box
    def closed(self):
        """Return True; `PolyBox` objects are always closed."""
        return True
    def center(self):
        """Return (x, y) coordinate tuple of the box midpoint."""
        p0, p2 = self.points[0], self.points[2]
        return ((p0[0] + p2[0]) / 2,
                (p0[1] + p2[1]) / 2)
    def upperLeft(self):
        """Return coordinates of the upper left corner."""
        return self.points[0]
    def lowerRight(self):
        """Return coordinates of the lower right corner."""
        return self.points[2]
    def width(self):
        """Return the horizontal extent of this box."""
        x0, x2 = self.points[0][0], self.points[2][0]
        return abs(x2 - x0)
    def height(self):
        """Return the vertical extent of this box."""
        y0, y2 = self.points[0][1], self.points[2][1]
        return abs(y2 - y0)
class ArcBox(PolyBox):
    """A `PolyBox` variant whose corners are drawn rounded."""
    __slots__ = ()
    def polylineType(self):
        """Return the polygon type code (always PolygonType.ArcBox for
        `ArcBox` objects), see `changeType`."""
        return PolygonType.ArcBox
class Polygon(PolylineBase):
    """Closed polygon object."""
    __slots__ = ()
    def __init__(self, points, closed=True):
        PolylineBase.__init__(self)
        self.points = points  # note: keeps a reference to the given list, no copy
        if not closed:
            # degrade to an open polyline, keeping the point list
            self.changeType(PolygonType.Polyline, retainPoints=True)
    def polylineType(self):
        """Return the polygon type code (always PolygonType.Polygon for
        `Polygon` objects), see `changeType`."""
        return PolygonType.Polygon
    def closed(self):
        """Return True; `Polygon` objects are always closed."""
        return True
class Polyline(PolylineBase):
    """Open polygon (polyline) object."""
    __slots__ = ()
    def __init__(self, points):
        PolylineBase.__init__(self)
        self.points = points  # note: keeps a reference to the given list, no copy
    def polylineType(self):
        """Return the polygon type code (always PolygonType.Polyline for
        `Polyline` objects), see `changeType`."""
        return PolygonType.Polyline
    def closed(self):
        """Return False; `Polyline` objects are never closed."""
        return False
class PictureBBox(PolyBox):
    """A picture embedded in an XFig file, represented by its bounding
    box.  The image path is kept in the `filename` attribute."""
    __slots__ = ()
    def __init__(self, x1, y1, x2, y2, filename, flipped=False):
        PolyBox.__init__(self, x1, y1, x2, y2)
        # storage for filename/flipped is presumably provided by a base
        # class (this class declares no slots of its own)
        self.filename = filename
        self.flipped = flipped
    def polylineType(self):
        """Return the polygon type code (always PolygonType.PictureBBox
        for `PictureBBox` objects), see `changeType`."""
        return PolygonType.PictureBBox
    def closed(self):
        """Return True; picture bounding boxes are always closed."""
        return True
# --------------------------------------------------------------------
# splines
# --------------------------------------------------------------------
class SplineBase(Object):
    """Base class of Spline objects (`ApproximatedSpline`,
    `InterpolatedSpline`, `XSpline`)."""
    __slots__ = ("points", "_shapeFactors", "_closed", "_pointCount")
    def __init__(self, points=None, shapeFactors=None, closed=True):
        Object.__init__(self)
        self.points = points or []
        self._shapeFactors = shapeFactors or []
        self._closed = closed
    def closed(self):
        """Return whether this spline curve is closed."""
        # identity check, not '!= None' (PEP 8)
        assert self._closed is not None, "SplineBase.closed(): _closed not initialized!"
        return self._closed
    def shapeFactors(self):
        """Return shape factors. The return value is fixed for
        non-XSplines. For XSplines, self._shapeFactors is used (no
        public API ATM)."""
        result = self._shapeFactors
        if not result:  # or self.defaultShapeFactor():
            # create default shapeFactors if not initialized
            result = [self.defaultShapeFactor()] * len(self.points)
            # the end points of an open spline always get factor 0;
            # guard against an empty point list (the previous code
            # indexed unconditionally and raised IndexError)
            if result and not self.closed():
                result[0] = 0.0
                result[-1] = 0.0
        return result
    def changeType(self, splineType):
        """Change type of this Spline object. `splineType` may be one
        of the `SplineType.XXX` constants:

        - SplineType.OpenApproximated
        - SplineType.ClosedApproximated
        - SplineType.OpenInterpolated
        - SplineType.ClosedInterpolated
        - SplineType.OpenXSpline
        - SplineType.ClosedXSpline

        This method may change the type of this object to another
        `SplineBase`-derived class.  Unknown values are silently
        ignored (same as the original if/elif chain)."""
        dispatch = {
            SplineType.OpenApproximated: (ApproximatedSpline, False),
            SplineType.ClosedApproximated: (ApproximatedSpline, True),
            SplineType.OpenInterpolated: (InterpolatedSpline, False),
            SplineType.ClosedInterpolated: (InterpolatedSpline, True),
            SplineType.OpenXSpline: (XSpline, False),
            SplineType.ClosedXSpline: (XSpline, True),
        }
        if splineType in dispatch:
            self.__class__, self._closed = dispatch[splineType]
    def __str__(self):
        """Serialize this spline in XFig file syntax."""
        pointCount = len(self.points)
        hasForwardArrow = 1 if self.forwardArrow is not None else 0
        hasBackwardArrow = 1 if self.backwardArrow is not None else 0
        result = self._joinWithProperties(
            ObjectType.Spline, self.splineType(),
            self.capStyle,
            hasForwardArrow, hasBackwardArrow,
            pointCount) + "\n"
        if hasForwardArrow:
            result += "\t" + str(self.forwardArrow)
        if hasBackwardArrow:
            result += "\t" + str(self.backwardArrow)
        i = self._savePointIter()
        # Python 2 idiom: map(None, *(it,)*12) groups the flat coordinate
        # stream into chunks of 12 values (6 points), padding the last
        # chunk with None.
        for linePoints in map(None, *(i,) * 12):
            result += "\t" + _join(*[p for p in linePoints if p is not None]) + "\n"
        i = iter(self.shapeFactors())
        # same chunking trick: 8 shape factors per line
        for lineSF in map(None, *(i,) * 8):
            result += "\t" + _join(*[str(sf) for sf in lineSF if sf is not None]) + "\n"
        return result
    def _savePointIter(self):
        # flatten the point list into an x, y, x, y, ... stream
        for p in self.points:
            yield p[0]
            yield p[1]
    def bounds(self):
        """Return the bounds of this object. This is not accurate at
        all, since it simply returns the bounding box of the support
        points, but the curve may well run outside of that box."""
        # FIXME
        result = Rect()
        for point in self.points:
            result(point)
        return result
    def _readSub(self, params):
        """Feed one continuation line of a spline definition.

        Returns True while more sub-lines are expected, False when the
        object is complete."""
        # An arrow flag of True is a placeholder set by _readSplineBase;
        # the first sub-line(s) carry the actual arrow definitions.
        # Identity check on purpose: once replaced, comparing an Arrow
        # instance with '== True' could invoke Arrow.__eq__.
        if self.forwardArrow is True:
            self.forwardArrow = readArrow(params)
            return True
        if self.backwardArrow is True:
            self.backwardArrow = readArrow(params)
            return True
        expectedPoints = self._pointCount
        if len(self.points) < expectedPoints:
            # (x, y) pairs; '//' keeps the count an int on Python 3 too
            pointCount = len(params) // 2
            for pointIndex in range(pointCount):
                self.points.append(Vector(int(params[pointIndex * 2]),
                                          int(params[pointIndex * 2 + 1])))
            if len(self.points) > expectedPoints:
                sys.stderr.write("WARNING: read too many points?!\n")
                del self.points[expectedPoints:]
            return True
        if len(self._shapeFactors) < expectedPoints:
            for value in params:
                self._shapeFactors.append(float(value))
            moreToCome = len(self._shapeFactors) < expectedPoints
            if len(self._shapeFactors) > expectedPoints:
                sys.stderr.write("WARNING: read too many shapeFactors?!\n")
                del self._shapeFactors[expectedPoints:]
            if moreToCome:
                return True
        return False
class ApproximatedSpline(SplineBase):
    """An approximated spline object, either open or closed."""
    __slots__ = ()
    def defaultShapeFactor(self):
        return 1.0
    def splineType(self):
        return SplineType.ClosedApproximated if self._closed else SplineType.OpenApproximated
class InterpolatedSpline(SplineBase):
    """An interpolated spline object, either open or closed."""
    __slots__ = ()
    def defaultShapeFactor(self):
        return -1.0
    def splineType(self):
        return SplineType.ClosedInterpolated if self._closed else SplineType.OpenInterpolated
class XSpline(SplineBase):
    """An 'x-spline' object, either open or closed."""
    __slots__ = ()
    def defaultShapeFactor(self):
        return 0.0  # ATT: this value is checked in shapeFactors() ATM
    def splineType(self):
        return SplineType.ClosedXSpline if self._closed else SplineType.OpenXSpline
def _readSplineBase(params):
    """Parse the parameter fields of a spline object line.

    Returns a ``(result, subLines)`` pair: `result` is the (re-typed)
    spline object and `subLines` the number of continuation lines for
    arrows and point coordinates, to be fed to ``result._readSub()``."""
    result = SplineBase()
    result.changeType(int(params[0]))
    result.lineStyle = LineStyle.read(params[1])
    result.lineWidth = int(params[2])
    result.penColor = Color.read(params[3])
    result.fillColor = Color.read(params[4])
    result.depth = int(params[5])
    result.penStyle = int(params[6])
    result.fillStyle = FillStyle.read(params[7])
    result.styleValue = float(params[8])
    result.capStyle = CapStyle.read(params[9])
    subLines = 0
    if int(params[10]):
        result.forwardArrow = True  # placeholder, replaced by _readSub()
        subLines += 1
    if int(params[11]):
        result.backwardArrow = True  # placeholder, replaced by _readSub()
        subLines += 1
    result._pointCount = int(params[12])
    # Six (x, y) pairs per continuation line; floor division keeps the
    # count an int under Python 3 as well (identical to the old Python 2
    # '/' for these non-negative integers).
    subLines += (result._pointCount + 5) // 6  # sublines to read for the points
    # NOTE(review): the shape-factor sub-lines (8 values per line, cf.
    # SplineBase.__str__) are not included in this count -- presumably the
    # caller relies on _readSub()'s return value instead; verify.
    return result, subLines
# --------------------------------------------------------------------
# text objects
# --------------------------------------------------------------------
class Text(Object):
    """Represents a text object. Text instances have a number of
    extra attributes:

    - text (the string)
    - pos (the position, indexable as (x, y))
    - alignment (cf. `Alignment.XXX` constants)
    - font (cf. Font.XXX constants)
    - fontSize (default: 12)
    - fontFlags (cf. `FontFlag.XXX` constants, default: FontFlag.PostScript)
    - angle (default: 0.0)
    - length, height (dummy values, no guarantee about correctness)
    """
    __slots__ = ("text", "pos", "alignment",
                 "font", "fontSize", "fontFlags", "angle",
                 "length", "height")
    def __init__(self, pos, text,
                 font=None, fontSize=12, fontFlags=FontFlag.PostScript,
                 alignment=Alignment.Left, angle=0.0):
        Object.__init__(self)
        self.pos = pos
        self.text = text
        self.font = font
        self.fontSize = fontSize
        self.fontFlags = fontFlags
        self.alignment = alignment
        self.angle = angle
        # dummy extent estimates; 136 equals _guessHeight() for the
        # default fontSize of 12
        self.height = 136
        self.length = 100 # dummy value
    def _guessHeight(self):
        """Guessed height of font in fig units."""
        # NOTE: relies on Python 2 integer division when fontSize is an
        # int (12 * 34 / 3 == 136)
        return self.fontSize * 34 / 3
    def _length(self):
        """FIXME: If this is corrected, remove underscore prefix."""
        return 100
    def bounds(self):
        """Return an approximate bounding `Rect` based on the dummy
        `length`/`height` estimates and the alignment."""
        result = Rect()
        if self.alignment == Alignment.Left:
            result((self.pos[0], self.pos[1] - self.height))
            result((self.pos[0] + self.length, self.pos[1]))
        elif self.alignment == Alignment.Centered:
            result((self.pos[0] - self.length / 2, self.pos[1] - self.height))
            result((self.pos[0] + self.length / 2, self.pos[1]))
        elif self.alignment == Alignment.Right:
            # NOTE(review): identical to the Left case -- right-aligned
            # text probably should extend to the left of pos; confirm.
            result((self.pos[0], self.pos[1] - self.height))
            result((self.pos[0] + self.length, self.pos[1]))
        return result
# def __str__(self):
# font = self.font
# if self.font is None:
# font = self.fontFlags & FontFlag.PostScript \
# and fontDefault or LaTeXFont.Default
# result = | |
"""
print("REFRESH")
self.download_data()
return
print("PROCESS")
self.process_data()
print("OK")
self.data_status = "current"
self.cds.selected.on_change('indices',self.on_selection_change_callback)
self.country_select.options=sorted(self.adfCountryData.keys())
self.compute_data_status()
self.country_select.value = "Germany"
def on_selection_change_callback(self,attr,old,new):
"""Handling of (de)selecting data in the time series plots.
If a selection is made, it unlocks the SAVE button.
Also, the heatmap display would not sync itself to the range selection so this is done here,
"""
# (un)lock Save button
if len(self.cds.selected.indices) > 0:
self.save.disabled = False
else:
self.save.disabled = True
# make selection in the heatmap
dates = []
for i in self.cds.selected.indices:
dates.append(self.cds.data["datetime_date"][i])
selection = []
i = 0
for d in self.cds_OxCGRTHeatmap.data["datetime_date"]:
if d in dates:
selection.append(i)
i += 1
self.cds_OxCGRTHeatmap.selected.indices = selection
def gumbel_choices_callback(self,attr,old,new):
removed = list(set(old)-set(new))
added = list(set(new)-set(old))
self.gumbel_visibility_lock = True # interlock to avoid ping-pong of callbacks
for v in removed:
idx = int(v)
self.gumbel_wave_renderers[idx].visible = False
for v in added:
idx = int(v)
self.gumbel_wave_renderers[idx].visible = True
num_datapoints = len(self.cds_gumbel_waves.data["summed_waves"])
summed_data = [0. for i in range(num_datapoints)]
chosen_waves = []
for w in new:
wave = "wave_{}".format(w)
for j in range(num_datapoints):
summed_data[j] += self.cds_gumbel_waves.data[wave][j]
self.cds_gumbel_waves.data["summed_waves"] = summed_data
self.gumbel_visibility_lock = False # interlock to avoid ping-pong of callbacks
def compute_metrics(self,bins=25):
"""using self.dfVotesContent, this computes stats for display
"""
conn = self.engine.connect()
try:
ddf = pd.read_sql("select DISTINCT from_dt,to_dt,user,kind,vote_id,rel_peak_new_cases,duration from cookiecutter_verdicts",conn)
except:
ddf = pd.DataFrame()
if len(ddf) > 1:
#ddf = self.dfVotesContent[["from","to","user","kind","filename","rel_peak_new_cases","duration"]].drop_duplicates()
sWaveDurations = ddf[ddf["kind"] == "Wave"].duration
y, x_tmp = np.histogram(sWaveDurations,bins=bins)
width = np.diff(x_tmp)
x = [x_tmp[0]+i*width[i] for i in range(len(y))]
self.cds_wave_duration_histogram.data = {"x":x,"y":y,"color":["tomato" for i in x],"width":width}
sCalmDurations = ddf[ddf["kind"] == "Calm"].duration
y, x_tmp = np.histogram(sCalmDurations,bins=bins)
width = np.diff(x_tmp)
x = [x_tmp[0]+i*width[i] for i in range(len(y))]
self.cds_calm_duration_histogram.data = {"x":x,"y":y,"color":["mediumseagreen" for i in x],"width":width}
peak_cutoff = np.quantile(ddf.rel_peak_new_cases,0.95)
ddf = ddf[ddf["kind"] == "Wave"]
dfHeatmap_tmp = ddf[ddf.rel_peak_new_cases < peak_cutoff][["rel_peak_new_cases","duration"]].copy()
dfHeatmap_tmp["n"] = 1
try:
dfTmp = dfHeatmap_tmp.groupby([pd.cut(dfHeatmap_tmp.rel_peak_new_cases, bins),pd.cut(dfHeatmap_tmp.duration, bins)]).n.sum().unstack()
dfHeatmapData = dfTmp.stack().reset_index().rename(columns={0:"n"}).dropna()
dfHeatmapData["rpc"] = [i.mid for i in dfHeatmapData.rel_peak_new_cases.values]
dfHeatmapData["d"] = [i.mid for i in dfHeatmapData.duration.values]
h = dfHeatmapData.loc[0].duration.length
w = dfHeatmapData.loc[0].rel_peak_new_cases.length
self.cds_votes_heatmap.data = {"n":dfHeatmapData.n.values,"rpc":dfHeatmapData.rpc.values,"d":dfHeatmapData.d.values,
"h":[h for i in dfHeatmapData.index],"w":[w for i in dfHeatmapData.index]}
except:
pass
    def save_callback(self,event):
        """Persist the currently selected date range as a new verdict in
        the cookiecutter_verdicts database table, update the status bar
        with a corresponding message, and reset the selection (which
        implicitly locks the SAVE button again via
        on_selection_change_callback).
        """
        # Save selection
        conn = self.engine.connect()
        # NOTE(review): MAX(vote_id)+1 is racy if two sessions save at
        # the same time -- confirm the single-writer assumption.
        try:
            result = conn.execute("SELECT MAX(vote_id) FROM cookiecutter_verdicts")
            vote_id = result.fetchone()[0]+1
            have_verdict_table = True
        # NOTE(review): the bare except covers "table does not exist
        # yet", but it also hides real DB errors.
        except:
            print("CANNOT RETRIEVE VOTE_ID")
            have_verdict_table = False
            vote_id = 1
        #print("VOTE ID = {}".format(vote_id))
        # restrict the frame to the selected rows only
        df = self.cds.to_df().iloc[self.cds.selected.indices]
        if max(df["new_cases_rel"]) < 0:
            # new_cases_rel is -1 when no population data was available
            df["rel_peak_new_cases"] = 0.
        else:
            df["rel_peak_new_cases"] = max(df["new_cases_rel"])
        # selections reaching the last datapoint get an "_act" suffix
        # (presumably "still active"; confirm)
        max_selected = max(self.cds.selected.indices)
        if max_selected >= len(self.cds.data["new_cases"])-1:
            df["kind"] = self.scenario_type.labels[self.scenario_type.active]+"_act"
        else:
            df["kind"] = self.scenario_type.labels[self.scenario_type.active]
        df["kind_counter"] = int(self.scenario_number.value)
        df["from_dt"] = df.datetime_date.min()
        df["to_dt"] = df.datetime_date.max()
        # duration of the selected episode in days
        df["duration"] = (pd.to_datetime(df.datetime_date.max())-pd.to_datetime(df.datetime_date.min())).total_seconds()/86400
        df["user"] = self.user_id.value ### change to user_name
        #ADM0_A3 = self.adm0_a3 #dfMapping[self.dfMapping.name == self.country_select.value].adm0_a3.values[0]
        df["identifier"] = self.identifier
        df["vote_datetime"] = datetime.datetime.now()
        df["vote_id"] = vote_id
        df.datetime_date = pd.to_datetime(df.datetime_date)
        #print(df.datetime_date)
        # attach the gumbel wave decomposition for the selected range
        ddf = self.cds_gumbel_waves.to_df()
        ddf = ddf[(df.datetime_date.min() <= ddf.datetime)&(ddf.datetime <= df.datetime_date.max())]
        ddf.datetime = pd.to_datetime(ddf.datetime)
        i = 0
        # mark hidden waves with -1 and drop all-zero wave columns
        for r in self.gumbel_wave_renderers:
            if not r.visible:
                ddf["wave_{:02d}".format(i)] = -1
            elif ddf["wave_{:02d}".format(i)].sum() == 0:
                del ddf["wave_{:02d}".format(i)]
            i += 1
        df = df.merge(ddf,left_on="datetime_date",right_on="datetime").rename(columns={"trend_x":"trend"})
        del df["trend_y"]
        del df["datetime"]
        # amend schema if necessary
        if have_verdict_table:
            meta = sqlalchemy.MetaData()
            schema = sqlalchemy.Table("cookiecutter_verdicts",meta, autoload=True, autoload_with=conn)
            existing_wave_columns = []
            for c in schema.columns:
                if "wave" in c.name.lower():
                    existing_wave_columns.append(c.name.lower())
            # NOTE(review): column names are interpolated into DDL; they
            # originate from our own "wave_NN" naming, not user input.
            for c in df.columns:
                if "wave" in c:
                    if c not in existing_wave_columns:
                        print("ALTER TABLE cookiecutter_verdicts ADD COLUMN {} FLOAT;".format(c))
                        conn.execute("ALTER TABLE cookiecutter_verdicts ADD COLUMN {} FLOAT;".format(c))
        # Avoid loss of precision SQL errors for tiny numbers
        for c in df.columns:
            if "wave" in c:
                df[c] = df[c].clip(lower=0.1)
        df.to_sql("cookiecutter_verdicts", conn, if_exists='append', dtype={"from_dt":sqlalchemy.types.Date,
                                                                           "to_dt":sqlalchemy.types.Date,
                                                                           "datetime_date":sqlalchemy.types.DateTime,
                                                                           "vote_datetime":sqlalchemy.types.DateTime,
                                                                           "kind":sqlalchemy.types.String(10),
                                                                           "user":sqlalchemy.types.String(50),
                                                                           "identifier":sqlalchemy.types.String(10)},
                  index=False,chunksize=25,method=None)
        conn.close()
        # reset selection
        self.cds.selected.indices = []
        # update message field
        #self.progress_bar_info_message.text = "Saved selection to {}".format(filename)
        self.progress_bar_info_message.text = "Saved selection to table cookiecutter_verdicts, vote_id {}".format(vote_id)
        self.progress_bar_data.data["color"] = ["limegreen"]
        # reset scenario field
        self.scenario_name.value = self.country_select.value + " wave calm #"
        ##self.dfVotesContent = self.dfVotesContent.append(df)
        #self.dfVotesContent[self.dfVotesContent.infection_rate_7 > 1000.] = 0.
        ##self.dfVotesContent["filename"] = filename
        ##self.dfVotesContent.to_pickle("./data/votes.pickle",protocol=3)
        #print(self.dfVotesContent)
        self.compute_metrics()
def change_dataset(self,attr,old,new):
sql_query = "SELECT DISTINCT name FROM COOKIECUTTER_CASE_DATA WHERE data_source='{}';".format(new)
conn = self.engine.connect()
df = pd.read_sql(sql_query,conn)
self.country_select.options = sorted(df["name"].values)
if "Germany" in df["name"].values:
self.country_select.value = "Germany"
elif "US-NC North Carolina" in df["name"].values:
self.country_select.value = "US-NC North Carolina"
else:
self.country_select.value = sorted(df["name"].values)[0]
def gumbel_plot_callback(self,attr,old,new):
if self.gumbel_visibility_lock: # interlock to avoid ping-pong of callbacks
return
num_datapoints = len(self.cds_gumbel_waves.data["summed_waves"])
summed_data = [0. for i in range(num_datapoints)]
chosen_waves = []
i = 0
for r in self.gumbel_wave_renderers:
wave = "wave_{:02d}".format(i)
if r.visible:
chosen_waves.append("{:02d}".format(i))
for j in range(num_datapoints):
summed_data[j] += self.cds_gumbel_waves.data[wave][j]
i += 1
self.gumbel_choices.value = chosen_waves
self.cds_gumbel_waves.data["summed_waves"] = summed_data
def change_country(self,attr,old,new):
"""Handle change of country to be displayed.
This generates a dict style data structure that can be used to overwrite the various ColumnDataSource s
that are used to display the values. This is the common bokeh pattern to update a display.
Commonly made mistake is to re-create a new ColumnDataSource and try to squeeze in a pandas
dataframe directly, this, however, will not update the plot.
"""
# fresh data for the time series plots
sql_query = """SELECT cookiecutter_case_data.*,oxford_stringency_index.*
FROM cookiecutter_case_data
INNER JOIN oxford_stringency_index ON cookiecutter_case_data.identifier = oxford_stringency_index.countrycode AND
cookiecutter_case_data.datetime_date = oxford_stringency_index.datetime_date
WHERE cookiecutter_case_data.name='{}' AND oxford_stringency_index.regionname IS NULL
AND cookiecutter_case_data.data_source ='{}'
ORDER BY cookiecutter_case_data.datetime_date;""".format(new,self.dataset_select.value)
conn = self.engine.connect()
dfData = pd.read_sql(sql_query, conn)
if len(dfData) == 0: # most likely no OxCGRT dataset
have_OxCGRT = False
sql_query = "SELECT * FROM cookiecutter_case_data WHERE cookiecutter_case_data.name='{}' AND cookiecutter_case_data.data_source ='{}' ORDER BY cookiecutter_case_data.datetime_date;".format(new,self.dataset_select.value)
dfData = pd.read_sql(sql_query, conn)
print("sql_query {}".format(sql_query))
else:
have_OxCGRT = True
# new we have a clone of datetime_date
dfRubbish = dfData.datetime_date
del dfData["datetime_date"]
dfRubbish.columns=["rubbish","datetime_date"]
#print(dfRubbish)
#dfData.index = dfRubbish.datetime_date
dfData["datetime_date"] = dfRubbish.datetime_date
dfData.index.name = None
sql_query = "SELECT population FROM population_data WHERE identifier='{}'".format(dfData.identifier.unique()[0])
try:
population = int(conn.execute(sql_query).fetchone()[0])
dfData['new_cases_rel'] = dfData['new_cases']/population
except:
dfData['new_cases_rel'] = -1.
print("NO POPULATION DATA FOR identifer ({}) name ({})".format(dfData.identifier.unique()[0],new))
newdata = {}
#print(self.cds.data.keys())
for column in self.cds.data.keys():
if column == "index" or column == "Timestamp" or column == "datetime_date":
#newdata[column] = self.adfCountryData[new].index
newdata[column] = dfData.datetime_date #datetime_date
elif "wave_" in column or column == "summed_waves":
continue
elif column in dfData.columns: # cater for reduced oxCGRT missing queries
newdata[column] = np.nan_to_num(dfData[column])
else:
#newdata[column] = np.nan_to_num(self.adfCountryData[new][column])
if have_OxCGRT:
newdata[column] = np.nan_to_num(dfData[column])
else:
newdata[column] = [-1 for i in range(len(dfData["new_cases"]))]
self.cds.data = newdata
self.scenario_number.value = 1
self.scenario_type.active = 0
# rescale the y axes that require it
#df = self.cds.to_df()
#self.p_top.extra_y_ranges["active"].end = dfData.active.dropna().values.max()*1.1
self.p_top.extra_y_ranges["new_cases"].end = dfData.new_cases.dropna().values.max()*1.1
# now the same thing for the OxCGRT heatmap
ddf = pd.read_sql("SELECT * FROM oxford_stringency_index WHERE countrycode='{}' AND regionname IS NULL;".format(dfData.identifier.unique()[0]),conn,index_col="datetime_date")
dfMeasures = pd.DataFrame(ddf[self.fields_of_interest].stack(), columns=["level"]).reset_index().rename(columns={"level_1":"class"})
newdata = {}
print("HAVE OXCGRT {}".format(have_OxCGRT))
for column in self.cds_OxCGRTHeatmap.data.keys():
if column == "index" or column == "Timestamp":
newdata[column] = dfMeasures.index
#newdata[column] = dfData.datetime_date
else:
if have_OxCGRT:
newdata[column] = np.nan_to_num(dfMeasures[column])
else:
newdata[column] = []
#try: ####
# newdata[column] = np.nan_to_num(dfData[column])
#except:
# print("MISSING COLUMN {}".format(column))
# newdata[column] = [0. for i in dfData.index]
self.cds_OxCGRTHeatmap.data = newdata
# now revisit the background boxes, initially make them all invisible
for b in self.wave_boxes:
b.visible = False
b.fill_color = "#F1F1F1"
# each background box is a BoxAnnotation. Loop through the episode data and color/display them as required.
conn = self.engine.connect()
# fix for #8
dfWaves = pd.read_sql("SELECT * from cookiecutter_computed_waves_chgpoint WHERE name='{}' AND data_source='{}'".format(new,self.dataset_select.value),conn)
conn.close()
last = "new"
box_no = 0
for i,row in dfWaves.iterrows():
if row["kind"] == "begin":
left = row["datetime_date"]
if last == "end":
try:
self.wave_boxes[box_no].left = right
except:
self.wave_boxes[box_no].left = left
self.wave_boxes[box_no].right = left
self.wave_boxes[box_no].visible = True
self.wave_boxes[box_no].fill_color = "#00FF00"
self.wave_boxes[box_no].fill_alpha = 0.05
box_no += 1
last = "begin"
elif row["kind"] == "end":
right = row["datetime_date"]
try:
self.wave_boxes[box_no].left = left
except:
self.wave_boxes[box_no].left = right
self.wave_boxes[box_no].right = right
self.wave_boxes[box_no].visible = True
self.wave_boxes[box_no].fill_color = "#FF0000"
self.wave_boxes[box_no].fill_alpha = 0.05
box_no += 1
last = "end"
if last == "begin":
self.wave_boxes[box_no].left = left
self.wave_boxes[box_no].right = None
self.wave_boxes[box_no].visible = True
self.wave_boxes[box_no].fill_color = "#FF0000"
self.wave_boxes[box_no].fill_alpha = 0.05
elif last == "end":
self.wave_boxes[box_no].left = right
self.wave_boxes[box_no].right = None
| |
import sys
import requests
import os
# Used for debugging gpu errors
# os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import pandas as pd
import networkx as nx
import utils as u
import torch
import torch.distributed as dist
import numpy as np
import time
import datetime
import random
from copy import deepcopy
from datareaders.datareader import Datareader
# taskers
import link_pred_tasker as lpt
# models
from models import egcn_components as mls
from models.gcn import GCN
from models.gat import GAT
from models import egcn_h_old
from models import egcn_o_old
from models.gclstm import GCLSTM
from models.tgat import TGAT
from models.tgatneighborfinder import NeighborFinder as TGAT_NeighborFinder
from models.tgn import TGN
from models.tgn_utils.utils import get_neighbor_finder as TGN_get_neighbor_finder
from models.tgn_utils.utils import (
compute_time_statistics as TGN_compute_time_statistics,
)
import splitter as sp
import Cross_Entropy as ce
import trainer as tr
import heuristictrainer as htr
def random_param_value(param, param_min, param_max, type="int"):
    """Return `param` if it is configured, otherwise draw a random value
    from [param_min, param_max].

    Parameters
    ----------
    param : the configured value; None (or the string "none"/"None",
        case-insensitive) means "not configured, sample randomly".
    param_min, param_max : inclusive bounds for the random draw.
    type : "int" for a uniform integer, "logscale" for a draw from a
        100-point log-spaced grid, anything else for a uniform float.
        (The parameter name shadows the builtin `type`; kept for
        backward compatibility with keyword callers.)
    """
    # The original tested `str(param) is None`, which can never be true;
    # test `param` itself (the string check still catches "none").
    if param is None or str(param).lower() == "none":
        if type == "int":
            return random.randrange(param_min, param_max + 1)
        elif type == "logscale":
            interval = np.logspace(np.log10(param_min), np.log10(param_max), num=100)
            return np.random.choice(interval, 1)[0]
        else:
            return random.uniform(param_min, param_max)
    else:
        return param
def prepare_args(args):
    """Validate and normalize the parsed run configuration in place.

    Sets ``args.heuristic`` and ``args.temporal_granularity`` from
    ``args.model``, validates the num_hist_steps/granularity combination
    and resolves the layer_2_feats_same_as_l1 shortcut in
    ``args.gcn_parameters``.

    Returns the (mutated) args object.

    Raises
    ------
    NotImplementedError : unknown model name.
    ValueError : expanding/static window combined with a non-static
        temporal granularity.
    """
    heuristics = [
        "cn",
        "aa",
        "jaccard",
        "newton",
        "ccpa",
        "random_heuristic",
        "random_adaptive",
    ]
    static_models = [
        "gcn",
        "gat",
        "random",
    ]  # seal implementation cancelled due to scaling issues
    discrete_models = ["egcn_o", "egcn_h", "gclstm", "egcn_h_old", "egcn_o_old"]
    continuous_models = ["tgat", "tgn"]
    args.heuristic = args.model in heuristics
    # 'not in' instead of 'not x in' (PEP 8)
    if args.model not in static_models + discrete_models + continuous_models + heuristics:
        raise NotImplementedError("Model {} not found".format(args.model))
    elif args.model in static_models or args.model in heuristics:
        args.temporal_granularity = "static"
    elif args.model in discrete_models:
        args.temporal_granularity = "discrete"
    elif args.model in continuous_models:
        args.temporal_granularity = "continuous"
    if (
        args.num_hist_steps in ["expanding", "static"]
        and args.temporal_granularity != "static"
    ):
        raise ValueError(
            "An expanding or static time window can only be used with static temporal granularity"
        )
    if hasattr(args, "gcn_parameters"):
        if args.gcn_parameters["layer_2_feats_same_as_l1"]:
            args.gcn_parameters["layer_2_feats"] = args.gcn_parameters["layer_1_feats"]
        # NOTE(review): this second check looks like it was meant to copy
        # the LSTM feature sizes, but it tests a key with a typo
        # ("..._name_as_l1") and then repeats the assignment above --
        # effectively a no-op duplicate.  Preserved as-is; confirm intent
        # before "fixing".
        if (
            "lstm_l2_feats_name_as_l1" in args.gcn_parameters.keys()
        ) and args.gcn_parameters["layer_2_feats_same_as_l1"]:
            args.gcn_parameters["layer_2_feats"] = args.gcn_parameters["layer_1_feats"]
    return args
def build_tasker(args, dataset, temporal_granularity):
    """Instantiate the tasker object matching ``args.task``.

    Only the link-prediction tasker consumes the temporal granularity; the
    classification taskers take just (args, dataset).
    Raises NotImplementedError for any other task name.
    """
    task = args.task
    if task == "link_pred":
        return lpt.Link_Pred_Tasker(args, dataset, temporal_granularity)
    if task == "edge_cls":
        return ect.Edge_Cls_Tasker(args, dataset)
    if task == "node_cls":
        return nct.Node_Cls_Tasker(args, dataset)
    if task == "static_node_cls":
        return nct.Static_Node_Cls_Tasker(args, dataset)
    raise NotImplementedError("still need to implement the other tasks")
def build_gcn(args, tasker, dataset, splitter, feats_per_node):
    """Construct the encoder model named by ``args.model`` on ``args.device``.

    Returns ``(gcn, args)`` -- args is returned as well because the
    continuous models (tgat/tgn) overwrite
    ``args.gcn_parameters["layer_2_feats"]`` with the node-feature width so
    the downstream classifier is sized correctly.
    """
    gcn_args = u.Namespace(args.gcn_parameters)
    gcn_args.feats_per_node = feats_per_node
    if args.model == "simplegcn":  # Same as 'gcn' only manually implemented
        gcn = mls.Sp_GCN(gcn_args, activation=torch.nn.RReLU()).to(args.device)
    elif args.model == "gcn":  # GCN but the PyGeometric implementation
        gcn = GCN(gcn_args, activation=torch.nn.RReLU()).to(args.device)
    elif args.model == "gat":
        gcn = GAT(gcn_args, activation=torch.nn.RReLU()).to(args.device)
    elif args.model == "gclstm":
        gcn = GCLSTM(gcn_args, activation=torch.nn.RReLU()).to(args.device)
    elif args.model == "skipgcn":
        gcn = mls.Sp_Skip_GCN(gcn_args, activation=torch.nn.RReLU()).to(args.device)
    elif args.model == "skipfeatsgcn":
        gcn = mls.Sp_Skip_NodeFeats_GCN(gcn_args, activation=torch.nn.RReLU()).to(
            args.device
        )
    elif args.model == "tgat":
        # Optional flag; absent attribute counts as False.
        force_random_edge_features = (
            hasattr(args, "force_random_edge_features")
            and args.force_random_edge_features == True
        )
        neighborhood_finder = TGAT_NeighborFinder(dataset)
        edge_features, node_features = u.get_initial_features_continuous(
            args, gcn_args, dataset, force_random_edge_features
        )
        print(
            "edge feature and node features size",
            edge_features.shape,
            node_features.shape,
        )
        # Keep the classifier input width in sync with the node features.
        args.gcn_parameters["layer_2_feats"] = node_features.shape[1]
        gcn = TGAT(
            gcn_args,
            neighborhood_finder,
            node_features,
            edge_features,
            num_layers=gcn_args.num_layers,
            n_head=gcn_args.attention_heads,
            drop_out=gcn_args.dropout,
            device=args.device,
        ).to(args.device)
    elif args.model == "tgn":
        # Default values
        n_neighbors = 20
        uniform = False  # Uniform_neighborhood_finder_sampling
        message_dim = 100
        memory_update_at_end = (
            False  # Update memory at the beginning or at the end of the batch
        )
        embedding_module = "graph_attention"  # choices=["graph_attention", "graph_sum", "identity", "time"]
        message_function = "identity"  # choices=['identity', 'mlp']
        aggregator = "last"
        memory_updater = "gru"  # choices=['gru', 'rnn']
        use_destination_embedding_in_message = False
        use_source_embedding_in_message = False
        neighborhood_finder = TGN_get_neighbor_finder(dataset, uniform)
        # Optional flag; absent attribute counts as False.
        force_random_edge_features = (
            hasattr(args, "force_random_edge_features")
            and args.force_random_edge_features == True
        )
        edge_features, node_features = u.get_initial_features_continuous(
            args, gcn_args, dataset, force_random_edge_features
        )
        # Keep the classifier input width in sync with the node features;
        # the TGN memory width mirrors it as well.
        args.gcn_parameters["layer_2_feats"] = node_features.shape[1]
        memory_dim = node_features.shape[1]
        # Compute time statistics
        sources = dataset.edges["idx"][:, dataset.cols.source]
        destinations = dataset.edges["idx"][:, dataset.cols.target]
        timestamps = dataset.edges["idx"][:, dataset.cols.time]
        (
            mean_time_shift_src,
            std_time_shift_src,
            mean_time_shift_dst,
            std_time_shift_dst,
        ) = TGN_compute_time_statistics(sources, destinations, timestamps)
        gcn = TGN(
            neighbor_finder=neighborhood_finder,
            node_features=node_features,
            edge_features=edge_features,
            device=args.device,
            n_layers=gcn_args.num_layers,
            n_heads=gcn_args.attention_heads,
            dropout=gcn_args.dropout,
            use_memory=gcn_args.use_memory,
            message_dimension=message_dim,
            memory_dimension=memory_dim,
            memory_update_at_start=not memory_update_at_end,
            embedding_module_type=embedding_module,
            message_function=message_function,
            aggregator_type=aggregator,
            memory_updater_type=memory_updater,
            n_neighbors=n_neighbors,
            mean_time_shift_src=mean_time_shift_src,
            std_time_shift_src=std_time_shift_src,
            mean_time_shift_dst=mean_time_shift_dst,
            std_time_shift_dst=std_time_shift_dst,
            use_destination_embedding_in_message=use_destination_embedding_in_message,
            use_source_embedding_in_message=use_source_embedding_in_message,
        ).to(args.device)
    elif args.model == "random":
        gcn = mls.Random(gcn_args, args.device).to(args.device)
    else:
        # Remaining models are recurrent variants and need a temporal window.
        assert args.num_hist_steps > 0, "more than one step is necessary to train LSTM"
        if args.model == "lstmA":
            gcn = mls.Sp_GCN_LSTM_A(gcn_args, activation=torch.nn.RReLU()).to(
                args.device
            )
        elif args.model == "gruA":
            gcn = mls.Sp_GCN_GRU_A(gcn_args, activation=torch.nn.RReLU()).to(
                args.device
            )
        elif args.model == "lstmB":
            gcn = mls.Sp_GCN_LSTM_B(gcn_args, activation=torch.nn.RReLU()).to(
                args.device
            )
        elif args.model == "gruB":
            gcn = mls.Sp_GCN_GRU_B(gcn_args, activation=torch.nn.RReLU()).to(
                args.device
            )
        # NOTE(review): the EGCN constructors below are not moved with
        # .to(args.device) here, unlike the models above -- presumably the
        # EGCN classes handle device placement via their ``device`` argument.
        elif args.model == "egcn_h":
            gcn = egcn_h.EGCN(gcn_args, activation=torch.nn.RReLU(), device=args.device)
        elif args.model == "egcn_o":
            gcn = egcn_o.EGCN(gcn_args, activation=torch.nn.RReLU(), device=args.device)
        elif args.model == "egcn_h_old":
            gcn = egcn_h_old.EGCN(
                gcn_args, activation=torch.nn.RReLU(), device=args.device
            )
        elif args.model == "egcn_o_old":
            gcn = egcn_o_old.EGCN(
                gcn_args, activation=torch.nn.RReLU(), device=args.device
            )
        elif args.model == "skipfeatsegcn_h":
            gcn = egcn_h.EGCN(
                gcn_args,
                activation=torch.nn.RReLU(),
                device=args.device,
                skipfeats=True,
            )
        else:
            raise NotImplementedError("simple Model not found")
    return gcn, args
def build_heuristic(args):
    """Return a link-prediction heuristic selected by ``args.model``.

    Each returned callable has the signature ``f(G, ebunch=None)`` and yields
    ``(u, v, score)`` triples, mirroring the networkx link-prediction API.
    Model names with no branch here (e.g. the random heuristics) fall through
    and return ``None``, matching the original behavior.
    """
    # Implementations inspired by networkx https://github.com/networkx/networkx/blob/93c99da588bf5b31c42cbad7de09f96f1754dbf7/networkx/algorithms/link_prediction.py
    def _apply_prediction(G, func, ebunch=None):
        # Default to scoring every non-edge of G.
        if ebunch is None:
            ebunch = nx.non_edges(G)
        return ((u, v, func(u, v)) for u, v in ebunch)

    model = args.model
    if model == "aa":
        return nx.adamic_adar_index
    if model == "jaccard":
        return nx.jaccard_coefficient
    if model == "cn":
        # Common-neighbor count.
        def predict_cn(G, ebunch=None):
            def cn(u, v):
                return len(list(nx.common_neighbors(G, u, v)))
            return _apply_prediction(G, cn, ebunch)
        return predict_cn
    if model == "newton":
        # Gravity-style score: degree product over squared path length.
        def predict_newton(G, ebunch=None):
            shortest_path = nx.shortest_path(G)
            def newton(u, v):
                try:
                    path_len = len(shortest_path[u][v])
                except KeyError:  # Path does not exist
                    return 0
                return (G.degree[u] * G.degree[v]) / path_len ** 2
            return _apply_prediction(G, newton, ebunch)
        return predict_newton
    if model == "ccpa":
        # Common-neighbor and centrality based parameterized algorithm.
        def predict_ccpa(G, ebunch=None, alpha=0.8):
            shortest_path = nx.shortest_path(G)
            def ccpa(u, v):
                try:
                    path_len = len(shortest_path[u][v])
                except KeyError:  # Path does not exist
                    return 0
                return alpha * len(list(nx.common_neighbors(G, u, v))) + (1 - alpha) * (
                    G.number_of_nodes() / (path_len - 1)
                )
            return _apply_prediction(G, ccpa, ebunch)
        return predict_ccpa
def build_classifier(args, tasker):
    """Build the downstream classifier sized to match the encoder output.

    Node-classification tasks score a single node embedding; the other tasks
    concatenate two node embeddings, hence the doubled input width.
    """
    mult = 1 if args.task in ("node_cls", "static_node_cls") else 2

    uses_lstm_feats = "gru" in args.model or (
        "lstm" in args.model and not args.model == "gclstm"
    )
    if uses_lstm_feats:
        in_feats = args.gcn_parameters["lstm_l2_feats"] * mult
    elif args.model in ("skipfeatsgcn", "skipfeatsegcn_h"):
        # Skip-feature models append the raw node features to the layer-2 output.
        in_feats = (
            args.gcn_parameters["layer_2_feats"] + args.gcn_parameters["feats_per_node"]
        ) * mult
    else:
        in_feats = args.gcn_parameters["layer_2_feats"] * mult

    return mls.Classifier(
        args, in_features=in_feats, out_features=tasker.num_classes
    ).to(args.device)
# Return list of args ready for use that the framework iterates through for the grid search
# Each args in the list is the args used for each run of the grid search
def build_grid(all_args):
    """Expand every list-valued hyperparameter into a cartesian product of runs.

    Top-level list values (except the excluded keys) and list values nested
    one level inside dict-valued options become grid axes; nested axes are
    named "outer.inner". Each produced namespace carries a ``grid`` dict
    recording which cell of the grid it represents.
    """
    lists_not_included_in_grid_search = ["class_weights", "comments"]
    base = vars(all_args)

    # Gather the grid axes: axis name -> list of candidate values.
    axes = {}
    for key in base:
        value = base[key]
        if type(value) is list and key not in lists_not_included_in_grid_search:
            axes[key] = value
        elif type(value) is dict:
            for inner_key in value:
                if type(value[inner_key]) is list:
                    axes["{}.{}".format(key, inner_key)] = value[inner_key]

    # Convenience
    # Putting learning rate at the end, it will be ordered by permutate to be the outermost parameter in the grid search
    # Thus for continuous models, the training of the encoder happens intermittently, rather than all at once in the beginning
    if all_args.model in ["tgn", "tgat"]:
        axes["learning_rate"] = axes.pop("learning_rate")

    args_list = []

    def expand(remaining_axes, current_args, cell):
        # Depth-first expansion; popitem takes the most recently inserted
        # axis first, so learning_rate (moved above) becomes the outermost loop.
        if remaining_axes == {}:
            # Record which grid cell this configuration is.
            current_args["grid"] = cell
            args_list.append(current_args)
            return
        rest = deepcopy(remaining_axes)
        name, values = rest.popitem()
        for value in values:
            next_args = deepcopy(current_args)
            next_cell = deepcopy(cell)
            next_cell[name] = value
            if "." in name:
                outer, inner = name.split(".")
                next_args[outer][inner] = value
            else:
                next_args[name] = value
            expand(rest, next_args, next_cell)

    expand(axes, base, {})
    # Sanity check: one configuration per point of the cartesian product.
    assert len(args_list) == u.prod([len(values) for values in axes.values()])
    return [u.Namespace(a) for a in args_list]
def read_data_master(args, dataset_name=None):
    """Load per-dataset settings from config/data_master.csv into ``args``.

    Falls back to ``args.data`` when ``dataset_name`` is not given. Raises
    ValueError listing the available datasets when the name is unknown.
    Returns the (mutated) ``args``.
    """
    if not dataset_name:
        dataset_name = args.data
    master = pd.read_csv(os.path.join("config", "data_master.csv"), index_col=0)
    if dataset_name not in master:
        error_mssg = "Dataset not found in data master. Dataset name {}.\n".format(
            dataset_name
        )
        error_mssg += "Available datasets are as follows:\n"
        error_mssg += "\n".join(master.keys())
        raise ValueError(error_mssg)
    meta_info = master[dataset_name]

    args.data_filepath = os.path.join("data", meta_info["filename"])
    args.snapshot_size = float(meta_info["snapshot size"])
    args.train_proportion = float(meta_info["train proportion"])
    args.val_proportion = float(meta_info["val proportion"])

    # "steps accounted" may be non-integer in the master file; in that case
    # the limit is disabled (None).
    try:
        args.steps_accounted = int(meta_info["steps accounted"])
    except ValueError:
        args.steps_accounted = None

    # Node-feature encoding: "2 hot" selects two-hot; anything else one-hot.
    two_hot = meta_info["node encoding"] == "2 hot"
    args.use_2_hot_node_feats = two_hot
    args.use_1_hot_node_feats = not two_hot
    return args
def run_experiment(args):
### Seed, rank and cuda
global rank, wsize, use_cuda
# if hasattr(args, 'ncores') and type(args.ncores) == type(1) and args.ncores >= 1:
# print(args.ncores)
# torch.set_num_threads(16)
args.use_cuda = torch.cuda.is_available() and args.use_cuda
args.device = "cpu"
if args.use_cuda:
args.device = | |
# Source repository: pulkitag/pyphy-engine
import numpy as np
import matplotlib.pyplot as plt
import collections as co
import cairo
import math
import pdb
import copy
from collections import deque
import os
import scipy.io as sio
import scipy.misc as scm
import pickle
#Custom packages
import primitives as pm
import geometry as gm
import physics as phy
import os
from os import path as osp
class DataSaver:
    def __init__(self, rootPath='/work5/pulkitag/projPhysics', numBalls=1,
            mnBallSz=15, mxBallSz=35,
            mnSeqLen=40, mxSeqLen=100,
            mnForce=1e+3, mxForce=1e+6, wThick=30,
            isRect=True, wTheta=30, mxWLen=600, mnWLen=200,
            arenaSz=667, oppForce=False,
            svPrefix=None, randSeed=None, verbose=0, **kwargs):
        '''
        Configure a generator/saver of ball-in-cage physics sequences.
        rootPath: root directory under which the experiment directory is created
        numBalls: number of balls per sequence
        mnBallSz, mxBallSz: min/max ball radius
        mnSeqLen, mxSeqLen: min/max sequence length (frames)
        mnForce, mxForce: min/max initial force magnitude
        wThick  : wall thickness
        isRect  : If the walls need to be rectangular
        wTheta  : If the walls are NOT rectangular then at what angles should they be present.
        mxWLen  : Maximum length of the walls
        mnWLen  : Minimum length of the walls
        arenaSz : Size of the arena
        svPrefix: Prefix in the file names for saving the data
        oppForce: recorded in the experiment name (used by subclasses/callers)
        randSeed: seed for the private RandomState; None means nondeterministic
        verbose : verbosity level for debug prints
        '''
        #print (rootPath)
        #The name of the experiment; encodes every generation parameter so
        #distinct settings end up in distinct directories.
        self.expStr_ = 'aSz%d_wLen%d-%d_nb%d_bSz%d-%d_f%.2e-%.2e_sLen%d-%d_wTh%d' % (arenaSz,
            mnWLen, mxWLen, numBalls, mnBallSz, mxBallSz, mnForce, mxForce,
            mnSeqLen, mxSeqLen, wThick)
        if svPrefix is not None:
            self.expStr_ = svPrefix + '-' + self.expStr_
        if not isRect:
            if isinstance(wTheta, list):
                #NOTE(review): this join produces e.g. "30-_wTheta45" (no
                #leading "_wTheta"); looks odd but preserved as-is.
                thetaStr = '_wTheta'.join('%d-' % th for th in wTheta)
                thetaStr = thetaStr[0:-1]
            else:
                thetaStr = '_wTheta%d' % wTheta
            self.expStr_ = self.expStr_ + thetaStr
        if oppForce:
            self.expStr_ = self.expStr_ + '_oppFrc'
        #pdb.set_trace()
        #Setup directories.
        self.dirName_ = os.path.join(rootPath, self.expStr_)
        if not os.path.exists(self.dirName_):
            os.makedirs(self.dirName_)
        #Template: one sub-directory per sequence, filled in with the index.
        self.seqDir_ = os.path.join(self.dirName_, 'seq%06d')
        self.mnSeqLen_ = mnSeqLen
        self.mxSeqLen_ = mxSeqLen
        #Per-frame image name template and fixed per-sequence file names.
        self.imFile_ = 'im%06d.jpg'
        self.dataFile_ = 'data.mat'
        self.worldFile_ = 'world.pkl' #Saves the world.
        #Setup variables.
        self.numBalls_ = numBalls
        self.bmn_ = mnBallSz
        self.bmx_ = mxBallSz
        self.fmn_ = mnForce
        self.fmx_ = mxForce
        self.wlmx_ = mxWLen
        self.wlmn_ = mnWLen
        self.xSz_ = arenaSz
        self.ySz_ = arenaSz
        self.wth_ = wThick
        self.isRect_ = isRect
        self.oppForce_ = oppForce
        self.verbose_ = verbose
        #Normalize wTheta to a list so sampling code can index it uniformly.
        if not isinstance(wTheta, list):
            wTheta = [wTheta]
        self.wTheta_ = wTheta
        #Private RNG so sequences are reproducible when a seed is given.
        if randSeed is None:
            self.rand_ = np.random.RandomState()
        else:
            self.rand_ = np.random.RandomState(randSeed)
        print ('DATAIO SETUP DONE')
    def save(self, numSeq=10):
        '''
        Generate numSeq random sequences and write each one to its own
        directory (images + data.mat + world.pkl).
        '''
        for i in range(numSeq):
            print i
            #Sample this sequence's length in [mnSeqLen, mxSeqLen).
            seqLen = int(self.mnSeqLen_ + self.rand_.rand() * (self.mxSeqLen_ - self.mnSeqLen_))
            self.seqLen_ = seqLen
            seqDir = self.seqDir_ % i
            if not os.path.exists(seqDir):
                os.makedirs(seqDir)
            dataFile = os.path.join(seqDir, self.dataFile_)
            imFile = os.path.join(seqDir, self.imFile_)
            worldFile = os.path.join(seqDir, self.worldFile_)
            self.save_sequence(dataFile, imFile, worldFile)
    def save_sequence(self, dataFile, imFile, worldFile):
        '''
        Simulate one sequence and write it to disk.
        dataFile : .mat file receiving the 'force' and 'position' arrays
        imFile   : per-frame image filename template (takes the frame index)
        worldFile: pickle receiving the initial forces, ball positions and wall points
        '''
        model, f, ballPos, walls = self.generate_model()
        #Rows are (x, y) pairs per ball; columns are time steps.
        force = np.zeros((2 * self.numBalls_, self.seqLen_)).astype(np.float32)
        position = np.zeros((2 * self.numBalls_, self.seqLen_)).astype(np.float32)
        #Collect all the objects in the worlds
        #objs = {}
        #for name in self.world_.get_object_names():
        #	objs[name] = self.world_.get_object(name)
        #pdb.set_trace()
        #NOTE(review): the file handle from open() is never closed explicitly.
        pickle.dump({'force': f, 'ballPos': ballPos, 'walls': self.pts}, open(worldFile,'w'))
        for b in range(self.numBalls_):
            fb = f[b]
            st, en = 2*b, 2*b + 1
            #The initial force only acts at the first time step.
            force[st,0], force[en,0] = fb.x(), fb.y()
            print fb
        for i in range(self.seqLen_):
            #Advance the physics one step, render, and record ball positions.
            model.step()
            im = model.generate_image()
            svImFile = imFile % i
            scm.imsave(svImFile, im)
            for j in range(self.numBalls_):
                ballName = 'ball-%d' % j
                ball = model.get_object(ballName)
                pos = ball.get_position()
                position[2*j, i] = pos.x()
                position[2*j+1, i] = pos.y()
        sio.savemat(dataFile, {'force': force, 'position': position})
    def fetch(self, cropSz=None, procSz=None):
        '''
        Simulate one sequence in memory and return the rendered data.
        cropSz: if given, also build per-ball crops of size cropSz x cropSz
                centered on each ball.
        procSz: if given (together with cropSz), resize the crops to
                procSz x procSz and rescale positions/velocities accordingly.
        Returns imList when cropSz is None, otherwise
        (imBalls, force, velocity, position).
        '''
        #Sample this sequence's length in [mnSeqLen, mxSeqLen).
        seqLen = int(self.mnSeqLen_ + self.rand_.rand() * (self.mxSeqLen_ - self.mnSeqLen_))
        self.seqLen_ = seqLen
        model, f, ballPos, walls = self.generate_model()
        #Rows are (x, y) pairs per ball; columns are time steps.
        force = np.zeros((2 * self.numBalls_, self.seqLen_)).astype(np.float32)
        position = np.zeros((2 * self.numBalls_, self.seqLen_)).astype(np.float32)
        velocity = np.zeros((2 * self.numBalls_, self.seqLen_)).astype(np.float32)
        imList = []
        imBalls = []
        for b in range(self.numBalls_):
            imBalls.append([])
            fb = f[b]
            st, en = 2*b, 2*b + 1
            #The initial force only acts at the first time step.
            force[st,0], force[en,0] = fb.x(), fb.y()
        #Previous position
        pPos = np.nan * np.zeros((self.numBalls_,2))
        for i in range(self.seqLen_):
            model.step()
            im = model.generate_image()
            vx, vy = None, None
            for j in range(self.numBalls_):
                ballName = 'ball-%d' % j
                ball = model.get_object(ballName)
                pos = ball.get_position()
                position[2*j, i] = pos.x()
                position[2*j+1, i] = pos.y()
                #Speed should not be predicted, instead we should just predict
                #delta in position. The difference between the two is critical
                #due to collisions.
                if not np.isnan(pPos[j][0]):
                    vx = pos.x() - pPos[j][0]
                    vy = pos.y() - pPos[j][1]
                pPos[j][0], pPos[j][1] = pos.x(), pos.y()
                xMid, yMid = round(pos.x()), round(pos.y())
                if cropSz is not None:
                    #White canvas; parts of the crop outside the arena stay white.
                    imBall = 255 * np.ones((cropSz, cropSz,3)).astype(np.uint8)
                    #Cropping coordinates in the original image
                    x1, x2 = max(0, xMid - cropSz/2.0), min(self.xSz_, xMid + cropSz/2.0)
                    y1, y2 = max(0, yMid - cropSz/2.0), min(self.ySz_, yMid + cropSz/2.0)
                    #Coordinates in the cropped image centerd at the ball
                    imX1 = int(round(cropSz/2.0 - (xMid - x1)))
                    imX2 = int(round(cropSz/2.0 + (x2 - xMid)))
                    imY1 = int(round(cropSz/2.0 - (yMid - y1)))
                    imY2 = int(round(cropSz/2.0 + (y2 - yMid)))
                    x1, x2 = int(round(x1)), int(round(x2))
                    y1, y2 = int(round(y1)), int(round(y2))
                    imBall[imY1:imY2,imX1:imX2,:] = im[y1:y2, x1:x2,0:3]
                    #Express the position in crop-local coordinates.
                    position[2*j, i] = position[2*j, i] - x1 + imX1
                    position[2*j+1, i] = position[2*j+1, i] - y1 + imY1
                    if procSz is not None:
                        #Resize the crop and rescale coordinates to match.
                        posScale = float(procSz)/float(cropSz)
                        imBall = scm.imresize(imBall, (procSz, procSz))
                        position[2*j, i] = position[2*j, i] * posScale
                        position[2*j+1, i] = position[2*j+1, i] * posScale
                        if vx is not None:
                            #Velocity for step i is stored at column i-1.
                            velocity[2*j, i-1] = vx * posScale
                            velocity[2*j+1, i-1] = vy * posScale
                    imBalls[j].append(imBall)
            imList.append(im)
        if cropSz is None:
            return imList
        else:
            return imBalls, force[:, 0:self.seqLen_],\
                velocity[:, 0:self.seqLen_],\
                position[:, 0:self.seqLen_]
    def _generate_model(self, returnPos=False):
        '''
        Build a fresh world (walls + balls) and wrap it in a physics model.
        returnPos: when True, also return the ball positions, wall corner
        points and wall objects.
        '''
        #get the coordinates of the top point
        #create the world
        self.world_ = pm.World(xSz=self.xSz_, ySz=self.ySz_)
        #add the walls
        walls = self.add_walls()
        #add the balls
        ballpos = self.add_balls()
        #create physics simulation
        model = pm.Dynamics(self.world_)
        if returnPos:
            return model, ballpos, self.pts, walls
        else:
            return model
    def generate_model(self):
        '''
        Build a world and apply the initial forces (via self.apply_force,
        defined elsewhere). Returns (model, forces, ball positions, walls).
        '''
        model, ballpos, _, walls = self._generate_model(returnPos=True)
        #apply initial forces and return the result.
        model, fs = self.apply_force(model)
        return model, fs, ballpos, walls
    #this is mostly due to legacy reasons.
    def add_rectangular_walls(self, fColor=pm.Color(1.0, 0.0, 0.0)):
        '''
        Place an axis-aligned rectangular cage of random size at a random
        location inside the arena. Returns the wall objects.
        '''
        #define the extents within which walls can be put.
        hlen = np.floor(self.wlmn_ + self.rand_.rand() * (self.wlmx_ - self.wlmn_))
        vlen = np.floor(self.wlmn_ + self.rand_.rand() * (self.wlmx_ - self.wlmn_))
        topxmx = self.xSz_ - (hlen + self.wth_)
        topymx = self.ySz_ - (vlen + self.wth_)
        #Random top-left corner within the allowed extents.
        xleft = np.floor(self.rand_.rand() * topxmx)
        ytop = np.floor(self.rand_.rand() * topymx)
        #A rectangle is a cage with headings (0, 90, 180) degrees.
        walls = self._create_walls(xleft, ytop, (0, 90, 180), (hlen - self.wth_, vlen, hlen - self.wth_),
            fColor=fColor)
        #define the walls
        #wallhordef = pm.walldef(sz=gm.Point(hlen, self.wth_), fColor=fColor)
        #wallverdef = pm.walldef(sz=gm.Point(self.wth_, vlen), fColor=fColor)
        #self.world_.add_object(wallverdef, initpos=gm.Point(xleft, ytop))
        #self.world_.add_object(wallverdef, initpos=gm.Point(xleft + hlen - self.wth_, ytop))
        #self.world_.add_object(wallhordef, initpos=gm.Point(xleft, ytop))
        #self.world_.add_object(wallhordef, initpos=gm.Point(xleft, ytop + vlen))
        #self.pts = [gm.Point(xleft, ytop)]
        #self.whl_, self.wvl_ = hlen, vlen
        return walls
    ##
    def sample_walls(self):
        '''
        Sample (xleft, yleft, wtheta, hlen) for a diagonal-walled cage that
        fits inside the arena. Resamples recursively until the sampled
        geometry fits.
        '''
        #for adding diagonal walls
        #1. estimate the x and y extents of the wall.
        #2. find the appropriate starting position based on that
        #sample the theta
        perm = self.rand_.permutation(len(self.wTheta_))
        wtheta = self.wTheta_[perm[0]]
        rad = (wtheta * np.pi)/180.0
        #Wall length sampled uniformly in [wlmn, wlmx).
        hlen = self.wlmn_ + self.rand_.rand() * (self.wlmx_ - self.wlmn_)
        if wtheta == 90:
            xlen = hlen
            ylen = hlen
        else:
            xlen = hlen * np.cos(rad)
            ylen = hlen * np.sin(rad)
        #Bounding box of the cage, including wall thickness on both sides.
        xextent = 2 * xlen + 2 * self.wth_
        yextent = 2 * ylen + 2 * self.wth_
        xleftmin = self.wth_
        xleftmax = self.xSz_ - xextent
        yleftmin = ylen + self.wth_
        if wtheta == 90:
            yleftmax = self.ySz_ - self.wth_
        else:
            yleftmax = self.ySz_ - (ylen + self.wth_)
        #keep sampling until the appropriate size has been found.
        if xleftmin <= 0 or yleftmin <=0:
            return self.sample_walls()
        if xleftmax < xleftmin or yleftmax < yleftmin:
            return self.sample_walls()
        xleft = xleftmin + np.floor(self.rand_.rand() * (xleftmax - xleftmin))
        yleft = yleftmin + np.floor(self.rand_.rand() * (yleftmax - yleftmin))
        return xleft, yleft, wtheta, hlen
    ##
    def _create_walls(self, xleft, yleft, thetas, wlens, fColor):
        '''
        Build a cage from a start point, three headings (degrees) and three
        wall lengths; the fourth wall closes the polygon. Also records the
        corner points (self.pts) and boundary lines (self.lines_) used later
        for ball placement. Returns the wall objects.
        '''
        theta1, theta2, theta3 = thetas
        wlen1, wlen2, wlen3 = wlens
        #Walk from corner to corner along the given headings.
        pt1 = gm.Point(xleft, yleft)
        dir1 = gm.theta2dir(theta1)
        pt2 = pt1 + (wlen1 * dir1)
        dir2 = gm.theta2dir(theta2)
        pt3 = pt2 + (wlen2 * dir2)
        dir3 = gm.theta2dir(theta3)
        pt4 = pt3 + (wlen3 * dir3)
        pts = [pt1, pt2, pt3, pt4]
        if self.verbose_ > 0:
            print ("points: ", pt1, pt2, pt3, pt4)
        walls = pm.create_cage(pts, wThick = self.wth_, fColor=fColor)
        #get the lines within which the balls need to be added.
        self.pts = pts
        self.lines_ = []
        for w in walls:
            self.world_.add_object(w)
        #One boundary line per pair of consecutive corners (wrapping around).
        for i in range(len(pts)):
            self.lines_.append(gm.Line(pts[i], pts[np.mod(i+1, len(pts))]))
        return walls
    def add_walls(self, fColor=pm.Color(1.0, 0.0, 0.0)):
        '''
        Add the cage walls to the world: rectangular cages use the legacy
        path, otherwise a diagonal cage is sampled. Returns the wall objects.
        '''
        if self.isRect_:
            return self.add_rectangular_walls(fColor=fColor)
        xleft, yleft, wtheta, hlen = self.sample_walls()
        #Symmetric cage: headings (-theta, theta, 180-theta), equal lengths.
        walls = self._create_walls(xleft, yleft, (-wtheta, wtheta, 180-wtheta),
            (hlen, hlen, hlen), fColor=fColor)
        return walls
    def find_point_within_lines(self, mindist):
        '''
        Sample a point inside the cage that is at least mindist away from
        every boundary line.
        Returns (pt, isinside, md): the sampled point, whether it clears
        mindist from all walls, and the minimum signed distance found.
        '''
        #Sample within the bounding box spanned by opposite cage corners.
        x = int(np.round(self.pts[0].x() + self.rand_.rand()*(self.pts[2].x() - self.pts[0].x())))
        y = int(np.round(self.pts[1].y() + self.rand_.rand()*(self.pts[3].y() - self.pts[1].y())))
        pt = gm.Point(x,y)
        isinside = True
        dist = []
        for (i,l) in enumerate(self.lines_):
            #note we are finding the signed distance
            dist.append(l.distance_to_point(pt))
            if dist[i] <= mindist:
                isinside=False
        md = min(dist)
        return pt, isinside, md
#generates and adds the required number of balls.
def add_balls(self):
#generate ball definitions
allr, allpos = [], []
for i in range(self.numBalls_):
placeflag = True
while placeflag:
#randomly sample the radius of the ball
r = int(np.floor(self.bmn_ + self.rand_.rand() * (self.bmx_ - self.bmn_)))
bdef = pm.BallDef(radius=r, fColor=pm.Color(0.5, 0.5, 0.5))
#find a position to keep the ball
'''
if self.isrect_:
xleft, ytop = self.pts[0].x_asint(), self.pts[0].y_asint()
#xmn = xleft + 2 * r + self.wth_
#ymn = ytop + 2 * r + self.wth_
#xmx = xleft + self.whl_ - self.wth_ - 2 * r
#ymx = ytop + self.wvl_ - self.wth_ - 2 * r
xmn = xleft + r + self.wth_ + 2 #give some margin
ymn = ytop + r + self.wth_ + 2
xmx = xleft + self.whl_ - self.wth_ - r - 2
ymx = ytop + self.wvl_ - self.wth_ - r - 2
xloc = int(np.floor(xmn + (xmx - xmn) * self.rand_.rand()))
yloc = int(np.floor(ymn + (ymx - ymn) * self.rand_.rand()))
else:
'''
findflag = True
count = 0
while findflag:
pt, isvalid, md = self.find_point_within_lines(r + self.wth_ + 2) #2 is safety margin
count += 1
if isvalid:
findflag=False
if count >= 500:
print "failed to find a point to place the ball"
pdb.set_trace()
if self.verbose_ > 0:
print ("ball at (%f, %f), dist: %f" % (pt.x(), pt.y(), md))
xloc, yloc = pt.x_asint(), pt.y_asint()
pt = gm.Point(xloc, yloc)
#determine if the ball can be placed at the chosen position | |
= QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legFront_L0_root_ctl.setPalette(palette)
self.legFront_L0_root_ctl.setAutoFillBackground(True)
self.legFront_L0_root_ctl.setObjectName("legFront_L0_root_ctl")
self.legBack_L0_fk1_ctl = SelectBtn_RFkBox(biped_body)
self.legBack_L0_fk1_ctl.setGeometry(QtCore.QRect(259, 318, 20, 15))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legBack_L0_fk1_ctl.setPalette(palette)
self.legBack_L0_fk1_ctl.setAutoFillBackground(True)
self.legBack_L0_fk1_ctl.setObjectName("legBack_L0_fk1_ctl")
self.legBack_L0_root_ctl = SelectBtn_RIkCircle(biped_body)
self.legBack_L0_root_ctl.setGeometry(QtCore.QRect(227, 306, 16, 16))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 127))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)
self.legBack_L0_root_ctl.setPalette(palette)
self.legBack_L0_root_ctl.setAutoFillBackground(True)
self.legBack_L0_root_ctl.setObjectName("legBack_L0_root_ctl")
self.legBack_L0_fk2_ctl = SelectBtn_RFkBox(biped_body)
self.legBack_L0_fk2_ctl.setGeometry(QtCore.QRect(292, 318, 20, 15))
palette = QtGui.QPalette()
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(59, 255, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(157, 255, 127, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(108, 255, 63, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(29, 127, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(39, 170, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, | |
root)
# (nd has only 1 sibling)
# (nd is leftmost child)
# w--x
# \\/
# nd---y w--x--y
# \\ / \\ | /
# u--v--par --> u--v--par
# \\ | / \\ | /
# z z
if debugging:
print 'del case 4a (%d)' % deleting
y = nd.rsib
w = nd.lchild
x = w
while x.rsib is not None:
x.par = nd.par
x = x.rsib
x.rsib = y
x.par = nd.par
nd.par.lchild = w
nd.clear()
else:
# nd is internal
# (nd.par is rightmost child)
# (nd's parent is NOT the root)
# (nd has only 1 sibling)
# (nd is leftmost child)
# nd---y
# \\ /
# u--v--par --> u--v--y
# \\ | / \\ | /
# z z
if debugging:
print 'del case 4b (%d)' % deleting
y = nd.rsib
v = z.lchild
while v.rsib != nd.par:
v = v.rsib
v.rsib = y
y.par = z
nd.par.clear()
nd.clear()
else:
# nd.par is a middle child
# (nd's parent is NOT the root)
# (nd has only 1 sibling)
# (nd is leftmost child)
# nd---y
# \\ /
# u--par--v --> u--y--v
# \\ | / \\ | /
# z z
if debugging:
print 'del case 5 (%d)' % deleting
y = nd.rsib
u = z.lchild
v = nd.par.rsib
while u.rsib != nd.par:
u = u.rsib
u.rsib = y
y.rsib = v
y.par = z
nd.par.clear()
nd.clear()
elif nd.rsib is None:
# nd is rightmost child
if nd != nd.par.lchild.rsib:
# nd has 2 or more siblings
if nd.lchild is None:
# nd is a leaf
# (nd has 2 or more siblings)
# (nd is rightmost child)
# x--y--nd x---y
# \\ | / --> \ /
# par par
if debugging:
print '*del case 6a (%d)' % deleting
y = nd.par.lchild
while y.rsib != nd:
y = y.rsib
y.rsib = None
nd.clear()
else:
# nd is internal
# (nd has 2 or more siblings)
# (nd is rightmost child)
# a---b
# \\ /
# x--y--nd x-y-a-b
# \\ | / --> \| |/
# par par
if debugging:
print '*del case 6b (%d)' % deleting
a = nd.lchild
a.par = nd.par
b = a.rsib
y = nd.par.lchild
while y.rsib != nd:
y = y.rsib
y.rsib = a
while b is not None:
b.par = nd.par
b = b.rsib
nd.clear()
else:
# nd has 1 sibling
# (nd is rightmost child)
if nd.par.par is None:
# nd's parent is the root
# (nd has 1 sibling)
# (nd is rightmost child)
if nd.lchild is None:
# nd is a leaf
# (nd's parent is the root)
# (nd has 1 sibling)
# (nd is rightmost child)
# y---nd
# \\ /
# par = self. root --> y = self.root
if debugging:
print 'del case 7a (%d)' % deleting
y = nd.par.lchild
y.par = None
self.root = y
nd.par.clear()
nd.clear()
else:
# nd is internal
# (nd's parent is the root)
# (nd has 1 sibling)
# (nd is rightmost child)
# x----y
# \ /
# z---nd z--x--y
# \\ / \\ | /
# par --> par
if debugging:
print 'del case 7b (%d)' % deleting
x = nd.lchild
y = x.rsib
z = nd.par.lchild
z.rsib = x
x.par = nd.par
while y is not None:
y.par = nd.par
y = y.rsib
nd.clear()
else:
# nd's parent is NOT the root
# (nd has 1 sibling)
# (nd is rightmost child)
z = nd.par.par
if nd.par == z.lchild:
# nd.par is the leftmost child
# (nd's parent is NOT the root)
# (nd has 1 sibling)
# (nd is rightmost child)
if nd.lchild is not None:
# nd is a leaf
# (nd.par is the leftmost child)
# (nd's parent is NOT the root)
# (nd has 1 sibling)
# (nd is rightmost child)
# w---x
# \\ /
# y---nd y--w--x
# \\ / \\ | /
# par--u--v --> par--u--v
# \\ | / \\ | /
# z z
if debugging:
print 'del case 8a (%d)' % deleting
y = nd.par.lchild
w = nd.lchild
x = w
while x.rsib is not None:
x.par = nd.par
x = x.rsib
y.rsib = w
nd.clear()
else:
# nd is internal
# (nd.par is the leftmost child)
# (nd's parent is NOT the root)
# (nd has 1 sibling)
# (nd is rightmost child)
# y---nd
# \\ /
# par--u--v --> y--u--v
# \\ | / \\ | /
# z z
if debugging:
print 'del case 8b (%d)' % deleting
y = nd.par.lchild
u = nd.par.rsib
y.rsib = u
y.par = z
z.lchild = y
nd.par.clear()
nd.clear()
elif nd.par.rsib is None:
# nd.par is the rightmost child
# (nd's parent is NOT the root)
# (nd has 1 sibling)
# (nd is rightmost child)
if nd.lchild is None:
# nd is a leaf
# (nd.par is the rightmost child)
# (nd's parent is NOT the root)
# (nd has 1 sibling)
# (nd is rightmost child)
# u---nd
# \\ /
# a--b-par --> a--b--u
# \\ | / \\ | /
# z z
if debugging:
print 'del case 9a (%d)' % deleting
u = nd.par.lchild
z = nd.par.par
a = z.lchild
b = a
while b.rsib != nd.par:
b = b.rsib
b.rsib = u
u.rsib = None
u.par = z
nd.par.clear()
nd.clear()
else:
# nd is internal
# (nd.par is the rightmost child)
# (nd's parent is NOT the root)
# (nd has 1 sibling)
# (nd is rightmost child)
# x---y--z
# \\ | /
# u---nd u--v--x--y--z
# \\ / \\ | | | /
# par --> par
# / /
if debugging:
print 'del case 9b (%d)' % deleting
x = nd.lchild
y = x.rsib
v = nd.par.lchild
while v.rsib != nd:
v = v.rsib
v.rsib = x
x.par = nd.par
while y.rsib is not None:
y.par = nd.par
y = y.rsib
nd.clear()
else:
# nd.par is a middle child
# (nd's parent is NOT the root)
# (nd has 1 sibling)
# (nd is rightmost child)
# y---nd
# \\ /
# u--par--v --> u--y--v
# \\ | / \\ | /
# z z
if debugging:
print '*del case 10 (%d)' % deleting
y = nd.par.lchild
u = z.lchild
v = nd.par.rsib
while u.rsib != nd.par:
u = u.rsib
u.rsib = y
y.par = z
y.rsib = v
nd.par.clear()
nd.clear()
else:
# nd is a middle child
if nd.lchild is None:
# nd is a leaf
# (nd is a middle child)
# x--nd--y x---y
# \\ | / --> \ /
# par par
if debugging:
print '*del case 11a (%d)' % deleting
y = nd.rsib
x = nd.par.lchild
while x.rsib != nd:
x = x.rsib
x.rsib = y
nd.clear()
else:
# nd is internal
# (nd is a middle child)
# a-b-c
# \\|/
# x--nd--y x-a-b-c-y
# \\ | / --> \| | |/
# par par
if debugging:
print '*del case 11b (%d)' % deleting
x = nd.par.lchild
while x.rsib != nd:
x = x.rsib
y = nd.rsib
a = nd.lchild
a.par = nd.par
x.rsib = a
c = a.rsib
while c.rsib is not None:
c.par = nd.par
c.par = nd.par
c.rsib = y
nd.clear()
self.calcSplits()
#print 'after newick = ',self.makeNewick()
#print '########## end deleteNode ##########'
    def addNodeTo(self, nd, ndnum):
        """Attach a new node numbered ndnum as the leftmost child of nd.

        Any existing children of nd become right siblings of the new node.
        Returns the newly created Node.
        """
        # Diagram: before (left) and after (right):
        # x----y newnd--x--y
        # \\ / --> \\ | /
        # nd nd
        if debugging:
            print 'add case 5'
        newnd = Node()
        newnd.number = ndnum
        # Splice in front of nd's current child list.
        newnd.rsib = nd.lchild
        newnd.lchild = None
        newnd.par = nd
        nd.lchild = newnd
        # presumably recomputes split bookkeeping after the topology change;
        # calcSplits is defined elsewhere in this file -- TODO confirm.
        self.calcSplits()
        return newnd
def addNodeBelow(self, nd, ndnum):
newnd = Node()
newnd.number = ndnum
newpar = Node()
if not nd.par:
# nd newnd-----nd
# \\ /
# --> \\ | |
"""
hash_table.py
Python implementation of the very simple, fixed-array hash table
used for the audfprint fingerprinter.
2014-05-25 <NAME> <EMAIL>
"""
from __future__ import print_function
import numpy as np
import random
import cPickle as pickle
import os, gzip
import scipy.io
import math
# Current format version
HT_VERSION = 20170724
# Earliest acceptable version
HT_COMPAT_VERSION = 20170724
# Earliest version that can be updated with load_old
HT_OLD_COMPAT_VERSION = 20140920
def _bitsfor(maxval):
""" Convert a maxval into a number of bits (left shift).
Raises a ValueError if the maxval is not a power of 2. """
maxvalbits = int(round(math.log(maxval)/math.log(2)))
if maxval != (1 << maxvalbits):
raise ValueError("maxval must be a power of 2, not %d" % maxval)
return maxvalbits
class HashTable(object):
"""
Simple hash table for storing and retrieving fingerprint hashes.
:usage:
>>> ht = HashTable(size=2**10, depth=100)
>>> ht.store('identifier', list_of_landmark_time_hash_pairs)
>>> list_of_ids_tracks = ht.get_hits(hash)
"""
def __init__(self, filename=None, hashbits=20, depth=100, maxtime=16384):
""" allocate an empty hash table of the specified size """
if filename is not None:
self.load(filename)
else:
self.hashbits = hashbits
self.depth = depth
self.maxtimebits = _bitsfor(maxtime)
# allocate the big table
size = 2**hashbits
self.table = np.zeros((size, depth), dtype=np.uint32)
# keep track of number of entries in each list
self.counts = np.zeros(size, dtype=np.int32)
# map names to IDs
self.names = []
# track number of hashes stored per id
self.hashesperid = np.zeros(0, np.uint32)
# Empty params
self.params = {}
# Record the current version
self.ht_version = HT_VERSION
# Mark as unsaved
self.dirty = True
def reset(self):
""" Reset to empty state (but preserve parameters) """
self.table[:,:] = 0
self.counts[:] = 0
self.names = []
self.hashesperid.resize(0)
self.dirty = True
def store(self, name, timehashpairs):
""" Store a list of hashes in the hash table
associated with a particular name (or integer ID) and time.
"""
id_ = self.name_to_id(name, add_if_missing=True)
# Now insert the hashes
hashmask = (1 << self.hashbits) - 1
#mxtime = self.maxtime
maxtime = 1 << self.maxtimebits
timemask = maxtime - 1
# Try sorting the pairs by hash value, for better locality in storing
#sortedpairs = sorted(timehashpairs, key=lambda x:x[1])
sortedpairs = timehashpairs
# Tried making it an np array to permit vectorization, but slower...
#sortedpairs = np.array(sorted(timehashpairs, key=lambda x:x[1]),
# dtype=int)
# Keep only the bottom part of the time value
#sortedpairs[:,0] = sortedpairs[:,0] % self.maxtime
# Keep only the bottom part of the hash value
#sortedpairs[:,1] = sortedpairs[:,1] & hashmask
# The id value is based on (id_ + 1) to avoid an all-zero value.
idval = (id_ + 1) << self.maxtimebits
for time_, hash_ in sortedpairs:
# Keep only the bottom part of the hash value
hash_ &= hashmask
# How many already stored for this hash?
count = self.counts[hash_]
# Keep only the bottom part of the time value
#time_ %= mxtime
time_ &= timemask
# Mixin with ID
val = (idval + time_) #.astype(np.uint32)
if count < self.depth:
# insert new val in next empty slot
#slot = self.counts[hash_]
self.table[hash_, count] = val
else:
# Choose a point at random
slot = random.randint(0, count)
# Only store if random slot wasn't beyond end
if slot < self.depth:
self.table[hash_, slot] = val
# Update record of number of vals in this bucket
self.counts[hash_] = count + 1
# Record how many hashes we (attempted to) save for this id
self.hashesperid[id_] += len(timehashpairs)
# Mark as unsaved
self.dirty = True
def get_entry(self, hash_):
""" Return np.array of [id, time] entries
associate with the given hash as rows.
"""
vals = self.table[hash_, :min(self.depth, self.counts[hash_])]
maxtimemask = (1 << self.matimebits) - 1
# ids we report externally start at 0, but in table they start at 1.
ids = (vals >> self.maxtimebits) - 1
return np.c_[ids, vals & maxtimemask].astype(np.int32)
def get_hits(self, hashes):
""" Return np.array of [id, delta_time, hash, time] rows
associated with each element in hashes array of [time, hash] rows.
This version has get_entry() inlined, it's about 30% faster.
"""
# Allocate to largest possible number of hits
nhashes = np.shape(hashes)[0]
hits = np.zeros((nhashes*self.depth, 4), np.int32)
nhits = 0
maxtimemask = (1 << self.maxtimebits) - 1
hashmask = (1 << self.hashbits) - 1
# Fill in
for ix in xrange(nhashes):
time_ = hashes[ix][0]
hash_ = hashmask & hashes[ix][1]
nids = min(self.depth, self.counts[hash_])
tabvals = self.table[hash_, :nids]
hitrows = nhits + np.arange(nids)
# Make external IDs start from 0.
hits[hitrows, 0] = (tabvals >> self.maxtimebits) - 1
hits[hitrows, 1] = (tabvals & maxtimemask) - time_
hits[hitrows, 2] = hash_
hits[hitrows, 3] = time_
nhits += nids
# Discard the excess rows
hits.resize( (nhits, 4) )
return hits
def save(self, name, params=None, file_object=None):
""" Save hash table to file <name>,
including optional addition params
"""
# Merge in any provided params
if params:
for key in params:
self.params[key] = params[key]
if file_object:
f = file_object
else:
f = gzip.open(name, 'wb')
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
self.dirty = False
nhashes = sum(self.counts)
# Report the proportion of dropped hashes (overfull table)
dropped = nhashes - sum(np.minimum(self.depth, self.counts))
print("Saved fprints for", sum(n is not None for n in self.names),
"files (", nhashes, "hashes) to", name,
"(%.2f%% dropped)" % (100.0*dropped/max(1, nhashes)))
def load(self, name):
""" Read either pklz or mat-format hash table file """
ext = os.path.splitext(name)[1]
if ext == '.mat':
self.load_matlab(name)
else:
self.load_pkl(name)
nhashes = sum(self.counts)
# Report the proportion of dropped hashes (overfull table)
dropped = nhashes - sum(np.minimum(self.depth, self.counts))
print("Read fprints for", sum(n is not None for n in self.names),
"files (", nhashes, "hashes) from", name,
"(%.2f%% dropped)" % (100.0*dropped/max(1, nhashes)))
def load_pkl(self, name, file_object=None):
""" Read hash table values from pickle file <name>. """
if file_object:
f = file_object
else:
f = gzip.open(name, 'rb')
temp = pickle.load(f)
if temp.ht_version < HT_OLD_COMPAT_VERSION:
raise ValueError('Version of ' + name + ' is ' + str(temp.ht_version)
+ ' which is not at least ' +
str(HT_OLD_COMPAT_VERSION))
# assert temp.ht_version >= HT_COMPAT_VERSION
params = temp.params
self.hashbits = temp.hashbits
self.depth = temp.depth
if hasattr(temp, 'maxtimebits'):
self.maxtimebits = temp.maxtimebits
else:
self.maxtimebits = _bitsfor(temp.maxtime)
if temp.ht_version < HT_COMPAT_VERSION:
# Need to upgrade the database.
print("Loading database version", temp.ht_version,
"in compatibility mode.")
# Offset all the nonzero bins with one ID count.
temp.table += np.array(1 << self.maxtimebits).astype(np.uint32) * (
temp.table != 0)
temp.ht_version = HT_VERSION
self.table = temp.table
self.ht_version = temp.ht_version
self.counts = temp.counts
self.names = temp.names
self.hashesperid = np.array(temp.hashesperid).astype(np.uint32)
self.dirty = False
self.params = params
def load_matlab(self, name):
""" Read hash table from version saved by Matlab audfprint.
:params:
name : str
filename of .mat matlab fp dbase file
:side_effects:
Sets up attributes of self including
params : dict
dictionary of parameters from the Matlab file including
'mat_version' : float
version read from Matlab file (must be >= 0.90)
'hoptime' : float
hoptime read from Matlab file (must be 0.02322)
'targetsr' : float
target sampling rate from Matlab file (must be 11025)
"""
mht = scipy.io.loadmat(name)
params = {}
params['mat_version'] = mht['HT_params'][0][0][-1][0][0]
assert params['mat_version'] >= 0.9
self.hashbits = _bitsfor(mht['HT_params'][0][0][0][0][0])
self.depth = mht['HT_params'][0][0][1][0][0]
self.maxtimebits = _bitsfor(mht['HT_params'][0][0][2][0][0])
params['hoptime'] = mht['HT_params'][0][0][3][0][0]
params['targetsr'] = mht['HT_params'][0][0][4][0][0]
params['nojenkins'] = mht['HT_params'][0][0][5][0][0]
# Python doesn't support the (pointless?) jenkins hashing
assert params['nojenkins']
self.table = mht['HashTable'].T
self.counts = mht['HashTableCounts'][0]
self.names = [str(val[0]) if len(val) > 0 else []
for val in mht['HashTableNames'][0]]
self.hashesperid = np.array(mht['HashTableLengths'][0]).astype(uint32)
# Matlab uses 1-origin for the IDs in the hashes, but the Python code
# also skips using id_ 0, so that names[0] corresponds to id_ 1.
# Otherwise unmodified database
self.dirty = False
self.params = params
def totalhashes(self):
""" Return the total count of hashes stored in the table """
return np.sum(self.counts)
def merge(self, ht):
""" Merge in the results from another hash table """
# All the items go into our table, offset by our current size
# Check compatibility
assert self.maxtimebits == ht.maxtimebits
ncurrent = len(self.names)
#size = len(self.counts)
self.names += ht.names
self.hashesperid = np.append(self.hashesperid, ht.hashesperid)
# Shift all the IDs in the second table down by ncurrent
idoffset = (1 << self.maxtimebits) * ncurrent
for hash_ in np.nonzero(ht.counts)[0]:
| |
object_info.initial_detrend_period is not None:
self.rotator_period = object_info.initial_detrend_period
elif self.auto_detrend_periodic_signals:
self.rotator_period = self.__calculate_max_significant_period(lc, periodogram)
if self.rotator_period is not None:
logging.info('================================================')
logging.info('AUTO-DETREND EXECUTION')
logging.info('================================================')
logging.info("Period = %.3f", self.rotator_period)
lc.fold(self.rotator_period).scatter()
plt.title("Phase-folded period: " + format(self.rotator_period, ".2f") + " days")
plt.savefig(object_dir + "Phase_detrend_period_" + str(sherlock_id) + "_" + format(self.rotator_period, ".2f") + "_days.png")
plt.clf()
flatten_flux, lc_trend = self.__detrend_by_period(clean_time, flatten_flux, self.rotator_period * self.auto_detrend_ratio)
if not self.period_min:
self.period_min = self.rotator_period * 4
logging.info("Setting Min Period to %.3f", self.period_min)
if object_info.initial_mask is not None:
logging.info('================================================')
logging.info('INITIAL MASKING')
logging.info('================================================')
initial_mask = object_info.initial_mask
logging.info('** Applying ordered masks to the lightcurve **')
for mask_range in initial_mask:
mask = [(clean_time < mask_range[0] if not math.isnan(mask_range[1]) else False) |
(clean_time > mask_range[1] if not math.isnan(mask_range[1]) else False)]
clean_time = clean_time[mask]
flatten_flux = flatten_flux[mask]
return clean_time, flatten_flux, clean_flux_err, star_info, transits_min_count, cadence, \
sectors if sectors is not None else quarters
def __clean_initial_flux(self, object_info, time, flux, flux_err, star_info, cadence):
clean_time = time
clean_flux = flux
clean_flux_err = flux_err
is_short_cadence = round(cadence) <= 5
if self.user_prepare is not None:
clean_time, clean_flux, clean_flux_err = self.user_prepare.prepare(object_info, clean_time, clean_flux, clean_flux_err)
if (is_short_cadence and self.initial_smooth) or (self.initial_rms_mask and object_info.initial_mask is None):
logging.info('================================================')
logging.info('INITIAL FLUX CLEANING')
logging.info('================================================')
if self.initial_rms_mask and object_info.initial_mask is None:
logging.info('Masking high RMS areas by a factor of %.2f with %.1f hours binning',
self.initial_rms_threshold, self.initial_rms_bin_hours)
bins_per_day = 24 / self.initial_rms_bin_hours
before_flux = clean_flux
fig, axs = plt.subplots(2, 1, figsize=(8, 8), constrained_layout=True)
axs[1].scatter(time, before_flux, color='gray', alpha=0.5, rasterized=True, label="Flux")
bins = (clean_time[len(clean_time) - 1] - clean_time[0]) * bins_per_day
bin_stds, bin_edges, binnumber = stats.binned_statistic(clean_time, clean_flux, statistic='std', bins=bins)
stds_median = np.nanmedian(bin_stds[bin_stds > 0])
stds_median_array = np.full(len(bin_stds), stds_median)
rms_threshold_array = stds_median_array * self.initial_rms_threshold
too_high_bin_stds_indexes = np.argwhere(bin_stds > rms_threshold_array)
high_std_mask = np.array([bin_id - 1 in too_high_bin_stds_indexes for bin_id in binnumber])
clean_time = clean_time[~high_std_mask]
clean_flux = clean_flux[~high_std_mask]
clean_flux_err = flux_err[~high_std_mask]
bin_width = (bin_edges[1] - bin_edges[0])
bin_centers = bin_edges[1:] - bin_width / 2
axs[0].plot(bin_centers, bin_stds, color='black', alpha=0.75, rasterized=True, label="RMS")
axs[0].plot(bin_centers, rms_threshold_array, color='red', rasterized=True, label='Mask Threshold')
axs[0].set_title(str(self.initial_rms_bin_hours) + " hours binned RMS")
axs[0].legend(loc="upper right")
axs[1].scatter(time[high_std_mask], before_flux[high_std_mask], linewidth=1, color='red', alpha=1.0, label="High RMS")
axs[1].legend(loc="upper right")
axs[1].set_title("Total and masked high RMS flux")
fig.suptitle(str(star_info.object_id) + " High RMS Mask")
axs[0].set_xlabel('Time')
axs[0].set_ylabel('Flux RMS')
axs[1].set_xlabel('Time')
axs[1].set_ylabel('Flux')
plot_dir = self.__init_object_dir(star_info.object_id)
fig.savefig(plot_dir + 'High_RMS_Mask_' + str(star_info.object_id) + '.png', dpi=200)
fig.clf()
if is_short_cadence and self.initial_smooth:
logging.info('Applying Savitzky-Golay filter')
clean_flux = savgol_filter(clean_flux, 11, 3)
#clean_flux = uniform_filter1d(clean_flux, 11)
#clean_flux = self.flatten_bw(self.FlattenInput(clean_time, clean_flux, 0.02))[0]
return clean_time, clean_flux, clean_flux_err
def __calculate_max_significant_period(self, lc, periodogram):
#max_accepted_period = (lc.time[len(lc.time) - 1] - lc.time[0]) / 4
max_accepted_period = np.float64(10)
# TODO related to https://github.com/franpoz/SHERLOCK/issues/29 check whether this fits better
max_power_index = np.argmax(periodogram.power)
period = periodogram.period[max_power_index]
if max_power_index > 0.0008:
period = period.value
logging.info("Auto-Detrend found the strong period: " + str(period) + ".")
else:
logging.info("Auto-Detrend did not find relevant periods.")
period = None
return period
def __detrend_by_period(self, time, flux, period_window):
if self.auto_detrend_method == 'gp':
flatten_lc, lc_trend = flatten(time, flux, method=self.detrend_method, kernel='matern',
kernel_size=period_window, return_trend=True, break_tolerance=0.5)
else:
flatten_lc, lc_trend = flatten(time, flux, window_length=period_window, return_trend=True,
method=self.auto_detrend_method, break_tolerance=0.5)
return flatten_lc, lc_trend
def __analyse(self, object_info, time, lcs, flux_err, star_info, id_run, transits_min_count, cadence, report, wl):
logging.info('=================================')
logging.info('SEARCH OF SIGNALS - Run %s', id_run)
logging.info('=================================')
transit_results = self.__identify_signals(object_info, time, lcs, flux_err, star_info, transits_min_count, wl, id_run, cadence, report)
signal_selection = self.signal_score_selectors[self.best_signal_algorithm]\
.select(transit_results, self.snr_min, self.detrend_method, wl)
logging.info(signal_selection.get_message())
return transit_results, signal_selection
    def __detrend(self, time, lc, star_info):
        """Produce ``self.n_detrends`` flattened copies of ``lc`` over a grid of window/kernel sizes.

        Logs an RMS summary for the raw curve and each detrend, saves a
        diagnostic plot of every trend, and returns ``(final_lcs, wl)``:
        the matrix of detrended curves and the window/kernel sizes used.
        """
        # per-object window/kernel size bounds
        wl_min = self.wl_min[star_info.object_id]
        wl_max = self.wl_max[star_info.object_id]
        # NOTE(review): this is a float bin count handed to binned_statistic —
        # presumably accepted by the scipy version in use; confirm.
        bins = len(time) * 2 / self.bin_minutes
        bin_means, bin_edges, binnumber = stats.binned_statistic(time, lc, statistic='mean', bins=bins)
        logging.info('=================================')
        logging.info('MODELS IN THE DETRENDING')
        logging.info('=================================')
        logging.info("%-25s%-17s%-15s%-11s%-15s", "light_curve", "Detrend_method", "win/ker_size", "RMS (ppm)",
                     "RMS_10min (ppm)")
        # baseline row: the undetrended PDCSAP flux
        logging.info("%-25s%-17s%-15s%-11.2f%-15.2f", "PDCSAP_FLUX", "---", "---", np.std(lc) * 1e6,
                     np.std(bin_means[~np.isnan(bin_means)]) * 1e6)
        wl_step = (wl_max - wl_min) / self.n_detrends
        wl = np.arange(wl_min, wl_max, wl_step)  # all the possible window lengths that we apply
        final_lcs = np.zeros((len(wl), len(lc)))
        ## save in a plot all the detrendings and all the data to inspect visually.
        figsize = (8, 8)  # x,y
        rows = self.detrend_plot_axis[self.n_detrends - 1][0]
        cols = self.detrend_plot_axis[self.n_detrends - 1][1]
        # NOTE(review): `shift` is computed but not used in this method — confirm it is needed.
        shift = 2 * (1.0 - (np.min(lc)))  # shift in the between the raw and detrended data
        fig, axs = plt.subplots(rows, cols, figsize=figsize, constrained_layout=True)
        if self.n_detrends > 1:
            axs = self.__trim_axs(axs, len(wl))
        flatten_inputs = []
        flattener = Flattener()
        # run the detrends in parallel when more than one core is configured
        if self.detrend_cores > 1:
            for i in range(0, len(wl)):
                flatten_inputs.append(FlattenInput(time, lc, wl[i], self.bin_minutes))
            if self.detrend_method == 'gp':
                flatten_results = self.run_multiprocessing(self.run_cores, flattener.flatten_gp, flatten_inputs)
            else:
                flatten_results = self.run_multiprocessing(self.run_cores, flattener.flatten_bw, flatten_inputs)
        else:
            flatten_results = []
            for i in range(0, len(wl)):
                if self.detrend_method == 'gp':
                    flatten_results.append(flattener.flatten_gp(FlattenInput(time, lc, wl[i], self.bin_minutes)))
                else:
                    flatten_results.append(flattener.flatten_bw(FlattenInput(time, lc, wl[i], self.bin_minutes)))
        i = 0
        plot_axs = axs
        # collect each detrended curve, log its RMS and draw its trend
        for flatten_lc_detrended, lc_trend, bin_centers, bin_means, flatten_wl in flatten_results:
            if self.n_detrends > 1:
                plot_axs = axs[i]
            final_lcs[i] = flatten_lc_detrended
            logging.info("%-25s%-17s%-15.4f%-11.2f%-15.2f", 'flatten_lc & trend_lc ' + str(i), self.detrend_method,
                         flatten_wl, np.std(flatten_lc_detrended) * 1e6, np.std(bin_means[~np.isnan(bin_means)]) * 1e6)
            if self.detrend_method == 'gp':
                plot_axs.set_title('ks=%s' % str(np.around(flatten_wl, decimals=4)))
            else:
                plot_axs.set_title('ws=%s' % str(np.around(flatten_wl, decimals=4)))
            plot_axs.plot(time, lc, linewidth=0.05, color='black', alpha=0.75, rasterized=True)
            plot_axs.plot(time, lc_trend, linewidth=1, color='orange', alpha=1.0)
            i = i + 1
        plot_dir = self.__init_object_dir(star_info.object_id)
        plt.savefig(plot_dir + 'Detrends_' + str(star_info.object_id) + '.png', dpi=200)
        fig.clf()
        plt.close(fig)
        return final_lcs, wl
    def __identify_signals(self, object_info, time, lcs, flux_err, star_info, transits_min_count, wl, id_run, cadence, report):
        """Run the transit search on the raw curve and on every detrended curve.

        For each curve: dump it to ``lc_<i>.csv`` in the run directory, fit a
        transit with ``__adjust_transit``, log a summary row, and save a plot.
        Index 0 is the PDCSAP (undetrended) flux; indices 1..len(wl)-1 are the
        detrended curves. Returns a dict mapping curve index to its result.
        """
        detrend_logging_customs = 'ker_size' if self.detrend_method == 'gp' else "win_size"
        # header row for the per-curve summary table below
        logging.info("%-12s%-10s%-10s%-8s%-18s%-14s%-14s%-12s%-12s%-14s%-16s%-14s%-12s%-25s%-10s%-18s%-20s",
                     detrend_logging_customs, "Period", "Per_err", "N.Tran", "Mean Depth (ppt)", "T. dur (min)", "T0",
                     "SNR", "SDE", "FAP", "Border_score", "Matching OI", "Harmonic", "Planet radius (R_Earth)", "Rp/Rs",
                     "Semi-major axis", "Habitability Zone")
        transit_results = {}
        object_dir = self.__init_object_dir(object_info.sherlock_id())
        run_dir = self.__init_object_run_dir(object_info.sherlock_id(), id_run)
        # --- curve 0: the raw PDCSAP flux ---
        lc_df = pandas.DataFrame(columns=['#time', 'flux', 'flux_err'])
        args = np.argwhere(~np.isnan(lcs[0])).flatten()  # keep only non-NaN samples
        lc_df['#time'] = time[args]
        lc_df['flux'] = lcs[0][args]
        lc_df['flux_err'] = flux_err[args]
        lc_df.to_csv(run_dir + "/lc_0.csv", index=False)
        transit_result = self.__adjust_transit(time, lcs[0], star_info, transits_min_count, transit_results, report, cadence)
        transit_results[0] = transit_result
        r_planet = self.__calculate_planet_radius(star_info, transit_result.depth)
        rp_rs = transit_result.results.rp_rs
        a, habitability_zone = self.habitability_calculator \
            .calculate_hz_score(star_info.teff, star_info.mass, star_info.lum, transit_result.period)
        oi = self.__find_matching_oi(object_info, transit_result.period)
        logging.info('%-12s%-10.5f%-10.6f%-8s%-18.3f%-14.1f%-14.4f%-12.3f%-12.3f%-14s%-16.2f%-14s%-12s%-25.5f%-10.5f%-18.5f%-20s',
                     "PDCSAP_FLUX", transit_result.period,
                     transit_result.per_err, transit_result.count, transit_result.depth,
                     transit_result.duration * 24 * 60, transit_result.t0, transit_result.snr, transit_result.sde,
                     transit_result.fap, transit_result.border_score, oi, transit_result.harmonic, r_planet, rp_rs, a,
                     habitability_zone)
        # NOTE(review): there is no separator between the run number and
        # 'PDCSAP_FLUX' in this title — looks unintended, but it is a runtime
        # string so it is kept as-is here.
        plot_title = 'Run ' + str(id_run) + 'PDCSAP_FLUX # P=' + \
                     format(transit_result.period, '.2f') + 'd # T0=' + format(transit_result.t0, '.2f') + \
                     ' # Depth=' + format(transit_result.depth, '.4f') + ' # Dur=' + \
                     format(transit_result.duration * 24 * 60, '.0f') + 'm # SNR:' + \
                     str(format(transit_result.snr, '.2f')) + ' # SDE:' + str(format(transit_result.sde, '.2f')) + \
                     ' # FAP:' + format(transit_result.fap, '.6f')
        plot_file = 'Run_' + str(id_run) + '_PDCSAP-FLUX_' + str(star_info.object_id) + '.png'
        self.__save_transit_plot(star_info.object_id, plot_title, plot_file, time, lcs[0], transit_result, cadence,
                                 id_run)
        # --- curves 1..n: one search per detrended light curve ---
        for i in range(1, len(wl)):
            lc_df = pandas.DataFrame(columns=['#time', 'flux', 'flux_err'])
            args = np.argwhere(~np.isnan(lcs[i])).flatten()
            lc_df['#time'] = time[args]
            lc_df['flux'] = lcs[i][args]
            lc_df['flux_err'] = flux_err[args]
            lc_df.to_csv(run_dir + "/lc_" + str(i) + ".csv", index=False)
            transit_result = self.__adjust_transit(time, lcs[i], star_info, transits_min_count, transit_results, report, cadence)
            transit_results[i] = transit_result
            r_planet = self.__calculate_planet_radius(star_info, transit_result.depth)
            rp_rs = transit_result.results.rp_rs
            a, habitability_zone = self.habitability_calculator \
                .calculate_hz_score(star_info.teff, star_info.mass, star_info.lum, transit_result.period)
            oi = self.__find_matching_oi(object_info, transit_result.period)
            logging.info('%-12.4f%-10.5f%-10.6f%-8s%-18.3f%-14.1f%-14.4f%-12.3f%-12.3f%-14s%-16.2f%-14s%-12s%-25.5f%-10.5f%-18.5f%-20s',
                         wl[i], transit_result.period,
                         transit_result.per_err, transit_result.count, transit_result.depth,
                         transit_result.duration * 24 * 60, transit_result.t0, transit_result.snr, transit_result.sde,
                         transit_result.fap, transit_result.border_score, oi, transit_result.harmonic, r_planet, rp_rs, a,
                         habitability_zone)
            detrend_file_title_customs = 'ker_size' if self.detrend_method == 'gp' else 'win_size'
            detrend_file_name_customs = 'ks' if self.detrend_method == 'gp' else 'ws'
            title = 'Run ' + str(id_run) + '# ' + detrend_file_title_customs + ':' + str(format(wl[i], '.4f')) + \
                    ' # P=' + format(transit_result.period, '.2f') + 'd # T0=' + \
                    format(transit_result.t0, '.2f') + ' # Depth=' + format(transit_result.depth, '.4f') + " # Dur=" + \
                    format(transit_result.duration * 24 * 60, '.0f') + 'm # SNR:' + \
                    str(format(transit_result.snr, '.2f')) + ' # SDE:' + str(format(transit_result.sde, '.2f')) + \
                    ' # FAP:' + format(transit_result.fap, '.6f')
            file = 'Run_' + str(id_run) + '_' + detrend_file_name_customs + '=' + str(format(wl[i], '.4f')) + '_' + \
                   str(star_info.object_id) + '.png'
            self.__save_transit_plot(star_info.object_id, title, file, time, lcs[i], transit_result, cadence, id_run)
        return transit_results
def __find_matching_oi(self, object_info, period):
if self.ois is not None:
existing_period_in_object = self.ois[(self.ois["Object Id"] == object_info.mission_id()) &
(0.95 < self.ois["Period (days)"] / period) &
(self.ois["Period (days)"] / period < 1.05)]
existing_period_in_oi = existing_period_in_object[existing_period_in_object["OI"].notnull()]
oi = existing_period_in_oi["OI"].iloc[0] if len(
existing_period_in_oi.index) > 0 else np.nan
else:
oi = ""
return oi
def __adjust_transit(self, time, lc, star_info, transits_min_count, run_results, report, cadence):
model = tls.transitleastsquares(time, lc)
| |
#!/usr/bin/python3
import threading, queue, time
import picamera.array as picamarray, numpy, pathlib
import numpy.ma as nma
import png, io
from pootlestuff import watchables as wv
class piCamCPU(wv.watchablesmart):
    """
    a base class for things that want to analyse images in detail for movement detection, exposure adjustment or anything else.

    It uses picamera to resize frames (to reduce processing load and reduce noise), pulls out each frame and passes it to
    an analyser.
    """
    def __init__(self, statusvals, wabledefs, startbtnvals, loglevel=wv.loglvls.INFO, **kwargs):
        """Set up the watchables and optionally autostart the monitor thread.

        statusvals  : sequence of status names; the first is the initial status
        wabledefs   : extra watchable definitions supplied by the subclass
        startbtnvals: labels for the start/stop button; the first is the initial state
        loglevel    : log level used for this object's logging
        """
        # subclasses must supply the monitor() entry point run by the worker thread
        assert hasattr(self, 'monitor')
        super().__init__(wabledefs=[
            ('status', wv.enumWatch, statusvals[0], False, {'vlist': statusvals}),
            ('startstopbtn',wv.enumWatch, startbtnvals[0], False, {'vlist': startbtnvals}),
            ('autostart', wv.enumWatch, 'off', True, {'vlist': ('off', 'on')}),
            ('width', wv.intWatch, 128, True, {'minv': 8, 'maxv': 800}),       # analysis frame width
            ('height', wv.intWatch, 96, True, {'minv': 6, 'maxv': 600}),       # analysis frame height
            ('lastactive', wv.floatWatch, float('nan'), False),                 # timestamp the monitor last started/stopped
            ('imagemode', wv.enumWatch, 'rgb', True, {'vlist': ('rgb', 'yuv')}),
            ('imagechannel',wv.enumWatch, '0', True, {'vlist': ('0','1','2', '*')}),  # '*' = all 3 planes
            ('skippedcount',wv.intWatch, 0, False),                             # frames dropped because analysis was busy
            ('analysedcount',wv.intWatch, 0, False),
            ]+wabledefs,
            **kwargs)
        self.agentclass=self.app.agentclass
        self.monthread=None
        self.procthread=None
        self.loglevel=loglevel
        if self.autostart.getIndex()==1:
            self.startstopbtn.setIndex(1,wv.myagents.app)
            self.running=True
            self.monthread=threading.Thread(name=type(self).__name__, target=self.monitor, kwargs={'startdelay':2.5})
            self.monthread.start()
        self.startstopbtn.addNotify(self.do_startstop, wv.myagents.user)

    def do_startstop(self, watched, agent, newValue, oldValue):
        """
        called when the user clicks the start / stop button

        to start running detection, run up a thread on the 'monitor' function of this object
        """
        btnstate=watched.getIndex()
        if self.monthread==None and btnstate==1:
            self.running=True
            self.monthread=threading.Thread(name=type(self).__name__, target=self.monitor)
            self.monthread.start()
        elif not self.monthread==None and btnstate==0:
            self.running=False
        else:
            self.log(wv.loglvls.WARN,' inconsistent move detection states running is %s and button was %s' % (self.running, oldValue))

    def preparearray(self):
        """
        prepares / updates a numpy array or masked array dependent on various variables
        """
        nshape=[self.height.getValue(),self.width.getValue()]
        if self.imagechannel.getIndex() == 3:
            # imagechannel '*' keeps all 3 colour planes, so add a 3rd axis.
            # Bug fix: this used to read 'nspage.append(3)' which raised NameError.
            nshape.append(3)
        return numpy.empty(shape=nshape, dtype=numpy.int16)

    def monitor(self, startdelay=0):
        """
        This function coordinates cpu based movement detection, it runs in its own thread within the loop of a picamera.capture_sequence call
        until self.running is set False.

        buffercycle (a generator) runs in a loop to process each frame.

        This also starts another thread to analyse successive frames from the camera, this thread uses a threadsafe queue (which only ever has 1 entry)
        to trigger analysis (if analysis still running when next frame arrives, it is discarded)
        """
        if startdelay > 0:
            time.sleep(startdelay)
        self.status.setIndex(1, self.agentclass.app)
        self.lastactive.setValue(time.time(), self.agentclass.app)
        picam=self.app.startCamera()
        # picamera requires resize dims rounded up to multiples of 32 (w) / 16 (h)
        resize=((self.width.getValue()+31) // 32 * 32, (self.height.getValue()+15) // 16 * 16)
        self.freebuffs=queue.Queue()
        arraytype=picamarray.PiRGBArray if self.imagemode.getValue()=='rgb' else picamarray.PiYUVArray
        for i in range(3):
            self.freebuffs.put(arraytype(picam, size=resize))
        self.camerabuff=None                # the buffer currently being filled
        self.pendingbuffs=queue.Queue(maxsize=1) # and a queue of buffers we want to analyse - restricted to 1 - just using threadsafeness
        splitter_port=self.app._getSplitterPort(type(self).__name__)
        self.log(wv.loglvls.INFO, 'cpu move detect using port %d and image size %s' % (splitter_port, resize))
        time.sleep(.1)
        self.condition=None                 # used to trigger detection overlay streaming
        self.analthread=threading.Thread(name='cpuanalyse', target=self.analysethread)
        self.analthread.start()
        # blocks here until buffercycle stops yielding (self.running False or overflow)
        picam.capture_sequence(self.buffercycle(),
                format='rgb' if self.imagemode.getValue()=='rgb' else 'yuv',
                resize=resize, splitter_port=splitter_port, use_video_port=True)
        self.camerabuff=None
        self.pendingbuffs=None
        self.freebuffs=None
        self.app._releaseSplitterPort(type(self).__name__, splitter_port)
        self.lastactive.setValue(time.time(), self.agentclass.app)
        self.monthread=None
        self.analthread.join()
        self.analthread=None
        self.status.setIndex(0, self.agentclass.app)

    def buffercycle(self):
        """
        This generator function is used by picamera.capture_sequence to yield buffers to capture_sequence.

        A small pool of buffers is used, and each time it runs round the loop it records the last filled buffer so
        the analyse thread can pick up the latest frame whenever it is ready.
        """
        try:
            while self.running:
                try:
                    nextbuff=self.freebuffs.get_nowait()
                except queue.Empty:
                    nextbuff=None
                if nextbuff is None:
                    # no free buffer right now - record the overrun and retry briefly.
                    # NOTE(review): 'overruns' is expected to be defined by the subclass
                    # (see MoveDetectCPU) - confirm every subclass provides it.
                    self.overruns.increment(agent=self.agentclass.app)
                    time.sleep(.2)
                    try:
                        nextbuff=self.freebuffs.get_nowait()
                    except queue.Empty:
                        # Bug fix: previously `raise StopIteration()` (with an
                        # unreachable log line after it). Under PEP 479 a
                        # StopIteration raised inside a generator becomes a
                        # RuntimeError on Python 3.7+, and here it was swallowed
                        # by the outer bare except. Log and end the generator.
                        self.log(wv.loglvls.ERROR,'irrecoverable buffer overflow')
                        return
                prevbuff=self.camerabuff
                self.camerabuff=nextbuff
                if not prevbuff is None:
                    try:
                        # if a buffer is still pending analysis, recycle it (frame skipped)
                        expiredbuff=self.pendingbuffs.get_nowait()
                        expiredbuff.truncate(0)
                        self.freebuffs.put(expiredbuff)
                        self.skippedcount.increment(agent=self.agentclass.app)
                    except queue.Empty:
                        pass
                    self.pendingbuffs.put_nowait(prevbuff)
                yield nextbuff
        except:
            self.log(wv.loglvls.DEBUG,'move detect thread problem!', exc_info=True)

    def analysethread(self):
        """Worker loop: pull pending frames and analyse them, tracking busy / cpu usage stats."""
        prevbuff=None
        clocktimestart=time.time()
        # Bug fix: time.clock() was removed in Python 3.8; process_time() is the
        # per-process cpu time replacement.
        cputimestart=time.process_time()
        busytime=0
        busystart=time.time()
        tick5=busystart+5                   # next time to refresh the usage stats
        logpal=None
        logpng=None
        detstreamcount=0
        channel=self.imagechannel.getIndex()
        workarray=None
        while self.running:
            try:
                busytime+=time.time()-busystart
                thisbuff=self.pendingbuffs.get(block=True, timeout=2)
                busystart=time.time()
            except queue.Empty:
                thisbuff=None
            if not thisbuff is None:
                thisbuff, prevbuff, workarray = self.analysebuff(thisbuff, prevbuff, workarray, channel)
                prevbuff=thisbuff
            if time.time() > tick5:
                elapsed=time.time()-clocktimestart
                self.analcpu.setValue(100*(time.process_time()-cputimestart)/elapsed,self.agentclass.app)
                self.analbusy.setValue(100*busytime/elapsed, self.agentclass.app)
                tick5+=5
        if self.condition:
            try:
                self.condition.notify_all() # release clients one last time
            except:
                pass
            self.condition=None
class MoveDetectCPU(piCamCPU):
"""
This class analyses successive frames and looks for significant change, setting its 'triggered' watchable True when movement is detected.
This remains True until all frames for 'latchtime' have not detected movement. Anything wanting to be triggered can poll or set a notification
on this watchable.
The code uses picamera to resize the frames (which happens in the GPU) to a (typically) much smaller size for analysis in this thread.
Initially this class just sets up a bunch of variables that control and monitor this functionality. When detection is active, it runs a monitor thread
to drive the camera and grab frames, and a further thread to actually analyse the frames.
The mpnitor thread creates a small number of buffers and uses picamera.capture_sequence to run the camera, the capture_sequence call does not return
until an external event causes capture sequence to stop.
The buffers are allocated and managed by the member function buffercycle which is called from within picamera.capture_sequence. 'buffercycle' uses 'yield'
to give a free buffer back to the camera, and passes places the buffer just filled to be ready for the analysis thread to use. If there was already a buffer
waiting for analysis this expired buffer is returned to the free list and replaced by the more recent buffer, if the analysis thread has grabbed the previous
buffer, the analysis thread returns it to the queue when it has dealt with it.
Starting with the second buffer, the analysis thread picks 1 channel from the buffer and compares it with previous frame to check for differences.
"""
    def __init__(self, statusvals=('off', 'watching', 'triggered'), startbtnvals=('start watching', 'stop watching'), **kwargs):
        """
        initialisation just sets up the vars used.

        Adds the movement-detection specific watchables on top of the piCamCPU base set.
        """
        super().__init__(statusvals=statusvals, startbtnvals=startbtnvals, wabledefs=[
            ('triggercount', wv.intWatch, 0, False),          # number of times detection has triggered
            ('lasttrigger', wv.floatWatch, float('nan'), False),  # timestamp of the most recent trigger
            ('cellthresh', wv.intWatch, 22, True, {'minv': 1, 'maxv': 255}),  # per-cell difference needed to count as changed
            ('celltrigcount', wv.intWatch, 100, True, {'minv': 1}),  # changed cells needed to trigger
            ('latchtime', wv.floatWatch, 4, True),            # seconds 'triggered' is held after the last trigger
            ('maskfold', wv.folderWatch, '~/camfiles/masks', True),  # folder holding mask png files
            ('maskfile', wv.textWatch, '-off-', True),        # selected mask file; '-off-' disables masking
            ('overruns', wv.intWatch, 0, False),              # camera buffer overruns (counted in buffercycle)
            ('analbusy', wv.floatWatch, 0, False),            # % of elapsed time the analyse thread was busy
            ('analcpu', wv.floatWatch, 0, False),             # % cpu used by the analyse thread
            ], **kwargs)
        # NOTE(review): if autostart is 'on', piCamCPU.__init__ sets running=True and
        # starts the monitor thread; setting it False here could stop that thread
        # almost immediately - confirm this interaction is intended.
        self.running=False
def fetchmasksize(self):
"""
called from web server to retrieve info about mask in preparation for editing
"""
rr={'width' : self.width.getValue(),
'height' : self.height.getValue(),
}
return rr
def savemask(self, pathinf, name, mask):
"""
called from webserver when user saves a mask after editing
"""
mfile=(self.maskfold.getFolder()/name).with_suffix('.png')
print('savemask (%3d/%3d) to %s (%s): ' % (len(mask[0]), len(mask), name, mfile))
pw = png.Writer(len(mask[0]), len(mask), greyscale=True, bitdepth=1)
with mfile.open('wb') as fff:
pw.write(fff,mask)
return {'resp': 200, 'rdata':{'message': 'saved to %s' % mfile}}
    def checkmask(self, var, agent, newValue, oldValue):
        """Watchable-change callback for the mask setting; currently a no-op placeholder."""
        pass
    def preparearray(self):
        """
        prepares / updates a numpy array or masked array dependent on various variables
        """
        # NOTE(review): this unconditional early return disables the masking
        # support below - everything after it is dead code.
        if True:
            return super().preparearray()
        # --- dead code from here down: 'dataarray' and 'maskfile' are not
        # defined in this scope, so re-enabling it as-is would raise NameError. ---
        if self.maskfile.getValue()=='-off-':
            return dataarray
        else:
            mfile=(self.maskfold.getValue()/self.maskfile.getValue()).with_suffix('.png')
            if mfile.is_file():
                with mfile.open('rb') as mfo:
                    mwidth, mheight, mrows, minfo = png.Reader(file=mfo).read()
                    rowdat=[m for m in mrows]
                if mwidth==self.width.getValue() and mheight==self.height.getValue():
                    if minfo['planes']==1 and minfo['bitdepth']==1:
                        mask=numpy.array(rowdat,dtype=numpy.bool_)
                        self.log(wv.loglvls.INFO,'mask updated from %s %d of %d masked' % (str(mfile), len(numpy.nonzero(mask)[0]), mask.shape[0]*mask.shape[1]))
                        return nma.masked_array(data=dataarray, mask=mask)
                    else:
                        # NOTE(review): minfo is treated as a dict above but with
                        # attribute access here - inconsistent if re-enabled.
                        self.log(wv.loglvls.INFO, 'mask file has %d planes and bitdepth %d: should be 1 and 1' % (minfo.planes, minfo.bit_depth))
                else:
                    self.log(wv.loglvls.INFO,'mask image is wrong size - expected (%3d/%3d), file has (%3d/%3d)' % (self['width'].getValue(), self['height'].getValue(), mwidth, mheight))
            else:
                self.log(wv.loglvls.INFO, 'unable to get maskfile %s' % str(maskfile))
            return dataarray
def analysebuff(self, thisbuff, prevbuff, workarray, channel):
if prevbuff is None:
workarray=self.preparearray()
else:
logthresh=self. cellthresh.getValue()
if channel == 3:
numpy.copyto(workarray, thisbuff.array)
workarray -= prevbuff.array
else:
numpy.copyto(workarray, thisbuff.array[:,:,channel])
workarray -= prevbuff.array[:,:,channel]
numpy.absolute(workarray, workarray)
cthresh=self.cellthresh.getValue()
if logthresh != cthresh:
logthresh = cthresh
logpal=None
hits=(workarray >= logthresh).nonzero()
trig=len(hits[0]) >=self.celltrigcount.getValue()
if trig:
if self.status.getIndex() < 2:
self.triggercount.increment(agent=self.agentclass.app)
self.status.setIndex(2, agent=self.agentclass.app)
self.lasttrigger.setValue(time.time(), agent=self.agentclass.app)
else:
if self.status.getIndex() > 1 and time.time() > (self.lasttrigger.getValue() + self.latchtime.getValue()):
self.status.setIndex(1, agent=self.agentclass.app)
if not self.condition is None: # check if we're streaming the detection overlay
if self.laststreamactive+5 < time.time():
# client(s) all gone - stop the stream
print('clients gone - shut detect stream')
self.condition=None
logpal=None
streaming=None
logpng=None
else:
if logpal is None:
logpal=makepalette(logthresh)
streamimg = io.BytesIO()
arshape=workarray.shape
if logpng is None:
logpng = png.Writer(arshape[1], arshape[0], palette=logpal)
detimg=workarray.filled(fill_value=0) if hasattr(workarray, 'filled') else workarray
if trig and not detoverlay['xbase'] is None: # check if we want a blob
xb=detoverlay['xbase']
yb=detoverlay['ybase']
if abs(xb) < self.width.getValue() and abs(yb) < self.height.getValue():
if xb | |
refresh
trialMouse.status = STARTED
prevButtonState = [0, 0, 0] # if now button is down we will treat as 'new' click
if trialMouse.status == STARTED: # only update if started and not finished!
x, y = trialMouse.getPos()
trialMouse.x.append(x)
trialMouse.y.append(y)
buttons = trialMouse.getPressed()
trialMouse.leftButton.append(buttons[0])
trialMouse.midButton.append(buttons[1])
trialMouse.rightButton.append(buttons[2])
trialMouse.time.append(trialMouse.mouseClock.getTime())
CursorTargetDistance = sqrt((trialCursor.pos[0]-trialTarget.pos[0])**2 + (trialCursor.pos[1]-trialTarget.pos[1])**2)
CursorHomeDistance = sqrt(trialCursor.pos[0]**2 + trialCursor.pos[1]**2)
steps.append(trialStep)
# steps.push(step)
if counter == 1:
cursorPosX = trialCursor.pos[0]
CursorPosY = trialCursor.pos[1]
if not(homeStart):
homeOpacity = 1
targetOpacity = 0
trialStep = 1
if (CursorHomeDistance < .05):
homeStart = True
print('end step 1'+' ('+str(globalClock.getTime())+')')
if (not(reachOut) and homeStart):
homeOpacity = 0
targetOpacity = 1
trialStep = 2
if (CursorTargetDistance < .05):
reachOut = True
print('end step 2'+' ('+str(globalClock.getTime())+')')
if (reachOut):
homeOpacity = 1
targetOpacity = 0
trialStep = 3
if (CursorHomeDistance < .05):
# maybe this ends the loop prematurely?
print('end step 3'+' ('+str(globalClock.getTime())+')')
continueRoutine = False
#steps = steps.append(step)
if counter == 2:
cursorPosX = sqrt((trial4Mouse.getPos()[0]**2)+(trial4Mouse.getPos()[1]**2))*(cos(theta))
CursorPosY = sqrt((trial4Mouse.getPos()[0]**2)+(trial4Mouse.getPos()[1]**2))*(sin(theta))
if step == 2:
theta = (targetangle / 180) * pi
rx = dist * cos(theta)
ry = dist * sin(theta)
radius = 1
else:
if dist > 1:
rx = homex
ry = homey
radius = dist
diameter = 2*dist
else:
rx = xmouse
ry = ymouse
radius = 1
if not(homeStart):
homeOpacity = 1
targetOpacity = 0
trialStep = 1
bufferOpacity = 0
cursorOpacity = 1
if (CursorHomeDistance < .075):
homeStart = True
print('end step 1'+' ('+str(globalClock.getTime())+')')
if (not(reachOut) and homeStart):
homeOpacity = 0
targetOpacity = 1
trialStep = 2
bufferOpacity = 0
cursorOpacity = 1
if (CursorTargetDistance < .025):
reachOut = True
print('end step 2'+' ('+str(globalClock.getTime())+')')
if (reachOut):
homeOpacity = 1
targetOpacity = 0
trialStep = 3
#COntrols the 'buffer'
bufferOpacity = 1
bufferRadius = (sqrt(trialCursor.pos[0]**2 + trialCursor.pos[1]**2))
#controls the cursor
cursorOpacity = 0
if (CursorHomeDistance < .2):
cursorOpacity = 1
if (CursorHomeDistance < .075):
# maybe this ends the loop prematurely?
print('end step 3'+' ('+str(globalClock.getTime())+')')
continueRoutine = False
#steps = steps.append(step)
if counter == 3 or counter == 7:
cursorPosX = trialCursor.pos[0]
CursorPosY = trialCursor.pos[1]
if not(homeStart):
homeOpacity = 1
targetOpacity = 0
trialStep = 1
if (CursorHomeDistance < .05):
homeStart = True
print('end step 1'+' ('+str(globalClock.getTime())+')')
if (not(reachOut) and homeStart):
homeOpacity = 0
targetOpacity = 1
trialStep = 2
if (CursorTargetDistance < .05):
reachOut = True
print('end step 2'+' ('+str(globalClock.getTime())+')')
if (reachOut):
homeOpacity = 1
targetOpacity = 0
trialStep = 3
if (CursorHomeDistance < .05):
# maybe this ends the loop prematurely?
print('end step 3'+' ('+str(globalClock.getTime())+')')
continueRoutine = False
#steps = steps.append(step)
if counter == 4 or counter == 8:
cursorPosX = (trialMouse.getPos()[0]*cos(rtd))-(trialMouse.getPos()[1]*sin(rtd))
CursorPosY = (trialMouse.getPos()[0]*sin(rtd))+(trialMouse.getPos()[1]*cos(rtd))
if not(homeStart):
homeOpacity = 1
targetOpacity = 0
trialStep = 1
if (CursorHomeDistance < .05):
homeStart = True
print('end step 1'+' ('+str(globalClock.getTime())+')')
if (not(reachOut) and homeStart):
homeOpacity = 0
targetOpacity = 1
trialStep = 2
if (CursorTargetDistance < .05):
reachOut = True
print('end step 2'+' ('+str(globalClock.getTime())+')')
if (reachOut):
homeOpacity = 1
targetOpacity = 0
trialStep = 3
if (CursorHomeDistance < .05):
# maybe this ends the loop prematurely?
print('end step 3'+' ('+str(globalClock.getTime())+')')
continueRoutine = False
#steps = steps.append(step)
if counter == 5 or counter == 9:
cursorPosX = (trialMouse.getPos()[0]*cos(rtd))-(trialMouse.getPos()[1]*sin(rtd))
CursorPosY = (trialMouse.getPos()[0]*sin(rtd))+(trialMouse.getPos()[1]*cos(rtd))
if not(homeStart):
homeOpacity = 1
targetOpacity = 0
trialStep = 1
if (CursorHomeDistance < .025):
homeStart = True
print('end step 1'+' ('+str(globalClock.getTime())+')')
if (not(reachOut) and homeStart):
homeOpacity = 0
targetOpacity = 1
trialStep = 2
if (CursorTargetDistance < .025):
reachOut = True
print('end step 2'+' ('+str(globalClock.getTime())+')')
if (reachOut):
homeOpacity = 1
targetOpacity = 0
trialStep = 3
if (CursorHomeDistance < .025):
# maybe this ends the loop prematurely?
print('end step 3'+' ('+str(globalClock.getTime())+')')
continueRoutine = False
#steps = steps.append(step)
if counter == 6 or counter == 10:
CursorTargetDistance = sqrt((trialCursor.pos[0]-trialTarget.pos[0])**2 + (trialCursor.pos[1]-trialTarget.pos[1])**2)
CursorHomeDistance = sqrt(trialCursor.pos[0]**2 + trialCursor.pos[1]**2)
cursorPosX = sqrt((trialMouse.getPos()[0]**2)+(trialMouse.getPos()[1]**2))*(cos(theta))
CursorPosY = sqrt((trialMouse.getPos()[0]**2)+(trialMouse.getPos()[1]**2))*(sin(theta))
if step == 2:
theta = (targetangle / 180) * pi
rx = dist * cos(theta)
ry = dist * sin(theta)
radius = 1
else:
if dist > 1:
rx = homex
ry = homey
radius = dist
diameter = 2*dist
else:
rx = xmouse
ry = ymouse
radius = 1
if not(homeStart):
homeOpacity = 1
targetOpacity = 0
trialStep = 1
bufferOpacity = 0
cursorOpacity = 1
if (CursorHomeDistance < .075):
homeStart = True
print('end step 1'+' ('+str(globalClock.getTime())+')')
if (not(reachOut) and homeStart):
homeOpacity = 0
targetOpacity = 1
trial4Step = 2
bufferOpacity = 0
cursorOpacity = 1
if (CursorTargetDistance < .025):
reachOut = True
print('end step 2'+' ('+str(globalClock.getTime())+')')
if (reachOut):
homeOpacity = 1
targetOpacity = 0
trial4Step = 3
#COntrols the 'buffer'
bufferOpacity = 1
bufferRadius = (sqrt(trial4Cursor.pos[0]**2 + trial4Cursor.pos[1]**2))
#controls the cursor
cursorOpacity = 0
if (CursorHomeDistance < .2):
cursorOpacity = 1
if (CursorHomeDistance < .075):
# maybe this ends the loop prematurely?
print('end step 3'+' ('+str(globalClock.getTime())+')')
continueRoutine = False
#steps = steps.append(step)
# *trialTarget* updates
if trialTarget.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
trialTarget.frameNStart = frameN # exact frame index
trialTarget.tStart = t # local t and not account for scr refresh
trialTarget.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(trialTarget, 'tStartRefresh') # time at next scr refresh
trialTarget.setAutoDraw(True)
if trialTarget.status == STARTED: # only update if drawing
trialTarget.setOpacity(targetOpacity, log=False)
trialTarget.setPos(targetPos, log=False)
# *trialHome* updates
if trialHome.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
trialHome.frameNStart = frameN # exact frame index
trialHome.tStart = t # local t and not account for scr refresh
trialHome.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(trialHome, 'tStartRefresh') # time at next scr refresh
trialHome.setAutoDraw(True)
if trialHome.status == STARTED: # only update if drawing
trialHome.setOpacity(homeOpacity, log=False)
trialHome.setPos(homePos, log=False)
# *trialCursor* updates
if trialCursor.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
trialCursor.frameNStart = frameN # exact frame index
trialCursor.tStart = t # local t and not account for scr refresh
trialCursor.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(trialCursor, 'tStartRefresh') # time at next scr refresh
trialCursor.setAutoDraw(True)
if trialCursor.status == STARTED: # only update if drawing
trialCursor.setOpacity(cursorOpacity, log=False)
trialCursor.setPos([mousePosX, mousePosY], log=False)
# *trialNum* updates
if trialNum.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
trialNum.frameNStart = frameN # exact frame index
trialNum.tStart = t # local t and not account for scr refresh
trialNum.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(trialNum, 'tStartRefresh') # time at next scr refresh
trialNum.setAutoDraw(True)
# *trialSkip* updates
waitOnFlip = False
if trialSkip.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
trialSkip.frameNStart = frameN # exact frame index
trialSkip.tStart = t # local t and not account for scr refresh
trialSkip.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(trialSkip, 'tStartRefresh') # time at next scr refresh
trialSkip.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(trialSkip.clock.reset) # t=0 on next screen flip
win.callOnFlip(trialSkip.clearEvents, eventType='keyboard') # clear events on next screen flip
if trialSkip.status == STARTED and not waitOnFlip:
theseKeys = trialSkip.getKeys(keyList=['space'], waitRelease=False)
_trialSkip_allKeys.extend(theseKeys)
if len(_trialSkip_allKeys):
trialSkip.keys = _trialSkip_allKeys[-1].name # just the last key pressed
trialSkip.rt = _trialSkip_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# | |
<reponame>open-publishing/open-publishing-api
import collections
import collections.abc  # explicit: collections.abc is needed for Iterable isinstance checks
import datetime
import json
import mimetypes
import os
import random
import string

import jsonschema
import pkg_resources
import requests

from open_publishing.core.enums import DocumentStatus, Language, Country, VLBCategory
from open_publishing.core.enums import License, PreviewFileType, FileType, ContributorRole, BisacCode, ThemaCode, UsersSearchType
from open_publishing.core.enums import OnixStatus
from .stubbornness import stubborn, RetryNotPossible
class ObjectHasChanged(RetryNotPossible, Exception):
    """The remote object changed (version mismatch); retrying cannot succeed."""
    pass
class ObjectNotFound(RetryNotPossible, Exception):
    """The requested object does not exist; retrying cannot succeed."""
    pass
class AssetCreationError(RetryNotPossible, Exception):
    """Creating an asset failed in a way that a retry cannot fix."""
    pass
class TemporaryNotAvailable(Exception):
    """Transient backend failure; callers (via @stubborn) may retry."""
    pass
class GJP():
    def __init__(self,
                 ctx,
                 validate_json):
        """Low-level GJP/REST client.

        Parameters
        ----------
        ctx : context object providing ``host``, ``auth_context``, ``log``
            and ``requests_kwargs``.
        validate_json : bool
            Forwarded to ``_check_response`` for response validation.
        """
        self._ctx = ctx
        self._validate_json = validate_json
        # cache for resolved enum lookups (see resolve_enum callers)
        self._enum_cache = {}
        self._log = self._ctx.log.getChild('gjp')
        # one Session for all calls -> connection pooling
        self._session = requests.Session()
    @property
    def session(self):
        # The requests.Session shared by every call this client makes.
        return self._session
    @property
    def log(self):
        # Child logger ('<ctx logger>.gjp') used for request tracing.
        return self._log
def update(self, object_class, object_id, version, gjp):
""" General method to update a gjp object """
path = '/resource/v2/'
data = [{
'GUID': '{object_class}.{object_id}'.format(object_class=object_class,
object_id=object_id),
'VERSION': version,
}]
data[0].update(gjp)
headers = {
'Content-type': 'application/json',
'Accept': 'text/plain',
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token
}
response = self._session.put(self._ctx.host + path,
data=json.dumps(data),
headers=headers,
**self._ctx.requests_kwargs)
self._check_response(response, self._validate_json)
def update_chunk(self, gjp_list):
""" General method to update a gjp object """
path = '/resource/v2/'
data = []
for gjp_info in gjp_list:
gjp = gjp_info['gjp'].copy()
gjp['GUID'] = gjp_info['guid']
gjp['VERSION'] = gjp_info['version']
data.append(gjp)
headers = {
'Content-type': 'application/json',
'Accept': 'text/plain',
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.put(self._ctx.host + path,
data=json.dumps(data),
headers=headers,
**self._ctx.requests_kwargs)
self._check_response(response, self._validate_json)
@stubborn
def get(self, object_class, object_id, fields, params=None):
""" General method to retrieve a gjp object """
if object_id is None:
path = '/resource/v2/{object_class}[{fields}]'.format(object_class=object_class,
fields=','.join(fields))
else:
path = '/resource/v2/{object_class}.{object_id}[{fields}]'.format(object_class=object_class,
object_id=object_id,
fields=','.join(fields))
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.get(self._ctx.host + path,
params=params,
headers=headers,
**self._ctx.requests_kwargs)
return self._check_response(response, self._validate_json)['OBJECTS']
@stubborn
def get_chunk(self, guids, fields):
""" General method to retrieve a gjp objects in chunk """
if len(guids) == 0:
return {}
fields_str = ','.join(fields)
path = '/resource/v2/' + ','.join(['{guid}[{fields}]'.format(guid=guid,
fields=fields_str) for guid in set(guids)])
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
self.log.debug(path + '&' + ','.join(['='.join(item) for item in list(params.items())]))
response = self._session.get(self._ctx.host + path,
headers=headers,
**self._ctx.requests_kwargs)
return self._check_response(response, self._validate_json)['OBJECTS']
def delete(self, object_class, object_id):
""" General method to delete a gjp object """
path = '/resource/v2/{object_class}.{object_id}'.format(object_class=object_class,
object_id=object_id)
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.delete(self._ctx.host + path,
headers=headers,
**self._ctx.requests_kwargs)
self._check_response(response, self._validate_json)
def create(self, object_class, **fields):
""" General method to create a gjp object """
data = [{
'GUID': '{0}.'.format(object_class),
'VERSION': 0,
}]
data[0].update(fields)
headers = {
'Content-type': 'application/json',
'Accept': 'text/plain',
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.post(self._ctx.host + '/resource/v2',
data=json.dumps(data),
headers=headers,
**self._ctx.requests_kwargs)
gjp = self._check_response(response, self._validate_json)
guid = gjp['RESULTS'][0]
return gjp['OBJECTS'][guid]
@staticmethod
def _encode_params(params):
res = {}
for key, value in list(params.items()):
if isinstance(value, str):
res[key] = value.encode('utf-8')
else:
res[key] = value
return res
def documents_search(self,
query=None,
status=None,
created=None,
language=None,
page_count=None,
license=None):
""" RPC to search entities"""
params = {
'display': 0,
}
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
if isinstance(query, str):
params['query'] = query
elif query is None:
pass
else:
raise ValueError('Invalid `query`')
if status is not None:
if not isinstance(status, collections.Iterable):
status = [status]
for st in status:
if st not in DocumentStatus:
raise ValueError("Ivalid `status`")
if DocumentStatus.new in status:
params['new_status'] = 'new'
if DocumentStatus.published in status:
params['published_status'] = 'published'
if DocumentStatus.unpublished in status:
params['unpublished_status'] = 'unpublished'
if DocumentStatus.deleted in status:
params['del_status'] = 'deleted'
if created is None:
pass
elif isinstance(created, datetime.date):
params['created-date-from'] = created.strftime("%Y-%m-%d")
params['created-date-to'] = created.strftime("%Y-%m-%d")
elif isinstance(created, tuple) and len(created) == 2:
for date in created:
if not (isinstance(date, datetime.date) or date is None):
raise ValueError("Invalid `created`")
if created[0] is not None:
params['created-date-from'] = created[0].strftime("%Y-%m-%d")
if created[1] is not None:
params['created-date-to'] = created[1].strftime("%Y-%m-%d")
else:
raise ValueError("Invalid `created`")
if language is None:
pass
elif language in Language:
params['language_id'] = self.resolve_enum(Language, enum=language).internal_id
elif isinstance(language, str) and len(language) == 3:
params['language_id'] = self.resolve_enum(Language, code=language).internal_id
else:
raise ValueError("Invalid `language`")
def page_tuple_to_range(t):
def count_to_str(count):
if count is None:
return ""
if isinstance(count, int):
return str(count)
raise ValueError("Invalid page range")
if t is None:
return ""
if t == (None, None):
return ""
if isinstance(t, int):
return str(t)
if isinstance(t, tuple) and len(t) == 2:
return count_to_str(t[0]) + "-" + count_to_str(t[1])
raise ValueError("Invalid page range")
if page_count is None:
pass
elif isinstance(page_count, int):
params['page-count'] = str(page_count)
elif isinstance(page_count, tuple) and len(page_count) == 2:
params['page-count'] = page_tuple_to_range(page_count)
elif isinstance(page_count, list):
_page_count = []
for pc in page_count:
_page_count.append(page_tuple_to_range(pc))
params['page-count'] = ",".join(_page_count)
else:
raise ValueError("Invalid `page_count`")
if license is None:
pass
elif license in License:
params['license'] = license.identifier
else:
raise ValueError("Invalid `license`")
response = self._session.get(self._ctx.host + '/rpc/resource_search/admin-document/',
params=self._encode_params(params),
headers=headers,
**self._ctx.requests_kwargs)
return self._check_response(response)['OBJECTS']
def users_search(self,
query=None,
search_type=None):
""" RPC to search entities"""
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
params = {
'display': 0,
}
data = {}
if isinstance(query, str):
data['query'] = query
elif query is None:
pass
else:
raise ValueError('Invalid `query`')
if search_type is None:
pass
elif search_type in UsersSearchType:
data['search-type'] = search_type.identifier
else:
raise ValueError('Invalid `search_type`')
response = self._session.get(self._ctx.host + '/rpc/resource_search/admin-user/',
params=self._encode_params(params),
headers=headers,
data=data,
**self._ctx.requests_kwargs)
return self._check_response(response)['OBJECTS']
def _upload(self, file_name, upload_module, upload_method, alias_name=None, rpc_parameter=None):
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
params = {
"method" : upload_method,
}
if alias_name is None:
alias_name = file_name
files = {'file': (alias_name, open(file_name, 'rb'), mimetypes.guess_type(file_name))}
response = self._session.put(self._ctx.host + '/rpc/' + upload_module,
data=rpc_parameter,
files=files,
params=params,
headers=headers,
**self._ctx.requests_kwargs)
self._check_response(response)
    def preview_upload(self, document_id, file_name, preview_file_type):
        """Upload a preview file for a document.

        NOTE(review): unlike upload_asset(), this sends the enum object
        itself as "file_type" rather than its ``.identifier`` — presumably
        the endpoint accepts the serialized enum; confirm before changing.
        """
        if preview_file_type not in PreviewFileType:
            raise ValueError("file_type should be one of op.files.filetype.*")
        self._upload(
            file_name=file_name,
            upload_module="document_admin_preview_upload",
            upload_method="upload",
            rpc_parameter={
                "file_type":preview_file_type,
                "source_type":"document",
                "reference_id":document_id}
        )
def upload_asset(self, document_id, file_name, file_type):
if file_type not in FileType:
raise ValueError("file_type should be one of op.files.filetype.*")
self._upload(
file_name=file_name,
upload_module="document_admin_upload",
upload_method="upload",
rpc_parameter={
"file_type":file_type.identifier,
"document_id":document_id}
)
def download_asset(self, file_id):
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
params = {
'method' : 'download',
'file_id' : file_id
}
response = self._session.get(self._ctx.host + '/rpc/document_admin_upload',
params=params,
headers=headers,
**self._ctx.requests_kwargs)
self._check_status_code(response)
return response.content
def upload_avatar(self, user_id, file_name):
self._upload(
file_name=file_name,
upload_module="upload_picture",
upload_method="upload_picture",
rpc_parameter={
"response": "gjp",
"source_type": "user",
"reference_id": user_id}
)
def enqueue_import(self, file_name, alias_name):
self._upload(
file_name=file_name,
alias_name=alias_name,
upload_module="admin_asset_upload",
upload_method="upload",
rpc_parameter={
"filename": alias_name or file_name,
"response": "gjp"}
)
def download_from_archive(self, url):
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.get(url,
headers=headers,
**self._ctx.requests_kwargs)
self._check_status_code(response)
return response.content
def _user_rpc(self, data):
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.put(self._ctx.host + '/rpc/users',
data=data,
headers=headers,
**self._ctx.requests_kwargs)
return self._check_response(response)
def _price_periods_rpc(self, data):
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.get(self._ctx.host + '/rpc/price_periods',
data=data,
headers=headers,
**self._ctx.requests_kwargs)
return self._check_response(response)
def get_price_periods(
self,
document_id,
country,
date_from=None,
date_to=None,
include_campains=True,
remove_outdated=None
):
data = {
'document_id': document_id,
}
if country in Country:
data['country_ids'] = self.resolve_enum(Country, enum=country).internal_id
elif isinstance(country, str):
data['country_ids'] = self.resolve_enum(Country, code=country).internal_id
else:
raise ValueError("Invalid `country`")
if date_from:
data['date_from'] = date_from
if date_to:
data['date_to'] = date_to
if include_campains:
data['include_campains'] = True
if remove_outdated is not None:
data['remove_outdated'] = remove_outdated
return self._price_periods_rpc(data)['OBJECTS'][0]
def reset_password(self, user_id):
data = {
'method': 'send_passwd',
'user_id': user_id,
}
self._user_rpc(data)
def set_password(self, user_id, password):
data = {
'method': 'change_user_passwd',
'user_id': user_id,
'changepassword': password,
}
self._user_rpc(data)
def create_user(self,
first_name,
last_name,
email):
data = {
'method': 'create_user',
'first_name': first_name,
'last_name': last_name,
'email': email,
}
return self._user_rpc(data)['user_id']
def set_document_external_availability(self, name, country_code, document_id, publication_type,
availability_status, availability, in_stock_quantity,
price_cent, currency_code, shop_url):
data = {
'method': 'report',
'name': name,
'country_code': country_code,
'document_id': document_id,
'publication_type': publication_type,
'availability_status': availability_status,
'availability': availability,
'in_stock_quantity': in_stock_quantity,
'shop_url': shop_url
}
if price_cent is not None:
data['price_cent'] = price_cent
if currency_code is not None:
data['currency_code'] = currency_code
self._document_external_availability_rpc(data)
def _document_external_availability_rpc(self, data):
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.put(self._ctx.host + '/rpc/document_external_availability',
data=data,
headers=headers,
**self._ctx.requests_kwargs)
return self._check_response(response)
def set_document_external_salesrank(self, name, country_code, document_id, publication_type, salesrank):
data = {
'method': 'report',
'name': name,
'country_code': country_code,
'document_id': document_id,
'publication_type': publication_type
}
if salesrank is not None:
data['salesrank'] = salesrank
self._document_external_salesrank_rpc(data)
def _document_external_salesrank_rpc(self, data):
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
response = self._session.put(self._ctx.host + '/rpc/document_external_salesrank',
data=data,
headers=headers,
**self._ctx.requests_kwargs)
return self._check_response(response)
def set_document_external_rating(self, name, country_code, document_id, publication_type, average_rating, review_count, shop_review_url):
data = {
'method': 'report',
'name': name,
'country_code': country_code,
'document_id': document_id,
'publication_type': publication_type,
'review_count': review_count,
'shop_review_url': shop_review_url
}
if average_rating is not None:
data['average_rating'] = average_rating
self._document_external_rating_rpc(data)
def _document_external_rating_rpc(self, data):
headers = {
'Authorization': 'Bearer ' + self._ctx.auth_context.auth_token,
}
| |
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
import unittest
import os
import tempfile
import shutil
import time
import redis
from rmtest import ModuleTestCase
# Fall back to the locally-built release artifact when the harness does not
# specify a module path; full backtraces make Rust panics debuggable.
if "REDIS_MODULE_PATH" not in os.environ:
    os.environ["REDIS_MODULE_PATH"] = "../../target/release/libredis_sql.so"
os.environ["RUST_BACKTRACE"] = "full"
class Table():
    """Context manager that CREATEs a SQL table on entry and DROPs it on exit.

    *redis* is the test case (exposes ``.client``); *key* optionally selects
    a REDISQL database key for the command.
    """

    def __init__(self, redis, name, values, key=""):
        self.redis = redis
        self.key = key
        self.name = name
        self.values = values

    def _exec(self, statement):
        # With a key, the statement runs inside that DB; otherwise REDISQL.EXEC
        # receives the statement as its only argument.
        if self.key:
            self.redis.client.execute_command("REDISQL.EXEC", self.key, statement)
        else:
            self.redis.client.execute_command("REDISQL.EXEC", statement)

    def __enter__(self):
        self._exec("CREATE TABLE {} {}".format(self.name, self.values))

    def __exit__(self, type, value, traceback):
        self._exec("DROP TABLE {}".format(self.name))
class DB():
    """Context manager owning a REDISQL database key for the test's duration."""

    def __init__(self, redis, key):
        self.redis = redis
        self.key = key

    def _command(self, *args):
        # All traffic goes through the wrapped test client.
        self.redis.client.execute_command(*args)

    def __enter__(self):
        self._command("REDISQL.CREATE_DB", self.key)

    def __exit__(self, type, value, traceback):
        self._command("DEL", self.key)
class TestRediSQLWithExec(ModuleTestCase('')):
    """Base fixture: a disposable Redis instance with the RediSQL module.

    Subclasses use the thin command wrappers below instead of talking to
    ``self.client`` directly.
    """
    def setUp(self):
        # NOTE(review): the disposable instance is stored but tearDown()
        # never releases it explicitly — presumably rmtest cleans it up;
        # confirm against the rmtest ModuleTestCase implementation.
        self.disposable_redis = self.redis()

    def tearDown(self):
        pass

    def exec_naked(self, *command):
        # Run any raw Redis command and return its reply.
        return self.client.execute_command(*command)

    def exec_cmd(self, *command):
        # Shorthand for REDISQL.EXEC <args...>.
        return self.client.execute_command("REDISQL.EXEC", *command)

    def create_db(self, key):
        return self.client.execute_command("REDISQL.CREATE_DB", key)

    def delete_db(self, key):
        return self.client.execute_command("DEL", key)
class TestRediSQLExec(TestRediSQLWithExec):
    """Basic REDISQL.EXEC behavior: DDL, DML and SELECTs.

    FIX: ported off Python-2-only syntax — ``0L`` long literals and
    ``xrange`` are errors on Python 3, while ``0`` and ``range`` behave
    identically in these assertions under Python 2 as well.
    """
    def test_ping(self):
        self.assertTrue(self.client.ping())

    def test_create_table(self):
        with DB(self, "A"):
            done = self.exec_cmd("A", "CREATE TABLE test1 (A INTEGER);")
            self.assertEquals(done, ["DONE", 0])
            done = self.exec_cmd("A", "DROP TABLE test1")
            self.assertEquals(done, ["DONE", 0])

    def test_insert(self):
        with DB(self, "B"):
            with Table(self, "test2", "(A INTEGER)", key="B"):
                done = self.exec_cmd("B", "INSERT INTO test2 VALUES(2);")
                self.assertEquals(done, ["DONE", 1])

    def test_select(self):
        with DB(self, "C"):
            with Table(self, "test3", "(A INTEGER)", key="C"):
                done = self.exec_cmd("C", "INSERT INTO test3 VALUES(2);")
                self.assertEquals(done, ["DONE", 1])
                result = self.exec_cmd("C", "SELECT * from test3")
                self.assertEquals(result, [[2]])
                self.exec_cmd("C", "INSERT INTO test3 VALUES(3);")
                result = self.exec_cmd("C", "SELECT * from test3 ORDER BY A")
                self.assertEquals(result, [[2], [3]])
                self.exec_cmd("C", "INSERT INTO test3 VALUES(4);")
                result = self.exec_cmd("C", "SELECT * FROM test3 ORDER BY A")
                self.assertEquals(result, [[2], [3], [4]])

    def test_single_remove(self):
        with DB(self, "D"):
            with Table(self, "test4", "(A INTEGER)", key="D"):
                self.exec_cmd("D", "INSERT INTO test4 VALUES(2);")
                self.exec_cmd("D", "INSERT INTO test4 VALUES(3);")
                self.exec_cmd("D", "INSERT INTO test4 VALUES(4);")
                result = self.exec_cmd("D", "SELECT * FROM test4 ORDER BY A")
                self.assertEquals(result, [[2], [3], [4]])
                self.exec_cmd("D", "DELETE FROM test4 WHERE A = 3;")
                result = self.exec_cmd("D", "SELECT * FROM test4 ORDER BY A")
                self.assertEquals(result, [[2], [4]])

    def test_big_select(self):
        elements = 50
        with DB(self, "E"):
            # "INTERGER" (sic) is kept verbatim: SQLite accepts arbitrary
            # column type names, so the test still behaves the same.
            with Table(self, "test5", "(A INTERGER)", key="E"):
                pipe = self.client.pipeline(transaction=False)
                for i in range(elements):
                    pipe.execute_command("REDISQL.EXEC", "E",
                                         "INSERT INTO test5 VALUES({})".format(i))
                pipe.execute()
                result = self.exec_cmd("E", "SELECT * FROM test5 ORDER BY A")
                self.assertEquals(result, [[x] for x in range(elements)])

    def test_multiple_row(self):
        with DB(self, "F"):
            with Table(self, "test6", "(A INTEGER, B REAL, C TEXT)", key="F"):
                self.exec_cmd("F", "INSERT INTO test6 VALUES(1, 1.0, '1point1')")
                self.exec_cmd("F", "INSERT INTO test6 VALUES(2, 2.0, '2point2')")
                self.exec_cmd("F", "INSERT INTO test6 VALUES(3, 3.0, '3point3')")
                self.exec_cmd("F", "INSERT INTO test6 VALUES(4, 4.0, '4point4')")
                self.exec_cmd("F", "INSERT INTO test6 VALUES(5, 5.0, '5point5')")
                result = self.exec_cmd("F", "SELECT A, B, C FROM test6 ORDER BY A")
                result = [[A, float(B), C] for [A, B, C] in result]
                self.assertEquals(result,
                                  [[1, 1.0, "1point1"], [2, 2.0, '2point2'],
                                   [3, 3.0, '3point3'], [4, 4.0, '4point4'],
                                   [5, 5.0, '5point5']])

    def test_join(self):
        with DB(self, "G"):
            with Table(self, "testA", "(A INTEGER, B INTEGER)", key="G"):
                with Table(self, "testB", "(C INTEGER, D INTEGER)", key="G"):
                    self.exec_cmd("G", "INSERT INTO testA VALUES(1, 2)")
                    self.exec_cmd("G", "INSERT INTO testA VALUES(3, 4)")
                    self.exec_cmd("G", "INSERT INTO testB VALUES(1, 2)")
                    self.exec_cmd("G", "INSERT INTO testB VALUES(3, 4)")
                    result = self.exec_cmd("G", "SELECT A, B, C, D FROM testA, testB WHERE A = C ORDER BY A")
                    self.assertEquals(result, [[1, 2, 1, 2], [3, 4, 3, 4]])

    def runTest(self):
        pass
class NoDefaultDB(TestRediSQLWithExec):
    """REDISQL.EXEC must refuse to run when no database key is supplied."""
    def test_that_we_need_a_key(self):
        with self.assertRaises(redis.exceptions.ResponseError):
            self.exec_cmd("SELECT 'foo';")
class TestRediSQLKeys(TestRediSQLWithExec):
    """Lifecycle of REDISQL database keys.

    FIX: Python-2-only ``0L`` long literals replaced with ``0`` (a Python 3
    syntax error; equal to ``0L`` under Python 2, so assertions are unchanged).
    """
    def test_create_and_destroy_key(self):
        ok = self.create_db("A_REDISQL")
        self.assertEquals(ok, "OK")
        keys = self.client.keys("A_REDISQL")
        self.assertEquals(["A_REDISQL"], keys)
        ok = self.delete_db("A_REDISQL")
        keys = self.client.keys("A_REDISQL")
        self.assertEquals([], keys)

    def test_create_table_inside_key(self):
        with DB(self, "A"):
            done = self.exec_cmd("A", "CREATE TABLE t1 (A INTEGER);")
            self.assertEquals(done, ["DONE", 0])
            done = self.exec_cmd("A", "DROP TABLE t1")
            self.assertEquals(done, ["DONE", 0])

    def test_insert_into_table(self):
        with DB(self, "B"):
            with Table(self, "t2", "(A INTEGER, B INTEGER)", key="B"):
                done = self.exec_cmd("B", "INSERT INTO t2 VALUES(1,2)")
                self.assertEquals(done, ["DONE", 1])
                result = self.exec_cmd("B", "SELECT * FROM t2")
                self.assertEquals(result, [[1, 2]])
class TestMultipleInserts(TestRediSQLWithExec):
    """Row counts reported for multi-row and multi-statement inserts.

    FIX: Python-2-only long literals (``1L``...) replaced with plain ints —
    equal under Python 2, valid under Python 3.
    """
    def test_insert_two_rows(self):
        with DB(self, "M"):
            with Table(self, "t1", "(A INTEGER, B INTEGER)", key="M"):
                done = self.exec_naked("REDISQL.EXEC", "M", "INSERT INTO t1 values(1, 2);")
                self.assertEquals(done, ["DONE", 1])
                done = self.exec_naked("REDISQL.EXEC", "M", "INSERT INTO t1 values(3, 4),(5, 6);")
                self.assertEquals(done, ["DONE", 2])
                done = self.exec_naked("REDISQL.EXEC", "M", "INSERT INTO t1 values(7, 8);")
                self.assertEquals(done, ["DONE", 1])

    def test_multi_insert_same_statement(self):
        with DB(self, "N"):
            with Table(self, "t1", "(A INTEGER, B INTEGER)", key="N"):
                done = self.exec_naked("REDISQL.EXEC", "N", "INSERT INTO t1 values(1, 2); INSERT INTO t1 values(3, 4);")
                self.assertEquals(done, ["DONE", 2])
                done = self.exec_naked("REDISQL.EXEC", "N", """BEGIN;
                    INSERT INTO t1 values(3, 4);
                    INSERT INTO t1 values(5, 6);
                    INSERT INTO t1 values(7, 8);
                    COMMIT;""")
                self.assertEquals(done, ["DONE", 3])
                done = self.exec_naked("REDISQL.EXEC", "N", """BEGIN;
                    INSERT INTO t1 values(3, 4);
                    INSERT INTO t1 values(5, 6);
                    INSERT INTO t1 values(7, 8);
                    INSERT INTO t1 values(3, 4);
                    INSERT INTO t1 values(5, 6);
                    INSERT INTO t1 values(7, 8);
                    COMMIT;""")
                self.assertEquals(done, ["DONE", 6])
class TestJSON(TestRediSQLWithExec):
    """SQLite JSON1 functions work through REDISQL.EXEC.

    FIX: ``4L`` (Python-2-only) replaced with ``4``.
    """
    def test_multiple_insert_on_different_types(self):
        with DB(self, "H"):
            with Table(self, "j1", "(A text, B int)", key="H"):
                done = self.exec_naked("REDISQL.EXEC", "H", """BEGIN;
                    INSERT INTO j1 VALUES ('{\"foo\" : \"bar\"}', 1);
                    INSERT INTO j1 VALUES ('{\"foo\" : 3}', 2);
                    INSERT INTO j1 VALUES ('{\"foo\" : [1, 2, 3]}', 3);
                    INSERT INTO j1 VALUES ('{\"foo\" : {\"baz\" : [1, 2, 3]}}', 4);
                    COMMIT;""")
                self.assertEquals(done, ["DONE", 4])
                result = self.exec_naked("REDISQL.EXEC", "H", "SELECT json_extract(A, '$.foo') FROM j1 ORDER BY B;")
                self.assertEquals(result, [["bar"], [3], ["[1,2,3]"], ['{"baz":[1,2,3]}']])
class TestStatements(TestRediSQLWithExec):
    """CREATE/EXEC/UPDATE_STATEMENT behavior plus RDB persistence.

    FIX: Python-2-only long literals (``1L``/``2L``) replaced with plain
    ints — equal under Python 2, valid under Python 3.
    """
    def test_create_statement(self):
        with DB(self, "A"):
            with Table(self, "t1", "(A INTEGER)", key="A"):
                ok = self.exec_naked("REDISQL.CREATE_STATEMENT", "A", "insert", "insert into t1 values(?1);")
                self.assertEquals(ok, "OK")
                done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "3")
                self.assertEquals(done, ["DONE", 1])
                done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "4")
                self.assertEquals(done, ["DONE", 1])
                result = self.exec_cmd("A", "SELECT * FROM t1 ORDER BY A;")
                self.assertEquals(result, [[3], [4]])

    def test_multi_statement_single_bind(self):
        with DB(self, "A"):
            with Table(self, "t1", "(A INTEGER)", key="A"):
                ok = self.exec_naked("REDISQL.CREATE_STATEMENT", "A", "insert", "insert into t1 values(?1); insert into t1 values(?1 + 1);")
                self.assertEquals(ok, "OK")
                done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "3")
                self.assertEquals(done, ["DONE", 2])
                done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "5")
                self.assertEquals(done, ["DONE", 2])
                result = self.exec_cmd("A", "SELECT * FROM t1 ORDER BY A;")
                self.assertEquals(result, [[3], [4], [5], [6]])

    def test_multi_statement_multi_table_single_bind(self):
        with DB(self, "A"):
            with Table(self, "t1", "(A INTEGER)", key="A"):
                with Table(self, "t2", "(A INTEGER)", key="A"):
                    ok = self.exec_naked("REDISQL.CREATE_STATEMENT", "A", "insert", "insert into t1 values(?1); insert into t2 values(?1 - 1);")
                    self.assertEquals(ok, "OK")
                    done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "3")
                    self.assertEquals(done, ["DONE", 2])
                    done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "5")
                    self.assertEquals(done, ["DONE", 2])
                    result = self.exec_cmd("A", "SELECT * FROM t1 ORDER BY A;")
                    self.assertEquals(result, [[3], [5]])
                    result = self.exec_cmd("A", "SELECT * FROM t2 ORDER BY A;")
                    self.assertEquals(result, [[2], [4]])

    def test_multi_statement_different_bindings(self):
        with DB(self, "A"):
            with Table(self, "t1", "(A INTEGER)", key="A"):
                ok = self.exec_naked("REDISQL.CREATE_STATEMENT", "A", "insert", "insert into t1 values(?1); insert into t1 values(?2 + 1); select * from t1;")
                self.assertEquals(ok, "OK")
                result = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "3", "8")
                self.assertEquals(result, [[3], [9]])

    def test_update_statement(self):
        with DB(self, "A"):
            with Table(self, "t1", "(A INTEGER)", key="A"):
                ok = self.exec_naked("REDISQL.CREATE_STATEMENT", "A", "insert", "insert into t1 values(?1);")
                self.assertEquals(ok, "OK")
                done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "3")
                self.assertEquals(done, ["DONE", 1])
                ok = self.exec_naked("REDISQL.UPDATE_STATEMENT", "A", "insert", "insert into t1 values(?1 + 10001);")
                self.assertEquals(ok, "OK")
                done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "4")
                self.assertEquals(done, ["DONE", 1])
                result = self.exec_cmd("A", "SELECT * FROM t1 ORDER BY A;")
                self.assertEquals(result, [[3], [10005]])

    def test_rdb_persistency(self):
        with DB(self, "A"):
            with Table(self, "t1", "(A INTEGER)", key="A"):
                ok = self.exec_naked("REDISQL.CREATE_STATEMENT", "A", "insert", "insert into t1 values(?1);")
                self.assertEquals(ok, "OK")
                done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "3")
                self.assertEquals(done, ["DONE", 1])
                # Force an RDB save/reload cycle, then give the module a
                # moment to come back up before reusing the statement.
                for _ in self.retry_with_reload():
                    pass
                time.sleep(0.5)
                done = self.exec_naked("REDISQL.EXEC_STATEMENT", "A", "insert", "4")
                self.assertEquals(done, ["DONE", 1])
                result = self.exec_cmd("A", "SELECT * FROM t1 ORDER BY A;")
                self.assertEquals(result, [[3], [4]])

    def test_rdb_persistency_no_statements(self):
        with DB(self, "A"):
            with Table(self, "t1", "(A INTEGER)", key="A"):
                done = self.exec_cmd("A", "INSERT INTO t1 VALUES(5)")
                self.assertEquals(done, ["DONE", 1])
                for _ in self.retry_with_reload():
                    pass
                time.sleep(0.5)
                done = self.exec_cmd("A", "INSERT INTO t1 VALUES(6)")
                self.assertEquals(done, ["DONE", 1])
                result = self.exec_cmd("A", "SELECT * FROM t1 ORDER BY A;")
                self.assertEquals(result, [[5], [6]])
| |
<filename>device_drivers/device_attributes.py
#!/usr/bin/python
# @file device_attributes.py
#
# Python classes to represent attributes of a Linux device
#
# @author <NAME>
# @date 2020
# @copyright 2020 Audio Logic
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# <NAME>
# Audio Logic
# 985 Technology Blvd
# Bozeman, MT 59718
# <EMAIL>
from device_drivers.device import DeviceType, DRIVER_PREFIX
class DataType:
    """Represent a fixed point number."""

    def __init__(self, name, width, signed=False, fractional_bits=0):
        """Initialize a fixed point numerical data type.

        Parameters
        ----------
        name : str
            Name of the data type.
        width : int
            Number of bits in the data type.
        signed : bool, optional
            True if the data type is signed; unsigned by default.
        fractional_bits : int, optional
            Number of fractional bits in the data type, by default 0.
        """
        self.name = name
        self.width = width
        self.signed = signed
        self.fractional_bits = fractional_bits

    @staticmethod
    def parse_json(dt_json):
        """Build a DataType from its JSON description.

        Expects the keys 'type', 'wordLength', 'signed' and 'fractionLength'.
        """
        return DataType(dt_json['type'],
                        dt_json['wordLength'],
                        dt_json['signed'],
                        dt_json['fractionLength'])
class DeviceAttribute:
    """Represent a device attribute on a Linux device driver.

    Instances emit the C source fragments for one sysfs attribute:
    variable declaration, read/write function prototypes and bodies, and
    the DEVICE_ATTR macro. Generated text must stay byte-exact, so the
    string building below is deliberately left untouched.
    """
    def __init__ (self, name, data_type, permissions = "0664"):
        """Initialize a representation of a Linux device driver attribute.

        Parameters
        ----------
        name : str
            Attribute name; also used as the C identifier prefix.
        data_type : DataType
            Fixed-point/string data type of the attribute.
        permissions : str, optional
            Octal sysfs permission string; "0444" marks the attribute
            read-only (no write function is generated). By default "0664".
        """
        self.name = name
        self.data_type = data_type
        self.permissions = permissions
    @staticmethod
    def parse_json(dev_attr_json, device_type):
        """Parse JSON object into DeviceAttribute.

        Parameters
        ----------
        dev_attr_json : dict
            Dictionary representing attribute from JSON.
        device_type : DeviceType
            Represents what device interface this attribute is tied to.

        Returns
        -------
        DeviceAttribute
            Returns device attribute matching device_type.

        Raises
        ------
        ValueError
            Raises error if unsupported DeviceType is passed in.
        """
        data_type = DataType.parse_json(dev_attr_json["dataType"])
        if device_type == DeviceType.FPGA:
            return FPGADeviceAttribute(dev_attr_json['name'], data_type, dev_attr_json['registerNumber'])
        elif device_type == DeviceType.SPI:
            # NOTE(review): SPI is unimplemented and falls through, returning
            # None (see TODO notes at the bottom of this file).
            pass
        elif device_type == DeviceType.I2C:
            # NOTE(review): same as SPI — returns None until implemented.
            pass
        else:
            raise ValueError("Unsupported device type requested")
    def create_variable_declaration(self):
        # Emit the C member declaration backing this attribute: char* for
        # strings, (unsigned) int for numeric types.
        if self.data_type.name == "string":
            return f"char *{self.name};\n"
        elif self.data_type.signed:
            return f"int {self.name};\n"
        else:
            return f"unsigned int {self.name};\n"
    def create_func_prototypes(self):
        """Create C function prototypes for attribute read and write.

        The write prototype is skipped for read-only ("0444") attributes.

        Returns
        -------
        str
            Returns string representing C function prototypes.
        """
        c_code = ""
        if(self.permissions != "0444"):
            c_code += "static ssize_t " + self.name + \
                "_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count);\n"
        c_code += "static ssize_t " + self.name + \
            "_read (struct device *dev, struct device_attribute *attr, char *buf);\n"
        return c_code
    def create_read_func(self, device_name):
        """Create C function definition for reading the attribute value.

        Parameters
        ----------
        device_name : str
            Name of device the attribute belongs to.

        Returns
        -------
        str
            Returns C function definition for reading the attribute value.
        """
        c_code = "static ssize_t " + self.name + "_read(struct device *dev, struct device_attribute *attr, char *buf) {\n"
        c_code += f" {DRIVER_PREFIX}_{device_name}_dev_t * devp = ({DRIVER_PREFIX}_{device_name}_dev_t *)dev_get_drvdata(dev);\n"
        if self.data_type.name == "string":
            c_code += self._read_string()
        else:
            c_code += self._read_int()
        c_code += " return strlen(buf);\n"
        c_code += "}\n\n"
        return c_code
    def _read_string(self):
        # C body: print the cached string member into the sysfs buffer.
        c_code = ""
        c_code += f" sprintf(buf, \"%s\\n\", devp->{self.name});\n"
        return c_code
    def _read_int(self):
        # C body: render the cached fixed-point member as text
        # (fp_to_string is provided by the driver support code).
        c_code = ""
        c_code += " fp_to_string(buf, devp->" + self.name + \
            ", " + str(self.data_type.fractional_bits) + ", " + \
            str(self.data_type.signed).lower()+ ", " + \
            str(self.data_type.width) + ");\n"
        c_code += " strcat2(buf,\"\\n\");\n"
        return c_code
    # TODO: Implement it with writing to register for register only attributes
    def create_write_func(self, device_name):
        """Create C function definition for writing to the attribute.

        NOTE(review): the base implementation is a stub that returns an
        empty string even for writable attributes — only subclasses (e.g.
        FPGADeviceAttribute) generate a real write function.

        Parameters
        ----------
        device_name : str
            Name of device the attribute belongs to.

        Returns
        -------
        str
            Returns C function definition for writing to the attribute.
        """
        write_func = ""
        if(self.permissions != "0444"):
            pass
        return write_func
    def create_macro(self):
        """Create DEVICE_ATTR macro for attribute.

        Read-only ("0444") attributes get NULL as their write handler.

        Returns
        -------
        str
            Returns DEVICE_ATTR macro for the attribute.
        """
        if(self.permissions == "0444"):
            write_func = "NULL"
        else:
            write_func = self.name + "_write"
        c_code = ("DEVICE_ATTR(" + self.name + ", " + self.permissions
                  + ", " + self.name + "_read, " + write_func
                  + ");\n")
        return c_code
class FPGADeviceAttribute(DeviceAttribute):
    """Device attribute backed by a memory-mapped FPGA register.

    *offset* is the 32-bit register index added to ``devp->regs``.
    """

    def __init__(self, name, data_type, offset=0, permissions="0664"):
        super().__init__(name, data_type, permissions)
        self.offset = offset

    def create_read_func(self, device_name):
        """Create C function definition for reading the FPGA attribute value.

        Delegates to the base implementation; only the integer-read body
        (_read_int) differs, fetching the live register via ioread32.
        """
        return super().create_read_func(device_name)

    def _read_int(self):
        # C body: read the register, then format it as fixed-point text.
        signed = str(self.data_type.signed).lower()
        fragments = [
            " unsigned int tempValue;\n",
            f" tempValue = ioread32((u32 *)devp->regs + {self.offset});\n",
            f" fp_to_string(buf, tempValue, {self.data_type.fractional_bits}, {signed}, {self.data_type.width});\n",
            " strcat2(buf,\"\\n\");\n",
        ]
        return "".join(fragments)

    def create_write_func(self, device_name):
        """Create C function definition for writing to the FPGA attribute.

        The generated C strips separators from the sysfs input, parses it as
        a fixed-point number, caches it on the device struct and writes it
        to the register with iowrite32.

        Parameters
        ----------
        device_name : str
            Name of device the attribute belongs to.

        Returns
        -------
        str
            Returns C function definition for writing to the attribute.
        """
        signed = str(self.data_type.signed).lower()
        return (
            f"static ssize_t {self.name}_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {{\n"
            " uint32_t tempValue = 0;\n"
            " char substring[80];\n"
            " int substring_count = 0;\n"
            " int i;\n"
            f" {DRIVER_PREFIX}_{device_name}_dev_t *devp = ({DRIVER_PREFIX}_{device_name}_dev_t *)dev_get_drvdata(dev);\n"
            " for (i = 0; i < count; i++) {\n"
            " if ((buf[i] != ',') && (buf[i] != ' ') && (buf[i] != '\\0') && (buf[i] != '\\r') && (buf[i] != '\\n')) {\n"
            " substring[substring_count] = buf[i];\n"
            " substring_count ++;\n"
            " }\n"
            " }\n"
            " substring[substring_count] = 0;\n"
            f" tempValue = set_fixed_num(substring, {self.data_type.fractional_bits}, {signed});\n"
            f" devp->{self.name} = tempValue;\n"
            f" iowrite32(devp->{self.name}, (u32 *)devp->regs + {self.offset});\n"
            " return count;\n"
            "}\n\n"
        )
## Design note: do not model SPI transfers as separate "command" objects.
## Fold this logic into the device-attribute classes directly, so that I2C and
## SPI attributes receive a register address rather than a byte offset.
## - is_read is handled by whichever generated function is being emitted.
## - spi_addr is either passed into the function or stored as a constructor
##   property; it should be emitted as a C constant (SPI_ADDR), so the value
##   does not need to be passed around at runtime.
class SPICommand:
    """Description of a single SPI register transaction.

    Attributes
    ----------
    data : payload bytes/value for the transaction
    reg_addr : target register address
    is_read : True for a read transaction, False for a write
    spi_addr : optional SPI device address (intended to be emitted as a
        C constant so it need not be passed around at runtime)
    """

    def __init__(self, data, reg_addr, is_read, spi_addr=None):
        self.data = data
        self.reg_addr = reg_addr
        self.is_read = is_read
        self.spi_addr = spi_addr

    def _append_hex_id(self, val):
        """Normalize *val* into a '0x'-prefixed string and return it.

        BUG FIX: the original compared ``val[:1]`` (a single character)
        against ``"0x"`` -- which can never be equal, so the prefix was
        always prepended -- and never returned the result, making the
        whole call a no-op for callers.
        """
        if isinstance(val, int):
            # NOTE(review): ints are rendered in decimal before the '0x'
            # prefix is added, mirroring the original str() conversion;
            # confirm whether hex() was intended instead.
            val = str(val)
        if not val.startswith("0x"):
            val = "0x" + val
        return val
class SPIDeviceAttribute(DeviceAttribute):
def __init__(self, name, data_type, permissions="0664"):
    """Initialize the SPI attribute with its name, data type and sysfs permissions."""
    super().__init__(name, data_type, permissions)
# .attrWriteComm = [["0x08", "0x06", "0x00"], ["0x08", "0x06", "0x00"], ["0x08", "0x07", "0x00"],
# address + R/W, Register, Data
def create_read_func(self, device_name):
    """Create C function definition for reading the SPI attribute value.

    Parameters
    ----------
    device_name : str
        Name of device the attribute belongs to

    Returns
    -------
    str
        C function definition for reading the attribute value.
    """
    # The generic read generator already covers SPI-backed attributes.
    return super().create_read_func(device_name)
def create_write_func(self, device_name, device_name_abbrev):
"""Create C function definition for writing to the SPI attribute.
Parameters
----------
device_name : str
Name of device the attibute belongs to
Returns
-------
str
Returns C function definition for writing to the attribute
"""
c_code = "static ssize_t " + self.name + "_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {\n"
c_code += " uint32_t tempValue = 0;\n"
c_code += " char substring[80];\n"
c_code += " int substring_count = 0;\n"
c_code += " int i;\n"
c_code += " char cmd[" + str(inputParams.attrWriteCommBytes) + | |
A `torch.Tensor` object representing delayed h_state of shape (batch_size, h_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
prev_c_state: Optional; A `torch.Tensor` object representing previous c_state of shape (batch_size, state_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
delayed_c_state: A `torch.Tensor` object representing delayed c_state of shape (batch_size, state_size). Default is NoneT = torch.FloatTensor([-1e38]) (i.e., will not be used).
Returns:
A tuple of `torch.tensor` objects, (i.e., output_t, (h_state, new_stat)), where output_t is `torch.Tensor` object representing outputs of shape (batch_size, state_size-h_size);
h_state is a `torch.Tensor` object representing the next h_state of shape (batch_size, h_size); new_state is a `torch.Tensor` object representing the next c_state of shape (batch_size, state_size).
"""
if has_delayed_state:
xh = torch.cat([input_t, prev_h_state, delayed_h_state], dim=1)
elif has_prev_state:
xh = torch.cat([input_t, prev_h_state, prev_h_state], dim=1)
else:
empty_h_state = torch.zeros(
input_t.shape[0], 2 * self.h_size, dtype=torch.float
)
xh = torch.cat([input_t, empty_h_state], dim=1)
gates = self.lxh(xh)
chunked_gates = torch.chunk(gates, 4, dim=1)
forget_gate = (chunked_gates[0] + 1).sigmoid()
new_stat = chunked_gates[1].tanh()
out_gate = chunked_gates[3].sigmoid()
if has_prev_state:
if has_delayed_state:
alpha = chunked_gates[2].sigmoid()
weighted_c_state = alpha * prev_c_state + (1 - alpha) * delayed_c_state
else:
weighted_c_state = prev_c_state
new_stat = forget_gate * weighted_c_state + (1 - forget_gate) * new_stat
whole_output = out_gate * new_stat
output_t, h_state = torch.split(
whole_output, [self.out_size, self.h_size], dim=1
)
return output_t, (h_state, new_stat)
class DilatedRNNStack(torch.nn.Module):
    """The recurrent neural network module for global model.

    Attributes:
        nn_structure: A list of lists of integers representing the structure of the neural network. For example, [[1,3],[6,12]] defines 2 blocks of 2 layers each and output adaptor layer, with a resNet-style shortcut between output of the first block (output of the second layer)
            and output of the second block (output of 4th layer). The positive integers are the dilation numbers.
        cell_name: A string representing the name of the cells, can be 'LSTM', 'LSTM2Cell' or 'S2Cell'.
        input_size: An integer representing the number of expected features in the input tensor.
        state_size: An integer representing the c state size (which is hidden_size for a standard LSTM cell).
        output_size: An integer representing the number of expected features in the final output.
        h_size: Optional; An integer representing the number of expected features in h_state. Default is None (i.e., not specified).
        jit: Optional; A boolean specifying whether or not to jit each cell. Default is False.
    """

    def __init__(
        self,
        nn_structure: List[List[int]],
        cell_name: str,
        input_size: int,
        state_size: int,
        output_size: Optional[int] = None,
        h_size: Optional[int] = None,
        jit: bool = False,
    ) -> None:
        super(DilatedRNNStack, self).__init__()
        block_num = len(nn_structure)
        self.nn_structure = nn_structure
        self.cell_name = cell_name
        self.input_size = input_size
        self.h_size = h_size
        self.jit = jit
        # Per-timestep history: h_state_store[t] / c_state_store[t] each hold
        # one state tensor per layer; dilated cells index back into them.
        self.h_state_store = []
        self.c_state_store = []
        # The largest dilation bounds how much history a decoder needs.
        self.max_dilation = np.max([np.max(t) for t in nn_structure])
        self.reset_state()
        out_size = self._validate(cell_name, state_size, h_size)
        self.cells = []
        layer = 0
        iblock = 0
        for iblock in range(block_num):
            for lay in range(len(nn_structure[iblock])):
                # Only the very first layer consumes raw inputs; every other
                # layer consumes an out_size-wide output.
                if lay == 0 and iblock == 0:
                    tmp_input_size = input_size
                else:
                    tmp_input_size = out_size
                if cell_name == "LSTM2Cell":
                    if jit:
                        cell = torch.jit.script(
                            LSTM2Cell(tmp_input_size, h_size, state_size)
                        )
                    else:
                        cell = LSTM2Cell(tmp_input_size, h_size, state_size)
                elif cell_name == "S2Cell":
                    if jit:
                        cell = torch.jit.script(
                            S2Cell(tmp_input_size, h_size, state_size)
                        )
                    else:
                        cell = S2Cell(tmp_input_size, h_size, state_size)
                else:
                    cell = torch.nn.LSTMCell(tmp_input_size, state_size)
                # add_module registers the cell's parameters even though
                # self.cells is a plain Python list (not an nn.ModuleList).
                self.add_module("Cell_{}".format(layer), cell)
                self.cells.append(cell)
                layer += 1
        if isinstance(output_size, int) and output_size > 0:
            self.adaptor = torch.nn.Linear(out_size, output_size)
        elif output_size is None:
            # Encoder mode: expose the raw block output without projection.
            self.adaptor = None
        else:
            msg = f"output_size should be either None (for encoder) or a positive integer, but receives {output_size}."
            logging.error(msg)
            raise ValueError(msg)
        self.block_num = block_num
        self.out_size = out_size

    def _validate(self, cell_name: str, state_size: int, h_size: Optional[int]) -> int:
        """Validate the cell/state configuration and return the cell output width."""
        if cell_name not in ["LSTM2Cell", "S2Cell", "LSTM"]:
            msg = f"Only support cells 'S2Cell', 'LSTM2Cell', 'LSTM' but receive {cell_name}."
            logging.error(msg)
            raise ValueError(msg)
        if cell_name == "LSTM2Cell" or cell_name == "S2Cell":
            if h_size is None:
                msg = "h_size should be a positive integer smaller than state_size for LSTM2Cell or S2Cell."
                logging.error(msg)
                raise ValueError(msg)
            if h_size >= state_size:
                msg = "h_size should be smaller than state_size."
                logging.error(msg)
                raise ValueError(msg)
            # Custom cells split state_size into (output, h) parts.
            out_size = state_size - h_size
        else:
            out_size = state_size
        return out_size

    def prepare_decoder(self, decoder) -> None:
        """Prepare a DilatedRNNStack object used as decoder.

        This function copies the last max_dilation tensors in h_state_store and c_state_store to decoder.

        Args:
            decoder: A :class:`DilatedRNNStack` object representing the decoder.
        """
        # Slicing copies the outer lists, but the state tensors themselves
        # are shared with this encoder (no clone).
        decoder.h_state_store = self.h_state_store[-self.max_dilation :]
        decoder.c_state_store = self.c_state_store[-self.max_dilation :]
        return

    def _forward_S2Cell(
        self,
        tmp_input: Tensor,
        layer: int,
        has_prev_state: bool,
        has_delayed_state: bool,
        t: int,
        ti_1: int,
    ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        """forward function for S2Cell (to avoid lint warning)."""
        # ti_1 = t - dilation: index of the delayed (dilated) state.
        if has_delayed_state:
            output_t, (h_state, new_state) = self.cells[layer](
                tmp_input,
                has_prev_state,
                has_delayed_state,
                prev_h_state=self.h_state_store[t - 1][layer],
                delayed_h_state=self.h_state_store[ti_1][layer],
                prev_c_state=self.c_state_store[t - 1][layer],
                delayed_c_state=self.c_state_store[ti_1][layer],
            )
        elif has_prev_state:
            output_t, (h_state, new_state) = self.cells[layer](
                tmp_input,
                has_prev_state,
                has_delayed_state,
                prev_h_state=self.h_state_store[t - 1][layer],
                prev_c_state=self.c_state_store[t - 1][layer],
            )
        else:
            # Very first step: the cell synthesizes zero states internally.
            output_t, (h_state, new_state) = self.cells[layer](tmp_input, False, False)
        return output_t, (h_state, new_state)

    def _forward_LSTM2Cell(
        self,
        tmp_input: Tensor,
        layer: int,
        has_prev_state: bool,
        has_delayed_state: bool,
        t: int,
        ti_1: int,
    ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        """Forward function for LSTM2Cell (to avoid lint warning)."""
        if has_delayed_state:
            output_t, (h_state, new_state) = self.cells[layer](
                tmp_input,
                has_prev_state,
                has_delayed_state,
                prev_h_state=self.h_state_store[t - 1][layer],
                delayed_h_state=self.h_state_store[ti_1][layer],
                c_state=self.c_state_store[ti_1][layer],
            )
        elif has_prev_state:
            output_t, (h_state, new_state) = self.cells[layer](
                tmp_input,
                has_prev_state,
                has_delayed_state,
                prev_h_state=self.h_state_store[t - 1][layer],
                c_state=self.c_state_store[t - 1][layer],
            )
        else:
            output_t, (h_state, new_state) = self.cells[layer](tmp_input, False, False)
        return output_t, (h_state, new_state)

    def forward(self, input_t: Tensor) -> Tensor:
        """Forward method of DilatedRNNStack

        Args:
            input_t: A `torch.Tensor` object representing input features of shape (batch_size, input_size).

        Returns:
            A `torch.Tensor` object representing outputs of shape (batch_size, output_size).
        """
        # NOTE(review): zeros are allocated on the default device/dtype --
        # assumes input_t is a CPU float tensor; confirm for GPU use.
        prev_block_output = torch.zeros(
            input_t.shape[0], self.out_size, dtype=torch.float
        )
        # t is the current timestep index; append fresh per-layer state lists.
        t = len(self.h_state_store)
        self.h_state_store.append([])
        self.c_state_store.append([])
        output_t = NoneT  # just to initialize output_t (module-level sentinel)
        has_prev_state = t > 0
        layer = 0
        for iblock in range(self.block_num):
            for lay in range(len(self.nn_structure[iblock])):
                if lay == 0:
                    if iblock == 0:
                        tmp_input = input_t
                    else:
                        tmp_input = prev_block_output
                else:
                    tmp_input = output_t
                # Dilation for this layer: look back nn_structure steps.
                ti_1 = t - self.nn_structure[iblock][lay]
                has_delayed_state = ti_1 >= 0
                if self.cell_name == "S2Cell":
                    output_t, (h_state, new_state) = self._forward_S2Cell(
                        tmp_input, layer, has_prev_state, has_delayed_state, t, ti_1
                    )
                elif self.cell_name == "LSTM2Cell":
                    output_t, (h_state, new_state) = self._forward_LSTM2Cell(
                        tmp_input, layer, has_prev_state, has_delayed_state, t, ti_1
                    )
                else:  # LSTM
                    if has_delayed_state:
                        h_state, new_state = self.cells[layer](
                            tmp_input,
                            (
                                self.h_state_store[ti_1][layer],
                                self.c_state_store[ti_1][layer],
                            ),
                        )
                    elif has_prev_state:
                        h_state, new_state = self.cells[layer](
                            tmp_input,
                            (
                                self.h_state_store[t - 1][layer],
                                self.c_state_store[t - 1][layer],
                            ),
                        )
                    else:
                        h_state, new_state = self.cells[layer](tmp_input)
                    # Plain LSTM exposes h_state directly as the layer output.
                    output_t = h_state
                self.h_state_store[t].append(h_state)
                self.c_state_store[t].append(new_state)
                layer += 1
            # ResNet-style shortcut between consecutive block outputs.
            prev_block_output = output_t + prev_block_output
        if self.adaptor is not None:
            output_t = self.adaptor(prev_block_output)
        else:
            output_t = prev_block_output
        return output_t

    def reset_state(self) -> None:
        """Clear all stored state tensors."""
        self.h_state_store = []
        self.c_state_store = []
class PinballLoss(_Loss):
"""Pinball Loss function module.
For quantile q (0<q<1), forecast value y_hat and true value y, the pinball loss function is defined as:
pinball(y_hat, y, q)=max((y-y_hat)*q, (y-y_hat)*(q-1)).
For quantiles Q = [q_1, q_2, ..., q_n] and weights W = [w_1, w_2, ..., w_n], forecasts Y_hat=[y_hat_1, ..., yhat_n] and true value y, the weighted pinball loss is defined as:
PinballLoss(Y_hat, Y) = Sum_i^n pinball(y_hat_i, y, q_i)*w_i.
This module provides functionality for computing weighted pinball loss.
Attributes:
quantile: A 1-dimensional `torch.Tensor` object representing the quantiles to be calculated.
weight: Optional; A 1-dimensional `torch.Tensor` object representing the weights for quantiles. Default is torch.Tensor([1/n,..,1/n]) where n the number of quantiles.
reduction: Optional; A string representing the reduction method. Can be 'mean' or 'sum'. Default is 'mean'.
"""
def __init__(
    self, quantile: Tensor, weight: Optional[Tensor] = None, reduction: str = "mean"
) -> None:
    """Validate quantiles/weights and initialize the weighted pinball loss."""
    super(PinballLoss, self).__init__(
        size_average=None, reduce=None, reduction=reduction
    )
    if len(quantile) < 1:
        msg = "quantile should not be empty."
        logging.error(msg)
        raise ValueError(msg)
    if len(quantile.size()) != 1:
        msg = "quantile should be a 1-dimentional tensor."
        logging.error(msg)
        raise ValueError(msg)
    self.quantile = quantile
    # Quantiles are fixed constants, not learnable parameters.
    self.quantile.requires_grad = False
    if weight is None:
        # Default: equal weight 1/n for each of the n quantiles.
        d = len(quantile)
        weight = torch.ones(d) / d
    else:
        if weight.size() != quantile.size():
            msg = "weight and quantile should have the same size."
            logging.error(msg)
            raise ValueError(msg)
    # NOTE(review): weight is registered as a buffer, but quantile stays a
    # plain attribute -- it will not follow .to(device) or appear in
    # state_dict; confirm this asymmetry is intended.
    self.register_buffer("weight", weight)
    self.reduction = reduction
def _check(self, input: Tensor, target: Tensor) -> None:
"""
Check input tensor and target tensor size.
"""
if target.size()[0] != input.size()[0]:
msg | |
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#============================================================================
# Parts of this library are derived from Twisted:
# Copyright (C) 2001 <NAME>
#
# Copyright (C) 2005 <NAME> <<EMAIL>>
#============================================================================
from mimetools import Message
from cStringIO import StringIO
import math
import socket
import time
import cgi
# ---- HTTP status codes (RFC 2616 / WebDAV extensions) ----
# 1xx informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
# 2xx success
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
# 3xx redirection
MULTIPLE_CHOICE = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# 4xx client errors
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTH_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
# 5xx server errors
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE_SPACE = 507
NOT_EXTENDED = 510

# Responses with these codes must not carry a message body.
NO_BODY_CODES = [ NO_CONTENT, NOT_MODIFIED ]

# Reason phrases keyed by status code.
STATUS = {
    CONTINUE : "Continue",
    SWITCHING_PROTOCOLS : "Switching protocols",
    OK : "OK",
    CREATED : "Created",
    ACCEPTED : "Accepted",
    NON_AUTHORITATIVE_INFORMATION : "Non-authoritative information",
    NO_CONTENT : "No content",
    RESET_CONTENT : "Reset content",
    PARTIAL_CONTENT : "Partial content",
    MULTI_STATUS : "Multi-status",
    MULTIPLE_CHOICE : "Multiple choice",
    MOVED_PERMANENTLY : "Moved permanently",
    FOUND : "Found",
    SEE_OTHER : "See other",
    NOT_MODIFIED : "Not modified",
    USE_PROXY : "Use proxy",
    TEMPORARY_REDIRECT : "Temporary redirect",
    BAD_REQUEST : "Bad request",
    UNAUTHORIZED : "Unauthorized",
    PAYMENT_REQUIRED : "Payment required",
    FORBIDDEN : "Forbidden",
    NOT_FOUND : "Not found",
    NOT_ALLOWED : "Not allowed",
    NOT_ACCEPTABLE : "Not acceptable",
    PROXY_AUTH_REQUIRED : "Proxy authentication required",
    REQUEST_TIMEOUT : "Request timeout",
    CONFLICT : "Conflict",
    GONE : "Gone",
    LENGTH_REQUIRED : "Length required",
    PRECONDITION_FAILED : "Precondition failed",
    REQUEST_ENTITY_TOO_LARGE : "Request entity too large",
    REQUEST_URI_TOO_LONG : "Request URI too long",
    UNSUPPORTED_MEDIA_TYPE : "Unsupported media type",
    REQUESTED_RANGE_NOT_SATISFIABLE : "Requested range not satisfiable",
    EXPECTATION_FAILED : "Expectation failed",
    INTERNAL_SERVER_ERROR : "Internal server error",
    NOT_IMPLEMENTED : "Not implemented",
    BAD_GATEWAY : "Bad gateway",
    SERVICE_UNAVAILABLE : "Service unavailable",
    GATEWAY_TIMEOUT : "Gateway timeout",
    VERSION_NOT_SUPPORTED : "HTTP version not supported",
    INSUFFICIENT_STORAGE_SPACE : "Insufficient storage space",
    NOT_EXTENDED : "Not extended",
    }

def getStatus(code):
    """Return the reason phrase for *code*, or 'unknown' if unrecognized."""
    try:
        return STATUS[code]
    except KeyError:
        return "unknown"
# Form-content MIME types recognized by parseRequestArgs.
MULTIPART_FORM_DATA = 'multipart/form-data'
URLENCODED = 'application/x-www-form-urlencoded'
# Query-string parser (cgi.parse_qs under a legacy local name).
parseQueryArgs = cgi.parse_qs
def timegm(year, month, day, hour, minute, second):
    """Convert time tuple in GMT to seconds since epoch, GMT.

    Raises AssertionError for years before 1970 or months outside 1..12.
    """
    # BUG FIX: 'calendar' was used here but never imported at module level,
    # so every call raised NameError. Import locally to keep the fix
    # self-contained.
    import calendar
    EPOCH = 1970
    assert year >= EPOCH
    assert 1 <= month <= 12
    # Whole days since the epoch, accounting for leap years...
    days = 365*(year-EPOCH) + calendar.leapdays(EPOCH, year)
    for i in range(1, month):
        days = days + calendar.mdays[i]
    if month > 2 and calendar.isleap(year):
        days = days + 1
    days = days + day - 1
    # ...then fold in the time of day.
    hours = days*24 + hour
    minutes = hours*60 + minute
    seconds = minutes*60 + second
    return seconds

def stringToDatetime(dateString):
    """Convert an HTTP date string (RFC 1123 format, e.g.
    'Sun, 06 Nov 1994 08:49:37 GMT') to seconds since epoch."""
    # BUG FIX: 'monthname' was not defined in this module; define the RFC 1123
    # month table locally. Index 0 is a placeholder so index('Jan') == 1.
    monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    parts = dateString.split(' ')
    day = int(parts[1])
    month = monthname.index(parts[2])
    year = int(parts[3])
    hour, min, sec = map(int, parts[4].split(':'))
    return int(timegm(year, month, day, hour, min, sec))
class HttpRequest:
# ---- class-level defaults (overridden per instance during parsing) ----
http_version = (1, 1)
http_version_string = ("HTTP/%d.%d" % http_version)
# Request-size limits enforced while parsing.
max_content_length = 10000
max_headers = 500
# Components of the request line (filled in by parseRequestLine).
request_line = None
request_method = None
request_uri = None
request_path = None
request_query = None
request_version = None
# Request body (wrapped in a StringIO by readContent).
content_length = 0
content = None
etag = None
# Keep-alive handling: default to closing the connection.
close_connection = True
response_code = 200
response_status = "OK"
response_sent = False
# True when a conditional request matched (304/412 short-circuit).
cached = False
last_modified = None
forceSSL = False
def __init__(self, host, rin, out):
    """Parse one HTTP request read from *rin*; the response goes to *out*."""
    self.host = host
    self.rin = rin    # request input stream
    self.out = out    # response output stream
    self.request_args = {}
    self.args = self.request_args  # legacy alias
    self.request_headers = {}
    self.request_cookies = {}
    self.response_headers = {}
    self.response_cookies = {}
    self.output = StringIO()
    # Parse eagerly: request line, headers, cookies, body and form args.
    self.parseRequest()
def isSecure(self):
    """Return True when the request is treated as SSL (forceSSL flag)."""
    return self.forceSSL
def getRequestMethod(self):
    """Return the HTTP request method (e.g. 'GET', 'POST')."""
    return self.request_method
def trim(self, str, ends):
    """Return *str* with the first matching suffix from *ends* removed.

    Only the first suffix that matches is stripped; the rest are skipped.
    """
    for end in ends:
        # BUG FIX: guard against an empty suffix -- str[:-len('')] is
        # str[:-0], which would wipe the whole string.
        if end and str.endswith(end):
            str = str[ : -len(end) ]
            break
    return str
def requestError(self, code, msg=None):
    """Send an error response, then abort request handling via ValueError."""
    self.sendError(code, msg)
    # sendError set response_status; raise so the caller unwinds.
    raise ValueError(self.response_status)
def sendError(self, code, msg=None):
    """Set the response status to *code* and send the response immediately."""
    self.setResponseCode(code, msg=msg)
    self.sendResponse()
def parseRequestVersion(self, version):
    """Parse an 'HTTP/x.y' version string into an (x, y) integer tuple.

    Sends a 400 error (which raises) when the string is malformed.

    BUG FIX: the parsed tuple was computed but never returned, so the
    caller's version comparison in parseRequestLine always saw None.
    """
    try:
        if not version.startswith('HTTP/'):
            raise ValueError
        version_string = version.split('/', 1)[1]
        version_codes = version_string.split('.')
        if len(version_codes) != 2:
            raise ValueError
        request_version = (int(version_codes[0]), int(version_codes[1]))
    except (ValueError, IndexError):
        # requestError raises, so request_version is always bound below.
        self.requestError(BAD_REQUEST,
                          "Bad request version (%s)" % repr(version))
    return request_version
def parseRequestLine(self):
    """Parse the request line into method, URI, version, path and query args."""
    line = self.trim(self.request_line, ['\r\n', '\n'])
    line_fields = line.split()
    n = len(line_fields)
    if n == 3:
        [method, uri, version] = line_fields
    elif n == 2:
        # HTTP/0.9 "simple request": no version field on the line.
        [method, uri] = line_fields
        version = 'HTTP/0.9'
    else:
        self.requestError(BAD_REQUEST,
                          "Bad request (%s)" % `line`)
    # NOTE(review): this check relies on parseRequestVersion returning the
    # parsed (major, minor) tuple.
    request_version = self.parseRequestVersion(version)
    if request_version > (2, 0):
        self.requestError(VERSION_NOT_SUPPORTED,
                          "HTTP version not supported (%s)" % `version`)
    #if request_version >= (1, 1) and self.http_version >= (1, 1):
    #    self.close_connection = False
    #else:
    #    self.close_connection = True
    self.request_method = method
    self.method = method  # legacy alias
    self.request_uri = uri
    self.request_version = version
    # Split the URI into path and (optional) query string.
    uri_query = uri.split('?')
    if len(uri_query) == 1:
        self.request_path = uri
    else:
        self.request_path = uri_query[0]
        self.request_query = uri_query[1]
        self.request_args = parseQueryArgs(self.request_query)
        self.args = self.request_args
def parseRequestHeaders(self):
    """Read header lines from the input stream into a mimetools Message."""
    header_bytes = ""
    header_count = 0
    while True:
        if header_count >= self.max_headers:
            self.requestError(BAD_REQUEST,
                              "Bad request (too many headers)")
        line = self.rin.readline()
        header_bytes += line
        header_count += 1
        # A blank line (or EOF: '') terminates the header section.
        if line == '\r\n' or line == '\n' or line == '':
            break
    header_input = StringIO(header_bytes)
    self.request_headers = Message(header_input)
def parseRequestCookies(self):
    """Parse the Cookie request header into self.request_cookies."""
    header = self.getHeader("cookie")
    if not header:
        return
    for part in header.split(';'):
        part = part.lstrip()
        # Fragments without '=' (including empty ones) are ignored,
        # matching the old ValueError-swallowing behavior.
        if '=' not in part:
            continue
        name, _, value = part.partition('=')
        self.request_cookies[name] = value
def parseRequestArgs(self):
    """Merge POST body form arguments into self.request_args."""
    # Only POST bodies carry form data; query-string args were parsed earlier.
    if ((self.content is None) or
        (self.request_method != "POST")):
        return
    content_type = self.getHeader('content-type')
    if not content_type:
        return
    (encoding, params) = cgi.parse_header(content_type)
    if encoding == URLENCODED:
        xargs = cgi.parse_qs(self.content.getvalue(),
                             keep_blank_values=True)
    elif encoding == MULTIPART_FORM_DATA:
        xargs = cgi.parse_multipart(self.content, params)
    else:
        # Unknown content type: ignore the body.
        xargs = {}
    self.request_args.update(xargs)
def getCookie(self, k):
    """Return the value of request cookie *k* (raises KeyError if absent)."""
    return self.request_cookies[k]
def readContent(self):
    """Read the request body into a seekable StringIO (self.content).

    Does nothing when there is no (or a malformed) Content-Length header;
    aborts with 413 when the declared length exceeds max_content_length.
    """
    try:
        self.content_length = int(self.getHeader("Content-Length"))
    except (TypeError, ValueError):
        # BUG FIX: was a bare 'except:' that swallowed every exception.
        # Only a missing header (int(None) -> TypeError) or a non-numeric
        # value (ValueError) means "no body to read".
        return
    if self.content_length > self.max_content_length:
        self.requestError(REQUEST_ENTITY_TOO_LARGE)
    self.content = self.rin.read(self.content_length)
    self.content = StringIO(self.content)
    self.content.seek(0, 0)
def parseRequest(self):
    """Parse the full request: request line, headers, cookies, body, args."""
    self.request_line = self.rin.readline()
    self.parseRequestLine()
    self.parseRequestHeaders()
    self.parseRequestCookies()
    # Honor the client's Connection preference before reading the body.
    connection_mode = self.getHeader('Connection')
    self.setCloseConnection(connection_mode)
    self.readContent()
    self.parseRequestArgs()
def setCloseConnection(self, mode):
    """Update close_connection from a Connection header value (case-insensitive)."""
    if not mode:
        return
    mode = mode.lower()
    if mode == 'close':
        self.close_connection = True
    elif mode == 'keep-alive' and self.http_version >= (1, 1):
        # Keep-alive is only honored for HTTP/1.1+ servers.
        self.close_connection = False
def getCloseConnection(self):
    """Return True when the connection should be closed after this response."""
    return self.close_connection
def getHeader(self, k, v=None):
    """Return request header *k*, or *v* when the header is absent."""
    try:
        return self.request_headers[k]
    except KeyError:
        return v
def getRequestMethod(self):
    """Return the HTTP request method.

    NOTE(review): duplicates the identical definition earlier in the class;
    this later definition is the one that takes effect.
    """
    return self.request_method
def getRequestPath(self):
    """Return the request path (URI without the query string)."""
    return self.request_path
def setResponseCode(self, code, status=None, msg=None):
    """Set the response status code and reason phrase.

    When *status* is falsy the standard reason phrase for *code* is used.
    NOTE(review): *msg* is accepted (and passed by sendError) but unused.
    """
    self.response_code = code
    if not status:
        status = getStatus(code)
    self.response_status = status
def setResponseHeader(self, k, v):
    """Set response header *k* (stored lower-cased) to *v*.

    Setting a Connection header also updates the keep-alive flag.
    """
    key = k.lower()
    self.response_headers[key] = v
    if key == 'connection':
        self.setCloseConnection(v)
# Legacy short alias kept for existing callers.
setHeader = setResponseHeader
def setLastModified(self, when):
    """Record the Last-Modified time and honor If-Modified-Since.

    Marks the response 304/cached when the client's copy is current.
    """
    # time.time() may be a float, but the HTTP-date strings are
    # only good for whole seconds.
    when = int(math.ceil(when))
    if (not self.last_modified) or (self.last_modified < when):
        # BUG FIX: was 'self.lastModified', so last_modified (the attribute
        # actually read above and by senders) never updated.
        self.last_modified = when
    modified_since = self.getHeader('if-modified-since')
    if modified_since:
        modified_since = stringToDatetime(modified_since)
        if modified_since >= when:
            self.setResponseCode(NOT_MODIFIED)
            self.cached = True
def setContentType(self, ty):
    """Set the Content-Type response header to *ty*."""
    self.setResponseHeader("Content-Type", ty)
def setEtag(self, etag):
    """Record the response ETag and handle If-None-Match revalidation.

    When the client already holds a matching tag (or sends '*'), mark the
    response cached: 304 for GET/HEAD, 412 (precondition failed) otherwise.
    """
    if etag:
        self.etag = etag
    tags = self.getHeader("if-none-match")
    if tags:
        tags = tags.split()
        if (etag in tags) or ('*' in tags):
            if self.request_method in ("HEAD", "GET"):
                code = NOT_MODIFIED
            else:
                code = PRECONDITION_FAILED
            self.setResponseCode(code)
            self.cached = True
def addCookie(self, k, v, expires=None, domain=None, path=None,
              max_age=None, comment=None, secure=None):
    """Queue a Set-Cookie response value for cookie *k*.

    Optional attributes are appended in the fixed order
    Expires, Domain, Path, Max-Age, Comment, Secure.
    """
    cookie = v
    attributes = (("Expires", expires), ("Domain", domain), ("Path", path),
                  ("Max-Age", max_age), ("Comment", comment))
    for label, value in attributes:
        if value is not None:
            cookie += "; %s=%s" % (label, value)
    if secure:
        cookie += "; Secure"
    self.response_cookies[k] = cookie
def sendResponseHeaders(self):
    """Write every response header and cookie, then the blank separator line."""
    if self.etag:
        self.setResponseHeader("ETag", self.etag)
    for name, value in self.response_headers.items():
        self.send("%s: %s\r\n" % (name.capitalize(), value))
    for name, value in self.response_cookies.items():
        self.send("Set-Cookie: %s=%s\r\n" % (name, value))
    # Blank line terminates the header section.
    self.send("\r\n")
def sendResponse(self):
if self.response_sent:
return
self.response_sent = True
send_body = self.hasBody()
if not self.close_connection:
self.setResponseHeader("Connection", "keep-alive")
self.setResponseHeader("Pragma", "no-cache")
self.setResponseHeader("Cache-Control", "no-cache")
self.setResponseHeader("Expires", | |
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
import numpy as np
import scipy.spatial
import random
from shapely.geometry import Point, Polygon
from shapely.ops import nearest_points
from grgen.auxiliary import Timer
from grgen.auxiliary import Plotter
"""
Implementation of the Kohonen self-organizing map where a grid is trained to represent some input geometry.
"""
class Kohonen:
""" The class of the self-organizing map """
def __init__(self, spacing, geometry, dim=2, s=0.1, iterations=None, iterationsFactor=1, minRadius=None, maxRadius=None, batchSize=None, vertexType="triangular"):
    """ Initialization of the Kohonen class

    :param spacing: approximation of the grid spacing used to build the initial grid
    :param geometry: geometry as sets of vertices inside a list. First entry is the outer boundary, second
        is the inner boundary. Only one inner boundary is supported.
    :param dim: currently only 2D
    :param s: constant for the lateral connection of two neurons
    :param iterations: maximum number of iterations
    :param iterationsFactor: Factor to increase/decrease default iteration number
    :param minRadius: minimum radius
    :param maxRadius: maximum radius
    :param batchSize: size of the training data for mini-batch learning
    :param vertexType: "triangular", "rectangular" TODO implement rectangular
    """
    self.spacing = spacing
    self.geometry = geometry
    self.dim = dim
    self.s = s
    self.iterations = iterations
    self.minRadius = minRadius
    self.maxRadius = maxRadius
    self.batchSize = batchSize
    self.vertexType = vertexType
    # Weights of the Kohonen network. Also the grid coordinates of all cells.
    self.weights = None
    self.startWeights = None
    # The position of coordinates can be fixed by using this array of booleans.
    self.noPoints = None
    self.noInternalPoints = None
    self.noBoundaryPoints = None
    self.noCells = None
    # Minimum and maximum coordinates of the geometry
    self.boundingBox = None
    # NOTE(review): 10e-12 equals 1e-11 -- confirm the intended epsilon.
    self.eps = 10e-12
    self.dataType = np.float32
    # Storage for the learning operation
    self.tmpWeight = None
    self.geometryProbability = None
    self.vertexProbability = None
    # Fixed topology of the grid
    self.connection = None
    self.neighbors = None
    self.boundary = None
    self.boundaryIdx = None
    self.innerIdx = None
    self.boundaryId = None
    self.boundaryFace = None
    # auxiliary
    self.timer = Timer()
    self.plotter = None
    # Initialize som algorithm
    # 1) Calculate bounding box and radius
    self.calculateBoundingBox()
    if maxRadius == None:
        # Default: cover the whole bounding box plus a 10-cell margin.
        delta = np.subtract(self.boundingBox[:,1], self.boundingBox[:,0])
        self.maxRadius = np.max(delta) + 10*spacing
    if minRadius == None:
        self.minRadius = 2*spacing
    # 2) Initialize weights of the network
    self.buildWeights()
    # 3) Remove coordinates inside inner geometry or outside outer boundary
    self.removeGridCoordinates()
    # 4) Build the grid topology (connections, cell neighbors, ...)
    self.buildGridTopology()
    if iterations == None:
        # Default iteration count is one pass per grid point.
        self.iterations = self.noPoints
    self.iterations = int(iterationsFactor*self.iterations)
    self.calculateBoundaryProbability()
def maskCornerPoints(self):
    """ move the corner points on the corner of the outer geometry and fix their positions """
    removeIndices = list()
    for c in self.geometry[0]:
        tmp=tf.cast(c, dtype=self.dataType)
        # Nearest boundary vertex is snapped exactly onto the corner c.
        neighbor = self.findNN(tf.gather(self.weights, self.boundaryIdx), tmp)
        tf.compat.v1.scatter_update(self.weights, tf.Variable(self.boundaryIdx[neighbor], dtype=np.int64), tmp)
        removeIndices.append(neighbor)
    # Dropping the snapped vertices from boundaryIdx freezes their positions
    # during later training.
    self.boundaryIdx = np.delete(self.boundaryIdx, removeIndices)
def findNN(self, searchSet, coordinates):
    """Return the index of the nearest neighbor of *coordinates* in *searchSet*.

    :param searchSet: set where the neighbor is searched
    :param coordinates: the point that is searched for
    """
    # Squared Euclidean distance of every candidate to the query point;
    # the best matching unit is the argmin.
    delta = searchSet - tf.expand_dims(coordinates, axis=0)
    squaredDistance = tf.reduce_sum(delta ** 2, axis=1)
    return tf.argmin(squaredDistance, axis=0)
def calculateBoundingBox(self):
    """Compute the joint bounding box of all geometry point sets.

    Fills self.boundingBox with shape (dim, 2): column 0 holds the
    per-dimension minima, column 1 the maxima, over every geometry.
    """
    self.timer.startTimer("calculateBoundingBox")
    # Per-geometry extrema first, then reduce across geometries.
    perGeometry = np.zeros((self.dim, 2, len(self.geometry)))
    for idx, g in enumerate(self.geometry):
        perGeometry[0, :, idx] = (np.min(g[:, 0]), np.max(g[:, 0]))
        perGeometry[1, :, idx] = (np.min(g[:, 1]), np.max(g[:, 1]))
    lower = np.min(perGeometry[:, 0, :], axis=1).reshape(-1, 1)
    upper = np.max(perGeometry[:, 1, :], axis=1).reshape(-1, 1)
    self.boundingBox = np.concatenate((lower, upper), axis=1)
    self.timer.stopTimer("calculateBoundingBox")
def buildWeights(self):
    """ Calculate weights (the initial coordinates of the grid) """
    self.timer.startTimer("buildWeights")
    minX = self.boundingBox[0,0]
    minY = self.boundingBox[1,0]
    maxX = self.boundingBox[0,1]
    maxY = self.boundingBox[1,1]
    if(self.vertexType == "triangular"):
        # Row spacing of an equilateral-triangle lattice with edge 'spacing'.
        spacingY = np.sqrt(self.spacing**2 - (self.spacing/2)**2)
    else:
        spacingY = self.spacing
    # Extend the lattice 3 cells beyond the bounding box on every side.
    rangeX = np.arange(minX-3*self.spacing, maxX+3*self.spacing, self.spacing)
    rangeY = np.arange(minY-3*spacingY, maxY+3*spacingY, spacingY)
    x, y = np.meshgrid(rangeX, rangeY)
    if(self.vertexType == "triangular"):
        # Shift every other row by half a spacing to form triangles.
        x[::2,:]+=self.spacing/2
    x = x.reshape(-1,1)
    y = y.reshape(-1,1)
    self.weights = tf.Variable(np.concatenate((x, y), axis = 1), dtype=self.dataType)
    self.noPoints = tf.shape(self.weights)[0]
    self.timer.stopTimer("buildWeights")
def removeGridCoordinates(self):
    """Drop grid points outside the domain: a point is kept only when it
    lies inside the outer polygon and outside the inner hole."""
    self.timer.startTimer("removeGridCoordinates")
    inner = Polygon(self.geometry[1])
    outer = Polygon(self.geometry[0])
    nPoints = np.shape(self.weights)[0]
    keep = np.ones(nPoints, dtype=bool)
    for i in range(nPoints):
        candidate = Point(self.weights[i, 0], self.weights[i, 1])
        # inside outer boundary AND not inside the hole
        keep[i] = (not inner.contains(candidate)) and outer.contains(candidate)
    self.weights = tf.Variable(tf.boolean_mask(self.weights, keep), dtype=self.dataType)
    self.startWeights = self.weights
    self.noPoints = tf.shape(self.weights)[0]
    self.timer.stopTimer("removeGridCoordinates")
def buildGridTopology(self):
    """Build the grid topology from a Delaunay triangulation of the weights.

    Fills: ``connection`` (triangle vertex indices), ``neighbors``
    (neighboring triangle indices, -1 for none), ``boundaryIdx`` /
    ``innerIdx`` (point indices on / off the boundary), and the
    ``noCells`` / ``noInternalPoints`` / ``noBoundaryPoints`` counters.
    """
    self.timer.startTimer("buildGridTopology")
    triangulation = scipy.spatial.Delaunay(self.weights.numpy())
    self.connection = triangulation.simplices
    self.neighbors = triangulation.neighbors
    it = 0
    remove = list()
    # Discard oversized triangles (Delaunay also triangulates concave
    # regions/holes); the bounding-box area heuristic flags them.
    for x in self.connection:
        vertex = tf.gather(self.weights, x, axis=0)
        minimum = tf.math.reduce_min(vertex, axis=0)
        maximum = tf.math.reduce_max(vertex, axis=0)
        if((maximum[0]-minimum[0])*(maximum[1]-minimum[1])/2 > self.spacing**2/2+self.eps):
            remove.append(it)
        it+=1
    self.connection =np.delete(self.connection, remove, axis=0)
    # Mark references to removed triangles as "no neighbor" (-1), then drop
    # the removed rows themselves.
    # NOTE(review): after np.delete the remaining neighbor indices are not
    # re-mapped to the compacted array — confirm downstream code only tests
    # for < 0 (as done below) rather than dereferencing these indices.
    self.neighbors[np.isin(self.neighbors, remove)] = -1
    self.neighbors = np.delete(self.neighbors, remove, axis=0)
    # (triangle, side) pairs with no neighbor lie on the boundary.
    self.boundary = np.argwhere(self.neighbors < 0)
    tmpBndry = list()
    # Side k of a Delaunay simplex is opposite vertex k, so the boundary
    # edge consists of the other two vertices.
    for b in self.boundary:
        if (b[1]==0):
            tmpBndry.append(self.connection[b[0],1])
            tmpBndry.append(self.connection[b[0],2])
        if (b[1]==1):
            tmpBndry.append(self.connection[b[0],2])
            tmpBndry.append(self.connection[b[0],0])
        if (b[1]==2):
            tmpBndry.append(self.connection[b[0],0])
            tmpBndry.append(self.connection[b[0],1])
    self.boundaryIdx = np.unique(np.array(tmpBndry))
    # Interior points = all points minus boundary points.
    self.innerIdx = np.arange(0, self.noPoints, 1, dtype=np.int32)
    self.innerIdx = np.delete(self.innerIdx, self.boundaryIdx)
    self.noCells = np.shape(self.connection)[0]
    self.noInternalPoints = np.shape(self.innerIdx)[0]
    self.noBoundaryPoints = np.shape(self.boundaryIdx)[0]
    self.timer.stopTimer("buildGridTopology")
def produceRandomInput(self, tensorflow=True):
    """Draw a uniform random point inside the domain by rejection sampling.

    A point is accepted when it lies inside the outer polygon and outside
    the inner hole.

    :param tensorflow: if True return a tf.Variable, else a numpy array
    """
    self.timer.startTimer("produceRandomInput")
    minX, maxX = self.boundingBox[0, 0], self.boundingBox[0, 1]
    minY, maxY = self.boundingBox[1, 0], self.boundingBox[1, 1]
    inner = Polygon(self.geometry[1])
    outer = Polygon(self.geometry[0])
    while True:
        randomCoordinate = np.array([random.uniform(minX, maxX),
                                     random.uniform(minY, maxY)])
        point = Point(randomCoordinate[0], randomCoordinate[1])
        # accept: inside the outer boundary AND not inside the hole
        if outer.contains(point) and not inner.contains(point):
            break
    # BUG FIX: stopTimer() was placed after the infinite loop and therefore
    # unreachable (the loop only exits via return), so this timer was never
    # stopped. Stop it before returning.
    self.timer.stopTimer("produceRandomInput")
    if tensorflow:
        return tf.Variable(randomCoordinate, dtype=self.dataType)
    return randomCoordinate
def calculateBoundaryProbability(self):
    """Precompute edge-length-proportional sampling probabilities.

    Fills ``self.vertexProbability`` (per polygon: normalized edge-length
    distribution) and ``self.geometryProbability`` (normalized perimeter
    distribution over the polygons); helper for produceRandomInputBoundary().
    """
    self.vertexProbability = list()
    self.geometryProbability = list()
    for poly in self.geometry:
        # length of each edge between a vertex and its predecessor
        edgeLengths = np.sqrt(np.sum((poly - np.roll(poly, 1, axis=0)) ** 2, axis=1))
        self.geometryProbability.append(np.sum(edgeLengths, axis=0))
        self.vertexProbability.append(edgeLengths / np.sum(edgeLengths))
    self.geometryProbability = self.geometryProbability / np.sum(self.geometryProbability)
def produceRandomInputBoundary(self, tensorflow=True):
    """Draw a random point on the geometry boundary.

    A polygon is chosen with probability proportional to its perimeter, an
    edge with probability proportional to its length, and a point uniformly
    along that edge.

    :param tensorflow: if True return a tf.Variable, else a numpy array
    """
    self.timer.startTimer("produceRandomInputBoundary")
    idx = int(np.random.choice(len(self.geometry), size=1, p=self.geometryProbability))
    nbr = np.shape(self.geometry[idx])[0]
    v = np.random.choice(nbr, size=1, p=self.vertexProbability[idx])
    # edge endpoints: vertex v and its predecessor (matches the edge-length
    # convention used in calculateBoundaryProbability)
    start = self.geometry[idx][v, :].reshape(-1)
    end = np.roll(self.geometry[idx], 1, axis=0)[v, :].reshape(-1)
    # BUG FIX: the previous code sampled x and y independently within the
    # edge's bounding rectangle, which for non-axis-aligned edges yields a
    # point that is NOT on the boundary. Interpolate along the edge instead.
    t = random.uniform(0.0, 1.0)
    randomCoordinate = (start + t * (end - start)).reshape(-1,)
    self.timer.stopTimer("produceRandomInputBoundary")
    if tensorflow:
        return tf.Variable(randomCoordinate, dtype=self.dataType)
    return randomCoordinate
def produceRandomBatch(self):
    """Assemble a batch of random boundary points as one tf.Variable."""
    samples = np.zeros((self.batchSize, self.dim))
    for row in range(self.batchSize):
        samples[row, :] = self.produceRandomInputBoundary(False)
    return tf.Variable(samples, dtype=self.dataType)
def moveBoundaryPoints(self):
    """Project the boundary weights onto the geometry boundary.

    Each boundary point is snapped to the nearest point on whichever
    polygon boundary (outer or inner) is closer to it.
    """
    self.timer.startTimer("moveBoundaryPoints")
    inner = Polygon(self.geometry[1])
    outer = Polygon(self.geometry[0])
    nBoundary = np.shape(self.boundaryIdx)[0]
    movement = np.zeros((nBoundary, 2))
    weightsBoundary = tf.Variable(tf.gather(self.weights, self.boundaryIdx), dtype=self.dataType).numpy()
    for idx in range(nBoundary):
        point = Point(weightsBoundary[idx, 0], weightsBoundary[idx, 1])
        pOuter, _ = nearest_points(outer.boundary, point)
        pInner, _ = nearest_points(inner.boundary, point)
        # snap to whichever boundary is closer
        target = pInner if point.distance(pInner) <= point.distance(pOuter) else pOuter
        movement[idx, 0] = target.x
        movement[idx, 1] = target.y
    # FIX: removed two leftover duplicated debug print() calls that dumped
    # the shape of boundaryIdx on every invocation.
    tf.compat.v1.scatter_update(self.weights, self.boundaryIdx, movement)
    self.timer.stopTimer("moveBoundaryPoints")
def trainingOperation(self, inputData, searchSet, searchSetStart, trainingSetStart, delta, radius, k=0, boundaryTraining = False):
""" ordering stage for all cells
:param inputData: random training data
:param searchSet: set for nearest neighbor search
:param searchSetStart: coordinate of the bmu is stored here
:param trainingSetStart: set for neighborhood calculation
:param delta: learning rate
:param radius: learning radius
:param k: parameter to eliminate the border | |
overlap is ~1
def cost_function(new_tensors):
new_state = state.with_different_tensors(new_tensors)
overlap = self.evolution_pepo.matrix_element(new_state, state, **contraction_options)
res = np.abs(overlap)
return res * np.exp(self.dt * state_energy)
else:
def cost_function(new_tensors):
new_state = state.with_different_tensors(new_tensors)
overlap = self.evolution_pepo.matrix_element(new_state, state, **contraction_options)
return np.abs(overlap)
# initial guess
if initial == 'old':
initial_tensors = state.get_tensors()
elif initial == 'approximate':
initial_tensors = approximate_pepo_peps(self.evolution_pepo, state).get_tensors()
elif initial == 'u_site':
u_site = get_u_site(g=self.g, dt=self.dt, dtype=self.pepo_dtype, real_time=self.real_time)
initial_tensors = absorb_u_site(state, u_site).get_tensors()
else:
raise ValueError(f'Invalid initial keyword: {initial}')
# random deviation
if random_dev:
amplitude = abs(random_dev * tree_avg_abs_nonzero(initial_tensors))
initial_tensors = tree_map(lambda arr: arr + uniform(arr.shape, arr.dtype, min_val=-amplitude,
max_val=amplitude), initial_tensors)
# maximise overlap
optimal_tensors, optimal_overlap, info = maximise(cost_function, initial_tensors, **optimisation_options)
optimal_state = state.with_different_tensors(optimal_tensors)
if overlap_threshold and self.real_time and (optimal_overlap < overlap_threshold):
warn(f'Optimal overlap is below threshold: overlap = {optimal_overlap} < {optimal_overlap} = threshold')
# phase correction
if isinstance(optimal_state, Peps):
ov = self.evolution_pepo.matrix_element(optimal_state, state, **contraction_options)
phase_factor = ov / np.abs(ov)
optimal_state.absorb_factor(phase_factor)
else:
warn('Losing global phase information in time evolution.')
return optimal_state
def get_hamiltonian(g: float, bc: str, lx: int, ly: int, dtype: np.dtype) -> Operator:
    """Build the 2D transverse-field Ising Hamiltonian as an operator.

    The INFINITE branch makes the model explicit: the per-bond term is
    2 * (-XX - g/4 * 1Z - g/4 * Z1), i.e. H = -sum_<ij> X_i X_j - g sum_i Z_i
    once the factor 2 (horizontal + vertical bonds) and 1/4 (four bonds per
    site) are accounted for.

    Parameters
    ----------
    g : transverse field strength
    bc : boundary conditions, one of OBC, PBC, INFINITE
    lx, ly : lattice extent (used for OBC/PBC)
    dtype : dtype of the operator tensors

    Returns
    -------
    NnPepo for OBC/PBC, LocalOperator (single bond) for INFINITE.

    Raises
    ------
    ValueError : for an unknown `bc`.
    """
    if bc in [OBC, PBC]:
        o = np.zeros_like(s0)
        # Ladder-style construction over virtual indices (i,j); the 0.5
        # factor on the sz term presumably avoids double counting when the
        # same tensor acts in both directions — TODO confirm against NnPepo.
        C = np.array([[s0, o, o], [-sx, o, o], [-.5 * g * sz, sx, s0]], dtype=dtype)
        C = np.transpose(C, [2, 3, 0, 1])  # (i,j,p,p*) -> (p,p*,i,j)
        I = np.array([[o, o, o], [o, o, o], [s0, o, o]], dtype=dtype)
        I = np.transpose(I, [2, 3, 0, 1])  # (i,j,p,p*) -> (p,p*,i,j)
        O = np.zeros_like(I)
        # Stack a second virtual direction (k,l) on top of (i,j).
        D = np.array([[I, O], [C, I]], dtype=dtype)
        D = np.transpose(D, [2, 3, 4, 5, 0, 1])  # (k,l,p,p*,i,j) -> (p,p*,i,j,k,l)
        return NnPepo(C, D, bc=bc, lx=lx, ly=ly, hermitian=True)
    elif bc == INFINITE:
        xx = np.einsum('ij,kl->ikjl', sx, sx)  # (p,p*) & (p,p*) -> (p1,p2,p1*,p2*)
        Iz = np.einsum('ij,kl->ikjl', s0, sz)
        zI = np.einsum('ij,kl->ikjl', sz, s0)
        h_bond = 2 * (- xx - g / 4. * Iz - g / 4. * zI)
        # factor 2: hor + vert = 2 * hor
        # factors 1/4. : four bonds per site
        operator_geometry = np.array([[0, 1]])
        return LocalOperator(h_bond, operator_geometry, hermitian=True)
    else:
        raise ValueError('invalid bc')
def get_u_site(g: float, dt: float, dtype: np.dtype, real_time: bool):
    """Single-site evolution factor for the transverse-field term.

    Parameters
    ----------
    g : transverse field strength
    dt : time step
    dtype : dtype of the returned array
    real_time : True for real-time evolution (exp(i g dt Z)),
                False for imaginary time (exp(g dt Z))

    Returns
    -------
    2x2 numpy array with the requested dtype.
    """
    if real_time:
        return np.asarray(expm(1j * g * dt * sz), dtype=dtype)
    # BUG FIX: the imaginary-time branch computed the operator but did not
    # return it, so the function silently returned None.
    return np.asarray(expm(g * dt * sz), dtype=dtype)
def evolution_pepo_real_time(g: float, dt: float, bc: str, dtype: np.dtype,
                             lx: Optional[int] = None, ly: Optional[int] = None) -> Operator:
    """Trotterized real-time evolution PEPO for one time step dt.

    Builds U(dt) ~ U_vert(dt/2) U_bond(dt) U_vert(dt/2), where the bond
    exponential is decomposed into a pair of half-bond tensors A (legs
    (p, p*, k)) contracted over the shared leg k.
    """
    # PEPO for U(dt) ~ U_vert(dt/2) U_bond(dt) U_vert(dt/2)
    #
    # half bond operators:
    #
    #    |    |         |    |
    #    U_bond    =    A -- A
    #    |    |         |    |
    #
    # expm(-i H_bond dt) = expm(-i (-XX) dt) = expm(i dt XX) = cos(dt) + i sin(dt) XX = A_0 A_0 + A_1 A_1
    # with A_0 = (cos(dt) ** 0.5) * 1 , A_1 = (i sin(dt)) ** 0.5 * X
    # A & B legs: (p,p*,k)
    A = np.zeros([2, 2, 2], dtype=dtype)  # u_hb(p,p*,a)
    # index_update used for jax compatibility (functional in-place update);
    # equivalent plain-numpy form kept for reference:
    # A[:,:,0] = (np.cos(dt)) ** 0.5 * s0
    # A[:,:,1] = (1.j * np.sin(dt)) ** 0.5 * sx
    A = index_update(A, index[:, :, 0], (np.cos(dt)) ** 0.5 * s0)
    A = index_update(A, index[:, :, 1], (1.j * np.sin(dt)) ** 0.5 * sx)
    # expm(- i H_vert dt/2) = expm(- i(-gZ) dt/2) = expm(i/2 g dt Z)
    u_vert = np.asarray(expm(.5j * g * dt * sz), dtype=dtype)
    return _build_evolution_pepo(u_vert, A, bc, lx, ly)
def evolution_pepo_imag_time(g: float, dt: float, bc: str, dtype: np.dtype,
                             lx: Optional[int] = None, ly: Optional[int] = None) -> Operator:
    """Trotterized imaginary-time evolution PEPO for one time step dt.

    Same structure as evolution_pepo_real_time, with cos/sin replaced by
    cosh/sinh in the half-bond decomposition.
    """
    # PEPO for U(dt) ~ U_vert(dt/2) U_bond(dt) U_vert(dt/2)
    #
    # half bond operators:
    #
    #    |    |         |    |
    #    U_bond    =    A -- A
    #    |    |         |    |
    #
    # expm(- H_bond dt) = expm(- (-XX) dt) = expm(dt XX) = cosh(dt) + sinh(dt) XX = A_0 A_0 + A_1 A_1
    # with A_0 = (cosh(dt) ** 0.5) * 1 , A_1 = (sinh(dt) ** 0.5) * X
    # A & B legs: (p,p*,k)
    A = np.empty([2, 2, 2], dtype=dtype)
    # index_update: functional (jax-style) in-place assignment
    A = index_update(A, index[:, :, 0], (np.cosh(dt) ** 0.5) * s0)
    A = index_update(A, index[:, :, 1], (np.sinh(dt) ** 0.5) * sx)
    # expm(- H_vert dt/2) = expm(- (-gZ) dt/2) = expm(g dt/2 Z)
    u_vert = np.asarray(expm(g * (dt / 2) * sz), dtype=dtype)
    return _build_evolution_pepo(u_vert, A, bc, lx, ly)
def _build_evolution_pepo(u_vert, A, bc: str,
                          lx: Optional[int] = None, ly: Optional[int] = None) -> Operator:
    """Assemble the evolution PEPO from the single-site factor and half-bond
    tensor.

    u_vert : single-site (vertical-field) factor, legs (p, p*)
    A : half-bond tensor, legs (p, p*, k)
    bc : PBC, OBC or INFINITE; lx, ly are required for PBC/OBC and have no
    effect for INFINITE.

    Raises ValueError when lx/ly are missing for finite bc, or bc is unknown.
    """
    # DOC : lx, ly have no effect for INFINITE
    # Bulk tensor: sandwich four half-bond tensors (one per lattice
    # direction) between two u_vert factors.
    u_bulk = ncon([u_vert, A, A, A, A, u_vert], [[1, -2], [2, 1, -6], [3, 2, -3], [4, 3, -4], [5, 4, -5], [-1, 5]])
    if bc == PBC:
        if (lx is None) or (ly is None):
            raise ValueError
        tensors = [[u_bulk for _ in range(ly)] for _ in range(lx)]
        return Pepo(tensors, bc=PBC, hermitian=False)
    elif bc == OBC:
        if (lx is None) or (ly is None):
            raise ValueError
        # Open boundaries need edge (3 bonds) and corner (2 bonds) variants.
        u_edge = ncon([u_vert, A, A, A, u_vert], [[1, -2], [2, 1, -5], [3, 2, -3], [4, 3, -4], [-1, 4]])
        u_corner = ncon([u_vert, A, A, u_vert], [[1, -2], [2, 1, -3], [3, 2, -4], [-1, 3]])
        tensors = [[u_corner] + [u_edge for _ in range(ly - 2)] + [u_corner]] \
                  + [[u_edge] + [u_bulk for _ in range(ly - 2)] + [u_edge] for _ in range(lx - 2)] \
                  + [[u_corner] + [u_edge for _ in range(ly - 2)] + [u_corner]]
        return Pepo(tensors, bc=OBC, hermitian=False)
    elif bc == INFINITE:
        # Single-site unit cell with c4v symmetry.
        return Ipepo([u_bulk], unit_cell=np.array([[0]]), symmetry='c4v', hermitian=False)
    # Reached only when bc matched none of the branches above.
    raise ValueError(f'Unknown boundary conditions: {bc}')
def xx_correlator_timeslice(groundstate: State, evolved_quenched_state: State,
                            gs_energy: float, t: float, contraction_options: Optional[dict] = None) -> np.ndarray:
    """
    Correlation function <S^x(t)S^x(0)>, with < . > the expectation value in
    the groundstate, evaluated for every position of S^x(t) (the output
    array dimensions). The position of S^x(0) is fixed implicitly by
    `evolved_quenched_state`.

    Parameters
    ----------
    groundstate
        PEPS for the groundstate
    evolved_quenched_state
        PEPS for exp(-iHt)S^x|GS>
    gs_energy
        <GS| H |Gs>
    t
    contraction_options

    Returns
    -------
    xx_correlators : jax.numpy.ndarray
        `xx_correlators[x,y]` is <S^x(t)S^x(0)> for S^x(t) acting on site (x,y)
    """
    opts = contraction_options if contraction_options is not None else {}
    return correlator_timeslice(groundstate, evolved_quenched_state, sx, gs_energy, t, **opts)
def zz_correlator_timeslice(groundstate: State, evolved_quenched_state: State,
                            gs_energy: float, t: float, contraction_options: Optional[dict] = None) -> np.ndarray:
    """
    Correlation function <S^z(t)S^z(0)> where < . > is the expval w.r.t the groundstate
    for all positions of S^z(t) (output array dimensions)
    where the position of S^z(0) is implicitly defined via `evolved_quenched`

    Parameters
    ----------
    groundstate
        PEPS for the groundstate
    evolved_quenched_state
        PEPS for exp(-iHt)S^z|GS>
    gs_energy
        <GS| H |Gs>
    t
    contraction_options

    Returns
    -------
    zz_correlators : jax.numpy.ndarray
        `zz_correlators[x,y]` is <S^z(t)S^z(0)> for S^z(t) acting on site (x,y)
    """
    if contraction_options is None:
        contraction_options = {}
    return correlator_timeslice(groundstate, evolved_quenched_state, sz, gs_energy, t, **contraction_options)
def x_snapshot(state: State, contraction_options: Optional[dict] = None) -> array:
    """
    Expectation value <state| S^x |state> at every lattice position.
    """
    opts = contraction_options if contraction_options is not None else {}
    return expval_snapshot(state, sx, hermitian=True, **opts)
def y_snapshot(state: State, contraction_options: Optional[dict] = None) -> array:
    """
    <state| S^y | state> for all positions of S^y
    """
    if contraction_options is None:
        contraction_options = {}
    return expval_snapshot(state, sy, hermitian=True, **contraction_options)
def z_snapshot(state: State, contraction_options: Optional[dict] = None) -> array:
    """
    Expectation value <state| S^z |state> at every lattice position.
    """
    opts = contraction_options if contraction_options is not None else {}
    return expval_snapshot(state, sz, hermitian=True, **opts)
def _parse_initial_state(initial_state, chi, bc, lx, ly, g, initial_noise, complex_tensors=False):
# Default
if not initial_state:
initial_state = 'ps'
if initial_noise is None:
initial_noise = 0.05
gc = 3.05
state = None
if isinstance(initial_state, Peps) or isinstance(initial_state, Ipeps):
if isinstance(initial_state, Peps) and (bc not in [PBC, OBC]):
raise ValueError(f'initial state of type PEPS is invalid for {bc} boundary conditions')
if isinstance(initial_state, Ipeps) and (bc != INFINITE):
raise ValueError(f'initial state of type IPEPS is invalid for {bc} boundary conditions')
state = initial_state
if type(initial_state) == str:
if initial_state == 'ps':
initial_state = state_z_plus if g > gc else state_x_plus
elif initial_state == 'z+':
initial_state = state_z_plus
elif initial_state == 'x+':
initial_state = state_x_plus
else:
raise ValueError(f'Initial state keyword {initial_state} not supported')
if not state:
if bc in [OBC, PBC]:
state = product_peps(chi=chi, bc=bc, state=initial_state, lx=lx, ly=ly)
else:
| |
"""
A test suite for the different solvers.
"""
import unittest
import NTPolySwig as nt
import warnings
from scipy.sparse import csr_matrix
from scipy.io import mmread
from mpi4py import MPI
from helpers import result_file, log_file
# MPI global communicator.
comm = MPI.COMM_WORLD
warnings.filterwarnings(action="ignore", module="scipy",
message="^internal gelsd")
class TestSolvers(unittest.TestCase):
    '''A test class for the different kinds of solvers.'''
    # class-body imports keep the helpers out of the module namespace
    from os.path import join
    from helpers import scratch_dir
    # First input file.
    input_file = join(scratch_dir, "input.mtx")
    # Second input file.
    input_file2 = join(scratch_dir, "input2.mtx")
    # Matrix to compare against (set by each test, read by check_result).
    CheckMat = 0
    # Rank of the current process (refreshed in setUp).
    my_rank = 0
    # Dimension of the matrices to test.
    mat_dim = 31
@classmethod
def setUpClass(cls):
    '''Set up all of the tests: build the global NTPoly process grid from
    the environment-provided layout.'''
    # FIX: the classmethod parameter was named ``self`` (now ``cls``), and
    # the docstring sat after the import, so it was not the method docstring.
    from os import environ
    rows = int(environ['PROCESS_ROWS'])
    columns = int(environ['PROCESS_COLUMNS'])
    slices = int(environ['PROCESS_SLICES'])
    nt.ConstructGlobalProcessGrid(rows, columns, slices)
@classmethod
def tearDownClass(cls):
    '''Cleanup this test: tear down the global NTPoly process grid.'''
    # FIX: classmethod parameter renamed from ``self`` to ``cls``.
    nt.DestructGlobalProcessGrid()
def setUp(self):
    '''Per-test setup: rank, solver parameters, and root-only logging.'''
    # Rank of the current process.
    self.my_rank = comm.Get_rank()
    # Parameters for iterative solvers.
    self.isp = nt.SolverParameters()
    self.isp.SetVerbosity(True)
    # Parameters for fixed solvers.
    self.fsp = nt.SolverParameters()
    self.fsp.SetVerbosity(True)
    if nt.GetGlobalIsRoot():
        nt.ActivateLogger(log_file, True)
def tearDown(self):
    '''Per-test cleanup: close the logger and echo its contents to stdout.'''
    from sys import stdout
    from yaml import SafeLoader, dump, load
    if nt.GetGlobalIsRoot():
        nt.DeactivateLogger()
        with open(log_file) as handle:
            dump(load(handle, Loader=SafeLoader), stdout)
def create_matrix(self, SPD=None, scaled=None, diag_dom=None, rank=None):
    '''
    Create the test matrix with the following parameters.

    :param SPD: make the matrix symmetric positive definite.
    :param scaled: scale all entries by 1/mat_dim.
    :param diag_dom: make the matrix diagonally dominant.
    :param rank: make the matrix rank deficient.
    :return: the matrix as a scipy csr_matrix.
    '''
    from scipy.sparse import rand, identity
    mat = rand(self.mat_dim, self.mat_dim, density=1.0)
    # symmetrise
    mat = mat + mat.T
    if SPD:
        mat = mat.T.dot(mat)
    if diag_dom:
        identity_matrix = identity(self.mat_dim)
        mat = mat + identity_matrix * self.mat_dim
    if scaled:
        mat = (1.0 / self.mat_dim) * mat
    if rank:
        # NOTE(review): row-slicing shrinks the product to a
        # (mat_dim - rank) x (mat_dim - rank) matrix, while callers size
        # their NTPoly result matrices with self.mat_dim — confirm this is
        # intended; mat[rank:].T.dot(mat[rank:]) would keep the full
        # dimension while still being rank deficient.
        mat = mat[rank:].dot(mat[rank:].T)
    return csr_matrix(mat)
def write_matrix(self, mat, file_name):
    '''Write a matrix to a matrix-market file from the root rank only,
    then synchronize all ranks.'''
    from scipy.io import mmwrite
    if self.my_rank == 0:
        mmwrite(file_name, csr_matrix(mat))
    comm.barrier()
def check_result(self):
    '''Compare the computed matrix in the result file against CheckMat.'''
    from helpers import THRESHOLD
    from scipy.sparse.linalg import norm
    normval = 0
    relative_error = 0
    # Only the root rank reads and compares; the error is then broadcast.
    if self.my_rank == 0:
        computed = mmread(result_file)
        normval = abs(norm(self.CheckMat - computed))
        relative_error = normval / norm(self.CheckMat)
        print("\nNorm:", normval)
        print("Relative_Error:", relative_error)
    global_error = comm.bcast(relative_error, root=0)
    self.assertLessEqual(global_error, THRESHOLD)
def check_diag(self):
    '''Compare the diagonals of the result-file matrix and CheckMat.'''
    from helpers import THRESHOLD
    from numpy import diag, sort
    from numpy.linalg import norm as normd
    from scipy.sparse.linalg import norm
    normval = 0
    relative_error = 0
    # Root rank compares the sorted diagonals; the error is then broadcast.
    if self.my_rank == 0:
        computed_diag = sort(diag(mmread(result_file).todense()))
        reference_diag = sort(diag(self.CheckMat.todense()))
        normval = abs(normd(reference_diag - computed_diag))
        relative_error = normval / norm(self.CheckMat)
        print("\nNorm:", normval)
        print("Relative_Error:", relative_error)
    global_error = comm.bcast(relative_error, root=0)
    self.assertLessEqual(global_error, THRESHOLD)
def test_invert(self):
    '''Test routines to invert matrices.'''
    from scipy.sparse import csc_matrix
    from scipy.sparse.linalg import inv
    # Build and store the starting matrix.
    src = self.create_matrix()
    self.write_matrix(src, self.input_file)
    # Reference inverse computed with scipy.
    self.CheckMat = inv(csc_matrix(src))
    # NTPoly computation under a random load balancing permutation.
    overlap_matrix = nt.Matrix_ps(self.input_file, False)
    inverse_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(overlap_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.isp.SetLoadBalance(permutation)
    nt.InverseSolvers.Invert(overlap_matrix, inverse_matrix, self.isp)
    inverse_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_pseudoinverse(self):
    '''Test routines to compute the pseudoinverse of matrices.'''
    from scipy.linalg import pinv
    # Rank-deficient starting matrix.
    src = self.create_matrix(rank=int(self.mat_dim / 2))
    self.write_matrix(src, self.input_file)
    # Reference pseudoinverse from dense scipy.
    self.CheckMat = csr_matrix(pinv(src.todense()))
    # NTPoly computation under a random load balancing permutation.
    overlap_matrix = nt.Matrix_ps(self.input_file, False)
    inverse_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(overlap_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.isp.SetLoadBalance(permutation)
    nt.InverseSolvers.PseudoInverse(overlap_matrix, inverse_matrix,
                                    self.isp)
    inverse_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_inversesquareroot(self):
    '''Test routines to compute the inverse square root of matrices.'''
    from numpy import sqrt
    from scipy.linalg import funm
    # Starting matrix; SPD + diagonal dominance keeps eigenvalues positive.
    src = self.create_matrix(SPD=True, diag_dom=True)
    self.write_matrix(src, self.input_file)
    # Reference result via a dense matrix function.
    self.CheckMat = csr_matrix(funm(src.todense(), lambda x: 1.0 / sqrt(x)))
    # NTPoly computation under a random load balancing permutation.
    overlap_matrix = nt.Matrix_ps(self.input_file, False)
    inverse_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(overlap_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.isp.SetLoadBalance(permutation)
    nt.SquareRootSolvers.InverseSquareRoot(overlap_matrix, inverse_matrix,
                                           self.isp)
    inverse_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_squareroot(self):
    '''Test routines to compute the square root of matrices.'''
    from numpy import sqrt
    from scipy.linalg import funm
    # Starting matrix; SPD keeps the eigenvalues non-negative.
    src = self.create_matrix(SPD=True)
    self.write_matrix(src, self.input_file)
    # Reference result via a dense matrix function.
    self.CheckMat = csr_matrix(funm(src.todense(), lambda x: sqrt(x)))
    # NTPoly computation under a random load balancing permutation.
    input_matrix = nt.Matrix_ps(self.input_file, False)
    root_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(input_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.isp.SetLoadBalance(permutation)
    nt.SquareRootSolvers.SquareRoot(input_matrix, root_matrix, self.isp)
    root_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_inverseroot(self):
    '''Test routines to compute general matrix inverse root.'''
    from numpy import power
    from scipy.linalg import funm
    for root in range(1, 9):
        print("Root:", root)
        # Starting matrix; diagonal dominance keeps eigenvalues positive.
        src = self.create_matrix(diag_dom=True)
        self.write_matrix(src, self.input_file)
        # Reference result via a dense matrix function.
        dense_check = funm(src.todense(),
                           lambda x: power(x, -1.0 / root))
        self.CheckMat = csr_matrix(dense_check)
        # NTPoly computation under a random load balancing permutation.
        input_matrix = nt.Matrix_ps(self.input_file, False)
        inverse_matrix = nt.Matrix_ps(self.mat_dim)
        permutation = nt.Permutation(input_matrix.GetLogicalDimension())
        permutation.SetRandomPermutation()
        self.isp.SetLoadBalance(permutation)
        nt.RootSolvers.ComputeInverseRoot(input_matrix, inverse_matrix,
                                          root, self.isp)
        inverse_matrix.WriteToMatrixMarket(result_file)
        comm.barrier()
        self.check_result()
def test_root(self):
    '''Test routines to compute general matrix root.'''
    from numpy import power
    from scipy.linalg import funm
    for root in range(1, 9):
        print("Root", root)
        # Starting matrix; diagonal dominance keeps eigenvalues positive.
        src = self.create_matrix(diag_dom=True)
        self.write_matrix(src, self.input_file)
        # Reference result via a dense matrix function.
        dense_check = funm(src.todense(),
                           lambda x: power(x, 1.0 / root))
        self.CheckMat = csr_matrix(dense_check)
        # NTPoly computation under a random load balancing permutation.
        input_matrix = nt.Matrix_ps(self.input_file, False)
        inverse_matrix = nt.Matrix_ps(self.mat_dim)
        permutation = nt.Permutation(input_matrix.GetLogicalDimension())
        permutation.SetRandomPermutation()
        self.isp.SetLoadBalance(permutation)
        nt.RootSolvers.ComputeRoot(input_matrix, inverse_matrix,
                                   root, self.isp)
        inverse_matrix.WriteToMatrixMarket(result_file)
        comm.barrier()
        self.check_result()
def test_signfunction(self):
    '''Test routines to compute the matrix sign function.'''
    from numpy import sign
    from scipy.linalg import funm
    # Build and store the starting matrix.
    src = self.create_matrix()
    self.write_matrix(src, self.input_file)
    # Reference result via a dense matrix function.
    self.CheckMat = csr_matrix(funm(src.todense(), lambda x: sign(x)))
    # NTPoly computation under a random load balancing permutation.
    input_matrix = nt.Matrix_ps(self.input_file, False)
    sign_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(input_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.isp.SetLoadBalance(permutation)
    nt.SignSolvers.ComputeSign(input_matrix, sign_matrix, self.isp)
    sign_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_exponentialfunction(self):
    '''Test routines to compute the matrix exponential.'''
    from numpy import exp
    from scipy.linalg import funm
    # Build and store the starting matrix (scaled, then amplified).
    src = 8 * self.create_matrix(scaled=True)
    self.write_matrix(src, self.input_file)
    # Reference result via a dense matrix function.
    self.CheckMat = csr_matrix(funm(src.todense(), lambda x: exp(x)))
    # NTPoly computation under a random load balancing permutation.
    input_matrix = nt.Matrix_ps(self.input_file, False)
    exp_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(input_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.fsp.SetLoadBalance(permutation)
    nt.ExponentialSolvers.ComputeExponential(input_matrix, exp_matrix,
                                             self.fsp)
    exp_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_exponentialpade(self):
    '''
    Test routines to compute the matrix exponential using the pade method.
    '''
    from numpy import exp
    from scipy.linalg import funm
    # Build and store the starting matrix (scaled, then amplified).
    src = 8 * self.create_matrix(scaled=True)
    self.write_matrix(src, self.input_file)
    # Reference result via a dense matrix function.
    self.CheckMat = csr_matrix(funm(src.todense(), lambda x: exp(x)))
    # NTPoly computation under a random load balancing permutation.
    input_matrix = nt.Matrix_ps(self.input_file, False)
    exp_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(input_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.isp.SetLoadBalance(permutation)
    nt.ExponentialSolvers.ComputeExponentialPade(input_matrix, exp_matrix,
                                                 self.isp)
    exp_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_logarithmfunction(self):
    '''Test routines to compute the matrix logarithm.'''
    from numpy import log
    from scipy.linalg import funm
    # Starting matrix; diagonal dominance keeps eigenvalues positive.
    src = self.create_matrix(scaled=True, diag_dom=True)
    self.write_matrix(src, self.input_file)
    # Reference result via a dense matrix function.
    self.CheckMat = csr_matrix(funm(src.todense(), lambda x: log(x)))
    # NTPoly computation under a random load balancing permutation.
    input_matrix = nt.Matrix_ps(self.input_file, False)
    log_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(input_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.fsp.SetLoadBalance(permutation)
    nt.ExponentialSolvers.ComputeLogarithm(input_matrix, log_matrix,
                                           self.fsp)
    log_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_exponentialround(self):
    '''
    Test routines to compute the matrix exponential using a round
    trip calculation: log(exp(A)) should recover A.
    '''
    src = 0.125 * self.create_matrix(scaled=True)
    self.write_matrix(src, self.input_file)
    # The round trip should reproduce the input itself.
    self.CheckMat = csr_matrix(src)
    # exp then log with NTPoly.
    input_matrix = nt.Matrix_ps(self.input_file, False)
    exp_matrix = nt.Matrix_ps(self.mat_dim)
    round_matrix = nt.Matrix_ps(self.mat_dim)
    nt.ExponentialSolvers.ComputeExponential(input_matrix, exp_matrix,
                                             self.fsp)
    nt.ExponentialSolvers.ComputeLogarithm(exp_matrix, round_matrix,
                                           self.fsp)
    round_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_sinfunction(self):
    '''Test routines to compute the matrix sine.'''
    from numpy import sin
    from scipy.linalg import funm
    # Build and store the starting matrix.
    src = self.create_matrix()
    self.write_matrix(src, self.input_file)
    # Reference result via a dense matrix function.
    self.CheckMat = csr_matrix(funm(src.todense(), lambda x: sin(x)))
    # NTPoly computation under a random load balancing permutation.
    input_matrix = nt.Matrix_ps(self.input_file, False)
    sin_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(input_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.fsp.SetLoadBalance(permutation)
    nt.TrigonometrySolvers.Sine(input_matrix, sin_matrix, self.fsp)
    sin_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_cosfunction(self):
    '''Test routines to compute the matrix cosine.'''
    from numpy import cos
    from scipy.linalg import funm
    # Build and store the starting matrix.
    src = self.create_matrix()
    self.write_matrix(src, self.input_file)
    # Reference result via a dense matrix function.
    self.CheckMat = csr_matrix(funm(src.todense(), lambda x: cos(x)))
    # NTPoly computation under a random load balancing permutation.
    input_matrix = nt.Matrix_ps(self.input_file, False)
    cos_matrix = nt.Matrix_ps(self.mat_dim)
    permutation = nt.Permutation(input_matrix.GetLogicalDimension())
    permutation.SetRandomPermutation()
    self.fsp.SetLoadBalance(permutation)
    nt.TrigonometrySolvers.Cosine(input_matrix, cos_matrix, self.fsp)
    cos_matrix.WriteToMatrixMarket(result_file)
    comm.barrier()
    self.check_result()
def test_hornerfunction(self):
'''
Test routines to compute a matrix polynomial using horner's
method.
'''
from numpy.linalg import eigh
from numpy import diag, dot
# Coefficients of the polynomial
coef = [1.0, 0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625]
# Starting Matrix
matrix1 = self.create_matrix(scaled=True)
self.write_matrix(matrix1, self.input_file)
# Check Matrix
val, vec = eigh(matrix1.todense())
for i in range(0, len(val)):
temp = val[i]
val[i] = 0
for j in range(0, len(coef)):
| |
"//" u_expr| m_expr "/" u_expr |
m_expr "%" u_expr
a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr
The "*" (multiplication) operator yields the product of its arguments.
The arguments must either both be numbers, or one argument must be an
integer and the other must be a sequence. In the former case, the
numbers are converted to a common type and then multiplied together.
In the latter case, sequence repetition is performed; a negative
repetition factor yields an empty sequence.
The "@" (at) operator is intended to be used for matrix
multiplication. No builtin Python types implement this operator.
New in version 3.5.
The "/" (division) and "//" (floor division) operators yield the
quotient of their arguments. The numeric arguments are first
converted to a common type. Division of integers yields a float, while
floor division of integers results in an integer; the result is that
of mathematical division with the 'floor' function applied to the
result. Division by zero raises the "ZeroDivisionError" exception.
The "%" (modulo) operator yields the remainder from the division of
the first argument by the second. The numeric arguments are first
converted to a common type. A zero right argument raises the
"ZeroDivisionError" exception. The arguments may be floating point
numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals "4*0.7 +
0.34".) The modulo operator always yields a result with the same sign
as its second operand (or zero); the absolute value of the result is
strictly smaller than the absolute value of the second operand [1].
The floor division and modulo operators are connected by the following
identity: "x == (x//y)*y + (x%y)". Floor division and modulo are also
connected with the built-in function "divmod()": "divmod(x, y) ==
(x//y, x%y)". [2].
In addition to performing the modulo operation on numbers, the "%"
operator is also overloaded by string objects to perform old-style
string formatting (also known as interpolation). The syntax for
string formatting is described in the Python Library Reference,
section printf-style String Formatting.
The floor division operator, the modulo operator, and the "divmod()"
function are not defined for complex numbers. Instead, convert to a
floating point number using the "abs()" function if appropriate.
The "+" (addition) operator yields the sum of its arguments. The
arguments must either both be numbers or both be sequences of the same
type. In the former case, the numbers are converted to a common type
and then added together. In the latter case, the sequences are
concatenated.
The "-" (subtraction) operator yields the difference of its arguments.
The numeric arguments are first converted to a common type.
"""
, 'bitwise':
"""Binary bitwise operations
*************************
Each of the three bitwise operations has a different priority level:
and_expr ::= shift_expr | and_expr "&" shift_expr
xor_expr ::= and_expr | xor_expr "^" and_expr
or_expr ::= xor_expr | or_expr "|" xor_expr
The "&" operator yields the bitwise AND of its arguments, which must
be integers.
The "^" operator yields the bitwise XOR (exclusive OR) of its
arguments, which must be integers.
The "|" operator yields the bitwise (inclusive) OR of its arguments,
which must be integers.
"""
, 'bltin-code-objects':
"""Code Objects
************
Code objects are used by the implementation to represent "pseudo-
compiled" executable Python code such as a function body. They differ
from function objects because they don't contain a reference to their
global execution environment. Code objects are returned by the built-
in "compile()" function and can be extracted from function objects
through their "__code__" attribute. See also the "code" module.
A code object can be executed or evaluated by passing it (instead of a
source string) to the "exec()" or "eval()" built-in functions.
See The standard type hierarchy for more information.
"""
, 'bltin-ellipsis-object':
"""The Ellipsis Object
*******************
This object is commonly used by slicing (see Slicings). It supports
no special operations. There is exactly one ellipsis object, named
"Ellipsis" (a built-in name). "type(Ellipsis)()" produces the
"Ellipsis" singleton.
It is written as "Ellipsis" or "...".
"""
, 'bltin-null-object':
"""The Null Object
***************
This object is returned by functions that don't explicitly return a
value. It supports no special operations. There is exactly one null
object, named "None" (a built-in name). "type(None)()" produces the
same singleton.
It is written as "None".
"""
, 'bltin-type-objects':
"""Type Objects
************
Type objects represent the various object types. An object's type is
accessed by the built-in function "type()". There are no special
operations on types. The standard module "types" defines names for
all standard built-in types.
Types are written like this: "<class 'int'>".
"""
, 'booleans':
"""Boolean operations
******************
or_test ::= and_test | or_test "or" and_test
and_test ::= not_test | and_test "and" not_test
not_test ::= comparison | "not" not_test
In the context of Boolean operations, and also when expressions are
used by control flow statements, the following values are interpreted
as false: "False", "None", numeric zero of all types, and empty
strings and containers (including strings, tuples, lists,
dictionaries, sets and frozensets). All other values are interpreted
as true. User-defined objects can customize their truth value by
providing a "__bool__()" method.
The operator "not" yields "True" if its argument is false, "False"
otherwise.
The expression "x and y" first evaluates *x*; if *x* is false, its
value is returned; otherwise, *y* is evaluated and the resulting value
is returned.
The expression "x or y" first evaluates *x*; if *x* is true, its value
is returned; otherwise, *y* is evaluated and the resulting value is
returned.
(Note that neither "and" nor "or" restrict the value and type they
return to "False" and "True", but rather return the last evaluated
argument. This is sometimes useful, e.g., if "s" is a string that
should be replaced by a default value if it is empty, the expression
"s or 'foo'" yields the desired value. Because "not" has to create a
new value, it returns a boolean value regardless of the type of its
argument (for example, "not 'foo'" produces "False" rather than "''".)
"""
, 'break':
"""The "break" statement
*********************
break_stmt ::= "break"
"break" may only occur syntactically nested in a "for" or "while"
loop, but not nested in a function or class definition within that
loop.
It terminates the nearest enclosing loop, skipping the optional "else"
clause if the loop has one.
If a "for" loop is terminated by "break", the loop control target
keeps its current value.
When "break" passes control out of a "try" statement with a "finally"
clause, that "finally" clause is executed before really leaving the
loop.
"""
, 'callable-types':
"""Emulating callable objects
**************************
object.__call__(self[, args...])
Called when the instance is "called" as a function; if this method
is defined, "x(arg1, arg2, ...)" is a shorthand for
"x.__call__(arg1, arg2, ...)".
"""
, 'calls':
"""Calls
*****
A call calls a callable object (e.g., a *function*) with a possibly
empty series of *arguments*:
call ::= primary "(" [argument_list [","] | comprehension] ")"
argument_list ::= positional_arguments ["," starred_and_keywords]
["," keywords_arguments]
| starred_and_keywords ["," keywords_arguments]
| keywords_arguments
positional_arguments ::= ["*"] expression ("," ["*"] expression)*
starred_and_keywords ::= ("*" expression | keyword_item)
("," "*" expression | "," keyword_item)*
keywords_arguments ::= (keyword_item | "**" expression)
("," keyword_item | "," "**" expression)*
keyword_item ::= identifier "=" expression
An optional trailing comma may be present after the positional and
keyword arguments but does not affect the semantics.
The primary must evaluate to a callable object (user-defined
functions, built-in functions, methods of built-in objects, class
objects, methods of class instances, and all objects having a
"__call__()" method are callable). All argument expressions are
evaluated before the call is attempted. Please refer to section
Function definitions for the syntax of formal *parameter* lists.
If keyword arguments are present, they are first converted to
positional arguments, as follows. First, a list of unfilled slots is
created for the formal parameters. If there are N positional
arguments, they are placed in the first N slots. Next, for each
keyword argument, the identifier is used to determine the
corresponding slot (if the identifier is the same as the first formal
parameter name, the first slot is used, and so on). If the slot is
already filled, a "TypeError" exception is raised. Otherwise, the
value of the argument is placed in the slot, filling it (even if the
expression is "None", it fills the slot). When all arguments have
been processed, the slots that are still unfilled are filled with the
corresponding default value from the function definition. (Default
values are calculated, once, when the function is defined; thus, a
mutable object such as a list or dictionary used as default value will
be shared by all calls that don't specify an argument value for the
corresponding | |
match):
"""Process match as piece move unless it fits better as a b-pawn move.
bx[ac][1-8] is ambiguous when case is ignored. The board state is
examined to decide if 'b' or 'B' means bishop or pawn. Sometimes
the only way to decide is by taking case into account.
"""
group = match.group
pml = group().lower()
piece_match = text_format.match(
pml[0].upper()
+ pml[1:]
+ match.string[match.end() : match.end() + 10]
)
if (
pml[0] in "kqrn"
or group(IFG_PIECE_DESTINATION)[0].lower() in "defgh"
or (
PGN_CAPTURE_MOVE in pml
and group(IFG_PIECE_DESTINATION)[0].lower() == "b"
)
or (
PGN_CAPTURE_MOVE not in pml
and group(IFG_PIECE_DESTINATION)[0].lower() in "ac"
)
):
if piece_match.group(IFG_PIECE_MOVE) is None:
self.append_token_and_set_error(match)
return
super(GameTextPGN, self).append_piece_move(piece_match)
self._bishop_or_bpawn = None
return
pawn_match = text_format.match(pml)
if pawn_match and pawn_match.lastindex != IFG_PAWN_TO_RANK:
pawn_match = None
if group(IFG_PIECE_DESTINATION)[1] in "18":
peek_start = match.span(match.lastindex)[-1]
if peek_start == len(match.string):
pawn_promotion_match = None
elif match.string[peek_start] != PGN_PROMOTION:
pawn_promotion_match = None
else:
pawn_promotion_match = text_promotion_format.match(
pml + match.string[peek_start : peek_start + 6].lower()
)
if pawn_promotion_match:
pawn_promotion_match = text_format.match(
"".join(
(
pawn_promotion_match.group(TP_MOVE),
pawn_promotion_match.group(
TP_PROMOTE_TO_PIECE
).upper(),
)
)
)
if pawn_promotion_match and (
pawn_promotion_match.lastindex
!= IFG_PAWN_PROMOTE_PIECE
):
pawn_promotion_match = None
else:
pawn_promotion_match = None
if self._movetext_offset is None:
if not self.set_initial_position():
self.append_token_and_set_error(match)
return
self._movetext_offset = len(self._text)
fen = generate_fen_for_position(
self._piece_placement_data.values(),
self._active_color,
self._castling_availability,
self._en_passant_target_square,
self._halfmove_clock,
self._fullmove_number,
)
setup = import_format.match('[SetUp"1"]')
fen = import_format.match(fen.join(('[FEN"', '"]')))
bishop_move = GameTextPGN()
bishop_move.append_start_tag(setup)
bishop_move.append_start_tag(fen)
bishop_move.append_piece_move(piece_match)
if pawn_match:
pawn_move = GameTextPGN()
pawn_move.append_start_tag(setup)
pawn_move.append_start_tag(fen)
pawn_move.append_pawn_move(pawn_match)
if pawn_promotion_match:
pawn_promotion_move = GameTextPGN()
pawn_promotion_move.append_start_tag(setup)
pawn_promotion_move.append_start_tag(fen)
pawn_promotion_move.append_pawn_promote_move(pawn_promotion_match)
if bishop_move.state is None:
if not (pawn_match or pawn_promotion_match):
super(GameTextPGN, self).append_piece_move(piece_match)
self._bishop_or_bpawn = None
return
if match[0].isupper():
super(GameTextPGN, self).append_piece_move(piece_match)
self._bishop_or_bpawn = None
return
# else:
# All tests passed with this commented code.
# However I am not convinced it is safe to collapse the
# conditionals under 'if bishop_move.state is None:' yet,
# nor that the original code below is sound in the new
# more limited scope.
# super(GameTextPGN, self).append_piece_move(piece_match)
# self._bishop_or_bpawn = None
# return
# pass
else:
if pawn_match and pawn_match.lastindex == IFG_PAWN_TO_RANK:
super(GameTextPGN, self).append_pawn_move(pawn_match)
self._bishop_or_bpawn = None
elif (
pawn_promotion_match
and pawn_promotion_match.lastindex == IFG_PAWN_PROMOTE_PIECE
):
super(GameTextPGN, self).append_pawn_promote_move(
pawn_promotion_match
)
self._bishop_or_bpawn = None
else:
self.append_token_and_set_error(match)
return
# Following code is mostly not used at version 2.1 compared with
# previous version. One route above falls through to here but it
# seems 'self.is_move_interpreted_as_piece_move(match)' always
        # evaluates True.
# Implication might be that move is always a bishop move, never a
# pawn move, if bishop_move.state is None in the code above.
if self.is_move_interpreted_as_piece_move(match):
if piece_match.group(IFG_PIECE_MOVE) is None:
self.append_token_and_set_error(match)
return
super(GameTextPGN, self).append_piece_move(piece_match)
self._bishop_or_bpawn = None
return
if match.group(IFG_PIECE_DESTINATION)[1] in "18":
peek_start = match.span(match.lastindex)[-1]
if peek_start == len(match.string):
self.append_token_and_set_error(match)
elif match.string[peek_start] != PGN_PROMOTION:
self.append_token_and_set_error(match)
else:
promotion_match = text_promotion_format.match(
pml + match.string[peek_start : peek_start + 6].lower()
)
if promotion_match is None:
self.append_token_and_set_error(match)
return
promotion_match = text_format.match(
"".join(
(
promotion_match.group(TP_MOVE),
promotion_match.group(TP_PROMOTE_TO_PIECE).upper(),
)
)
)
if promotion_match is None:
self.append_token_and_set_error(match)
return
super(GameTextPGN, self).append_pawn_promote_move(
promotion_match
)
self._promotion_disambiguation_detected = True
self._bishop_or_bpawn = None
return
piece_match = text_format.match(pml)
if piece_match.group(IFG_PAWN_FROM_FILE) is None:
self.append_token_and_set_error(match)
return
super(GameTextPGN, self).append_pawn_move(piece_match)
self._bishop_or_bpawn = None
    def is_move_interpreted_as_piece_move(self, match):
        """Return True if move is not a pawn move, and None otherwise.

        is_move_interpreted_as_piece_move is called when deciding if a
        movetext item starting 'b' or 'B' should be treated as a pawn move
        or a bishop move.

        """
        group = match.group
        piece = group(IFG_PIECE_MOVE).lower()
        i = FILE_NAMES.find(piece)
        # Piece letter is not a file name, so it cannot be the source file
        # of a pawn capture such as 'bxc4'.
        if i < 0:
            return True
        square = group(IFG_PIECE_DESTINATION).lower()
        file = square[0]
        # A capturing pawn never lands on its own file.
        if file == piece:
            return True
        # A capturing pawn moves exactly one file left or right.
        if file not in FILE_NAMES[i - 1 : i + 2]:
            return True
        # Without a capture indicator the 'b...' token is a piece move.
        if not group(IFG_PIECE_CAPTURE):
            return True
        if self._active_color == FEN_WHITE_ACTIVE:
            source_squares = WHITE_PAWN_CAPTURES.get(square)
            pawn = FEN_WHITE_PAWN
        else:
            source_squares = BLACK_PAWN_CAPTURES.get(square)
            pawn = FEN_BLACK_PAWN
        # In case this is done for first movetext element after tags.
        if self._movetext_offset is None:
            if not self.set_initial_position():
                self.append_token_and_set_error(match)
                return None
        piece_placement_data = self._piece_placement_data
        # If the candidate source square holds something other than a pawn
        # of the active color the move cannot be a pawn capture.
        if source_squares:
            for ssq in source_squares:
                if ssq[0] == piece:
                    if ssq in piece_placement_data:
                        if piece_placement_data[ssq].name != pawn:
                            return True
        for pgn_np in PGN_NAMED_PIECES:
            if group(IFG_PIECE_MOVE) == pgn_np:
                for pobsq in self._pieces_on_board[
                    pgn_np.lower()
                    if self._active_color == FEN_BLACK_ACTIVE
                    else pgn_np
                ]:
                    # NOTE(review): 'square' is the .get() *default*, and it
                    # is a non-empty string, so this condition is truthy even
                    # when the key is absent.  Possibly intended as
                    # POINT_TO_POINT.get((pobsq.square.name, square)) --
                    # confirm against POINT_TO_POINT's key structure.
                    if POINT_TO_POINT.get(pobsq.square.name, square):
                        return True
        return None
    def append_pawn_move(self, match):
        """Delegate lower case match to superclass.

        The match may instead complete a bishop move when an earlier token
        left a 'b'-or-'B' ambiguity recorded in self._bishop_or_bpawn.

        """
        # self._state is None and self._bishop_or_bpawn will have been set
        # by self.append_other_or_disambiguation_pgn().
        assert self._state is None
        if self._bishop_or_bpawn:
            # A pending ambiguous token exists: try combining it with this
            # token as a bishop (piece) move.
            bishop = text_format.match(
                self._bishop_or_bpawn.group().upper() + match.group().lower()
            )
            if bishop:
                super(GameTextPGN, self).append_piece_move(bishop)
                self._bishop_or_bpawn = None
                return
            # NOTE(review): every other error path passes 'match' to
            # append_token_and_set_error; confirm the no-argument call
            # here is intended.
            self.append_token_and_set_error()
            return
        mgl = match.group().lower()
        # So the test on self._piece_placement_data gives a helpful answer.
        # Cannot wait till it's done in 'super().append_pawn_move(pgn_match)'.
        if self._movetext_offset is None:
            if not self.set_initial_position():
                self.append_token_and_set_error(match)
                self._bishop_or_bpawn = None
                return
            self._ravstack.append([0])
            self._movetext_offset = len(self._text)
        if PGN_CAPTURE_MOVE in mgl:
            if mgl.startswith(FEN_BLACK_BISHOP):
                self._append_bishop_or_bpawn_capture(match)
                return
            # Keep the source file plus the trailing capture-and-destination
            # characters for the superclass.
            pgn_match = text_format.match(mgl[0] + mgl[-3:])
        elif LAN_MOVE_SEPARATOR in mgl:
            if mgl.startswith(FEN_BLACK_BISHOP):
                self._append_bishop_or_bpawn_move(match)
                return
            # Long algebraic double-step advance (rank 2 to 4 for white,
            # 7 to 5 for black): source and destination must share a file
            # and the skipped square (rank 3 or 6) must be empty.
            for source_rank, destination_rank, ep_rank in ("243", "756"):
                if mgl[1] == source_rank and mgl[4] == destination_rank:
                    if mgl[0] != mgl[3]:
                        self.append_token_and_set_error(match)
                        self._bishop_or_bpawn = None
                        return
                    if mgl[0] + ep_rank in self._piece_placement_data:
                        self.append_token_and_set_error(match)
                        self._bishop_or_bpawn = None
                        return
            pgn_match = text_format.match(mgl[-2:])
        elif mgl not in self._piece_placement_data:
            pgn_match = text_format.match(mgl)
        elif self._full_disambiguation_detected:
            del self._full_disambiguation_detected
            self._bishop_or_bpawn = None
            return
        else:
            # Token names an occupied square: treat it as the start of a
            # long algebraic notation pawn move.
            self._long_algebraic_notation_pawn_move(text_format.match(mgl))
            if self._state is not None:
                piece = self._piece_placement_data[mgl]
                if self._active_color == piece.color:
                    if self._active_color == FEN_WHITE_ACTIVE:
                        if piece.name == FEN_WHITE_PAWN:
                            self._full_disambiguation_detected = True
                    elif self._active_color == FEN_BLACK_ACTIVE:
                        if piece.name == FEN_BLACK_PAWN:
                            self._full_disambiguation_detected = True
                    self._bishop_or_bpawn = possible_bishop_or_bpawn.match(
                        match.group()
                    )
                    if (
                        self._full_disambiguation_detected
                        and self._bishop_or_bpawn
                    ):
                        del self._full_disambiguation_detected
                    else:
                        self._bishop_or_bpawn = None
            return
        if pgn_match is None:
            self.append_token_and_set_error(match)
            self._bishop_or_bpawn = None
            return
        super(GameTextPGN, self).append_pawn_move(pgn_match)
        if self._state is None:
            self._bishop_or_bpawn = None
            return
        # self._state is not None so self.append_token_after_error() will
        # process next token.
        assert self._state is not None
        self._bishop_or_bpawn = possible_bishop_or_bpawn.match(match.group())
    def append_pawn_promote_move(self, match):
        """Delegate lower case move with upper case promotion to superclass.

        The token is normalised to lower case movetext with the promotion
        piece restored to upper case before delegation.

        """
        mgl = match.group().lower()
        if mgl.startswith(FEN_BLACK_BISHOP):
            # So the test on self._piece_placement_data gives a helpful answer.
            # Cannot wait for 'super().append_pawn_move(pgn_match)'.
            if self._movetext_offset is None:
                if not self.set_initial_position():
                    self.append_token_and_set_error(match)
                    self._bishop_or_bpawn = None
                    return
                self._ravstack.append([0])
                self._movetext_offset = len(self._text)
        if PGN_CAPTURE_MOVE in mgl:
            # Presumably keeps source file plus the trailing capture,
            # destination, and optional '=piece' characters -- verify
            # against text_promotion_format.
            if PGN_PROMOTION in mgl:
                promotion_match = text_promotion_format.match(
                    mgl[0] + mgl[-5:]
                )
            else:
                promotion_match = text_promotion_format.match(
                    mgl[0] + mgl[-4:]
                )
        elif LAN_MOVE_SEPARATOR in mgl:
            # Long algebraic notation: keep only the destination and the
            # optional '=piece' characters.
            if PGN_PROMOTION in mgl:
                promotion_match = text_promotion_format.match(mgl[-4:])
            else:
                promotion_match = text_promotion_format.match(mgl[-3:])
        else:
            promotion_match = text_promotion_format.match(mgl)
        if promotion_match is None:
            self.append_token_and_set_error(match)
            self._bishop_or_bpawn = None
            return
        # Restore the upper case promotion piece expected by import format.
        pmg = promotion_match.group()
        promotion_match = import_format.match(pmg[:-1] + pmg[-1].upper())
        if promotion_match is None:
            self.append_token_and_set_error(match)
            self._bishop_or_bpawn = None
            return
        super(GameTextPGN, self).append_pawn_promote_move(promotion_match)
        self._bishop_or_bpawn = None
    def _append_recovered_bishop_or_bpawn_move(self, match):
        """Return True if match is resolved to a bishop or b-pawn move.

        The pending ambiguous token in self._bishop_or_bpawn is combined
        with this match and tried as a pawn promotion, a bishop move, or a
        pawn move, whichever reading the current position accepts.

        """
        mgt = match.group().lower()
        # Promotion candidate: pending token joined to this token with the
        # promotion piece (last character) upper-cased.
        promotion_match = text_format.match(
            LAN_MOVE_SEPARATOR.join(
                (
                    self._bishop_or_bpawn.group().lower(),
                    mgt[:-1] + mgt[-1].upper(),
                )
            )
        )
        promotion_lastindex = (
            promotion_match
            and promotion_match.lastindex == IFG_PAWN_PROMOTE_PIECE
        )
        # Ensure a separator or capture indicator joins the two tokens.
        if not (
            mgt.startswith(LAN_MOVE_SEPARATOR)
            or mgt.startswith(PGN_CAPTURE_MOVE)
        ):
            mgt = LAN_MOVE_SEPARATOR + mgt
        # Bishop ('B...') and b-pawn ('b...') readings of the same text.
        bishop_match = text_format.match(
            "".join((self._bishop_or_bpawn.group().upper(), mgt))
        )
        pawn_match = text_format.match(
            "".join((self._bishop_or_bpawn.group().lower(), mgt))
        )
        bishop_lastindex = (
            bishop_match and bishop_match.lastindex == IFG_PIECE_DESTINATION
        )
        pawn_lastindex = pawn_match and (
            pawn_match.lastindex == IFG_PAWN_TO_RANK
            or pawn_match.lastindex == IFG_PAWN_PROMOTE_PIECE
        )
        # Try the available matches.
        if promotion_lastindex:
            self._undo_append_token_and_set_error()
            super().append_pawn_promote_move(promotion_match)
            self._bishop_or_bpawn = None
        elif bishop_lastindex and pawn_lastindex:
            # Both readings parse: replay each against a throwaway game
            # built from the current position to see which is accepted
            # (state is None on success).
            fen = generate_fen_for_position(
                self._piece_placement_data.values(),
                self._active_color,
                self._castling_availability,
                self._en_passant_target_square,
                self._halfmove_clock,
                self._fullmove_number,
            )
            setup = import_format.match('[SetUp"1"]')
            fen = import_format.match(fen.join(('[FEN"', '"]')))
            bishop_move = GameTextPGN()
            bishop_move.append_start_tag(setup)
            bishop_move.append_start_tag(fen)
            bishop_move.append_piece_move(bishop_match)
            pawn_move = GameTextPGN()
            pawn_move.append_start_tag(setup)
            pawn_move.append_start_tag(fen)
            pawn_move.append_pawn_move(pawn_match)
            if bishop_move.state is None and pawn_move.state is None:
                # Both are legal moves: fall back on the original case of
                # the token to choose.
                if match.group()[0].isupper():
                    self._undo_append_token_and_set_error()
                    super().append_piece_move(bishop_match)
                else:
                    self._undo_append_token_and_set_error()
                    self._append_pawn_move(pawn_match)
            elif bishop_move.state is None:
                self._undo_append_token_and_set_error()
                super().append_piece_move(bishop_match)
            elif pawn_move.state is None:
                self._undo_append_token_and_set_error()
                self._append_pawn_move(pawn_match)
            # This looks equivalent to doing nothing, but if removed leads to
            # an exception in _append_pawn_move at 'if mgl[0] != mgl[3]:'.
            # Without the adjustment to self._text[-1] the text from match
            # appears in two adjacent self._text elements.
            else:
                error_text = self._text[-1]
                self._undo_append_token_and_set_error()
                self.append_token_and_set_error(match)
                self._text[-1] = error_text
        elif pawn_lastindex:
            self._undo_append_token_and_set_error()
            self._append_pawn_move(pawn_match)
        elif bishop_lastindex:
            self._undo_append_token_and_set_error()
            super().append_piece_move(bishop_match)
        else:
            self.append_token_and_set_error(match)
        # True when the chosen interpretation left the game in a good state.
        return bool(self._state is None)
def _append_pawn_move(self, match):
mgl = match.group()
try:
different_file = mgl[0] != mgl[3]
except IndexError:
self.append_token_and_set_error(match)
return
if different_file:
self.append_token_and_set_error(match)
return
for source_rank, destination_rank, ep_rank in | |
#!/usr/bin/python3
# Author: GMFTBY
# Time: 2020.2.7
'''
Seq2Seq in Transformer, implemented by Pytorch's nn.Transformer
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import math
import random
import numpy as np
import pickle
import ipdb
import sys
import types
import transformers
from .layers import *
class Decoder(nn.Module):
    """GRU decoder that attends over encoder outputs with several heads.

    Each of the ``nhead`` attention modules produces its own context
    vector from the summed decoder hidden state; the concatenated contexts
    are projected back to ``hidden_size`` through a feed-forward layer and
    fed to the GRU together with the token embedding.
    """

    def __init__(self, embed_size, hidden_size, output_size,
                 n_layers=2, dropout=0.5, nhead=8):
        super(Decoder, self).__init__()
        self.embed_size, self.hidden_size = embed_size, hidden_size
        self.output_size = output_size
        self.embed = nn.Embedding(output_size, embed_size)
        self.multi_head_attention = nn.ModuleList(
            [Attention(hidden_size) for _ in range(nhead)])
        # NOTE(review): not referenced by forward(); kept so parameter /
        # checkpoint layout stays unchanged.
        self.attention = Attention(hidden_size)
        self.rnn = nn.GRU(hidden_size + embed_size,
                          hidden_size,
                          num_layers=n_layers,
                          dropout=(0 if n_layers == 1 else dropout))
        self.out = nn.Linear(hidden_size, output_size)
        self.ffn = nn.Linear(nhead * hidden_size, hidden_size)
        self.init_weight()

    def init_weight(self):
        # Xavier-normal init for the first GRU layer's weight matrices;
        # zero both of its bias vectors.
        init.xavier_normal_(self.rnn.weight_hh_l0)
        init.xavier_normal_(self.rnn.weight_ih_l0)
        self.rnn.bias_ih_l0.data.fill_(0.0)
        self.rnn.bias_hh_l0.data.fill_(0.0)

    def forward(self, inpt, last_hidden, encoder_outputs):
        """One decoding step.

        inpt: [batch] token ids; last_hidden: [n_layers, batch, hidden].
        Returns (log-probs [batch, output_size], hidden [n_layers, batch,
        hidden]).
        """
        token_emb = self.embed(inpt).unsqueeze(0)  # [1, batch, embed_size]
        # Collapse the layer dimension to form the attention query.
        query = last_hidden.sum(dim=0)  # [batch, hidden]

        # One context vector per head, each shaped [hidden, batch].
        head_contexts = []
        for head in self.multi_head_attention:
            weights = head(query, encoder_outputs)  # [batch, 1, src_len]
            ctx = weights.bmm(encoder_outputs.transpose(0, 1))
            head_contexts.append(ctx.squeeze(1).transpose(0, 1))
        # Concatenate along the hidden dimension then move batch first:
        # [nhead * hidden, batch] -> [batch, nhead * hidden].
        merged = torch.cat(head_contexts, dim=0).transpose(0, 1)
        context = torch.tanh(self.ffn(merged)).unsqueeze(0)  # [1, b, hidden]

        rnn_input = torch.cat([token_emb, context], 2)
        output, hidden = self.rnn(rnn_input, last_hidden)
        output = F.log_softmax(self.out(output.squeeze(0)), dim=1)
        return output, hidden
class Transformer(nn.Module):
    """Transformer encoder paired with the multi-head-attention GRU decoder.

    The source sequence is encoded once with ``nn.TransformerEncoder``;
    target tokens are then produced step by step through :class:`Decoder`,
    with optional teacher forcing during training.
    """

    def __init__(self, input_vocab_size, opt_vocab_size, d_model, nhead,
                 num_encoder_layers, dim_feedforward, position_embed_size=300,
                 utter_n_layer=2, dropout=0.3, sos=0, pad=0, teach_force=1):
        super(Transformer, self).__init__()
        self.d_model = d_model
        self.hidden_size = d_model
        self.embed_src = nn.Embedding(input_vocab_size, d_model)
        # Positional table truncated to position_embed_size entries.
        self.pos_enc = PositionEmbedding(d_model, dropout=dropout,
                                         max_len=position_embed_size)
        self.input_vocab_size = input_vocab_size
        self.utter_n_layer = utter_n_layer
        self.opt_vocab_size = opt_vocab_size
        self.pad, self.sos = pad, sos
        self.teach_force = teach_force
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=nhead,
            dim_feedforward=dim_feedforward,
            dropout=dropout, activation='gelu')
        self.encoder = nn.TransformerEncoder(encoder_layer,
                                             num_layers=num_encoder_layers)
        self.decoder = Decoder(d_model, d_model, opt_vocab_size,
                               n_layers=utter_n_layer, dropout=dropout,
                               nhead=nhead)

    def generate_key_mask(self, x, lengths):
        """Return a [batch, seq] bool mask; True marks padding positions."""
        seq_length = x.shape[0]
        rows = [
            [False] * true_len + [True] * (seq_length - true_len)
            for true_len in lengths
        ]
        masks = torch.tensor(rows)
        if torch.cuda.is_available():
            masks = masks.cuda()
        return masks

    def forward(self, src, tgt, lengths):
        """Teacher-forced (or free-running) decoding of the whole target.

        src, tgt: [seq, batch]; lengths: [batch].
        Returns [max_len, batch, opt_vocab_size] log-probabilities.
        """
        batch_size, max_len = src.shape[1], tgt.shape[0]
        src_key_padding_mask = self.generate_key_mask(src, lengths)

        outputs = torch.zeros(max_len, batch_size, self.opt_vocab_size)
        if torch.cuda.is_available():
            outputs = outputs.cuda()

        # Embed, scale by sqrt(d_model), add positional encoding.
        encoded = self.pos_enc(self.embed_src(src) * math.sqrt(self.d_model))
        memory = self.encoder(encoded,
                              src_key_padding_mask=src_key_padding_mask)

        # Decoder starts from a random hidden state.
        hidden = torch.randn(self.utter_n_layer, batch_size, self.hidden_size)
        if torch.cuda.is_available():
            hidden = hidden.cuda()

        step_input = tgt[0, :]
        teacher_forcing = random.random() < self.teach_force
        if teacher_forcing:
            # Feed the gold token at every step.
            for t in range(1, max_len):
                step_output, hidden = self.decoder(step_input, hidden, memory)
                outputs[t] = step_output
                step_input = tgt[t]
        else:
            # Feed back the model's own argmax prediction.
            for t in range(1, max_len):
                step_output, hidden = self.decoder(step_input, hidden, memory)
                outputs[t] = step_output
                step_input = step_output.topk(1)[1].squeeze().detach()
        return outputs

    def predict(self, src, maxlen, lengths, loss=True):
        """Greedy decoding from <sos>; returns ids (and log-probs if loss)."""
        with torch.no_grad():
            batch_size = src.shape[1]
            src_key_padding_mask = self.generate_key_mask(src, lengths)

            outputs = torch.zeros(maxlen, batch_size)
            floss = torch.zeros(maxlen, batch_size, self.opt_vocab_size)
            if torch.cuda.is_available():
                outputs = outputs.cuda()
                floss = floss.cuda()

            encoded = self.pos_enc(
                self.embed_src(src) * math.sqrt(self.d_model))
            memory = self.encoder(encoded,
                                  src_key_padding_mask=src_key_padding_mask)

            # Decoder starts from a random hidden state.
            hidden = torch.randn(self.utter_n_layer, batch_size,
                                 self.hidden_size)
            if torch.cuda.is_available():
                hidden = hidden.cuda()

            step_input = torch.zeros(
                batch_size, dtype=torch.long).fill_(self.sos)
            if torch.cuda.is_available():
                step_input = step_input.cuda()

            for t in range(1, maxlen):
                step_output, hidden = self.decoder(step_input, hidden, memory)
                floss[t] = step_output
                # NOTE(review): squeeze() drops the batch dim when
                # batch_size == 1 -- confirm callers never predict with a
                # single-element batch.
                step_input = step_output.topk(1)[1].squeeze()
                outputs[t] = step_input
            if loss:
                return outputs, floss
            return outputs
'''
class Transformer(nn.Module):
# Refer to:
# - https://github.com/andrewpeng02/transformer-translation
def __init__(self, inpt_vocab_size, opt_vocab_size, d_model, nhead,
num_encoder_layers, num_decoder_layers,
dim_feedforward, dropout, sos=0, pad=0):
super(Transformer, self).__init__()
self.d_model = d_model
self.embed_src = nn.Embedding(inpt_vocab_size, d_model)
self.embed_tgt = nn.Embedding(opt_vocab_size, d_model)
self.pos_enc = PositionEmbedding(d_model, dropout=dropout)
self.inpt_vocab_size = inpt_vocab_size
self.opt_vocab_size = opt_vocab_size
self.pad, self.sos = pad, sos
self.model = nn.Transformer(d_model, nhead,
num_encoder_layers,
num_decoder_layers,
dim_feedforward,
dropout)
self.fc = nn.Linear(d_model, opt_vocab_size)
self.init_weight()
def init_weight(self):
for p in self.parameters():
if p.dim() > 1:
init.xavier_normal_(p)
def forward(self, src, tgt,
src_key_padding_mask,
tgt_key_padding_mask,
memory_key_padding_mask):
# src, tgt: [seq, batch]
tgt_mask = gen_nopeek_mask(tgt.shape[0])
src = self.pos_enc(self.embed_src(src) * math.sqrt(self.d_model))
tgt = self.pos_enc(self.embed_tgt(tgt) * math.sqrt(self.d_model))
# encoder and decoder in one line
# input:
# src: [seq, batch]
# tgt: [seq, batch]
# src_key_padding_mask: [batch, seq]
# tgt_key_padding_mask: [batch, seq]
# memory_key_padding_mask: [batch, seq]
# output: [seq, batch, vocab]
output = self.model(src,
tgt,
tgt_mask=tgt_mask,
src_key_padding_mask=src_key_padding_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
# [seq, batch, vocab_size]
return F.log_softmax(self.fc(output), dim=-1)
def predict(self, src,
src_key_padding_mask,
memory_key_padding_mask,
maxlen):
# src: [seq, batch]
with torch.no_grad():
batch_size = src.shape[1]
outputs = torch.zeros(maxlen, batch_size)
floss = torch.zeros(maxlen, batch_size, self.opt_vocab_size)
if torch.cuda.is_available():
outputs = outputs.cuda()
floss = floss.cuda()
output = torch.zeros(batch_size, dtype=torch.long).fill_(self.sos)
if torch.cuda.is_available():
output = output.cuda()
output = [output]
src = self.pos_enc(self.embed_src(src) * math.sqrt(self.d_model))
for t in range(1, maxlen):
# tgt: [seq, batch, vocab_size]
# this part is slow druing inference
tgt_mask = gen_nopeek_mask(t)
soutput = torch.stack(output)
soutput = self.pos_enc(self.embed_tgt(soutput) * math.sqrt(self.d_model))
tgt = self.model(src,
soutput,
src_key_padding_mask=src_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
tgt_key_padding_mask=None,
tgt_mask=tgt_mask)
tgt = F.log_softmax(self.fc(tgt[-1]), dim=-1) # [batch, vocab_size]
floss[t] = tgt
tgt = tgt.topk(1)[1].squeeze() # [batch]
outputs[t] = tgt
output.append(tgt)
return outputs, floss
'''
'''
def bert_for_masked_lm_forward(self, input_ids, encoder_hidden, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
masked_lm_labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
encoder_hidden=encoder_hidden, # NOTE: add this line
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = torch.nn.CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
def bert_model_forward(self, input_ids, encoder_hidden, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to fload if need + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.