input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<reponame>anderzzz/fjarrsyn<filename>fjarrsyn/core/agent_ms.py<gh_stars>0
'''Agent Management System
'''
from collections.abc import Iterable
from collections import OrderedDict
from uuid import uuid4
import numpy as np
import numpy.random
import itertools
import networkx as nx
from fjarrsyn.core.agent import Agent
from fjarrsyn.core.graph import Node
from fjarrsyn.core.instructor import Compulsion, Mutation
from fjarrsyn.simulation.sampler import AgentSampler, EnvSampler, GraphSampler, SystemIO
class AgentManagementSystem(object):
'''Base class for the medium in which agents interact with other agents or
other external objects.
Parameters
----------
name : str
Name of the agent management system
agents
Iterable of agents to be part of the system, in no particular order.
full_agents_graph : Graph, optional
Optional Graph object that defines the spatial network relation between
the agents of the system. If none given, a complete non-directional
graph is used.
common_env : optional
In case all agents have a common environment, provide the corresponding
object here
strict_engine : bool, optional
If False, any exceptions from engine execution of instructors are
non-fatal to the execution. If True, engine exceptions terminates
execution
Raises
------
TypeError
If a full agents graph is given that is not of the Graph class
Notes
-----
The agent management system is an abstract object in which the agent
affordances are implemented and transplanted into the relevant agent
organs. Any spatial network relations between agents are part of this
system. Agents part of the system are assigned a unique ID.
'''
def neighbours_to(self, agent_index, agents_only=True):
'''Method to extract the agent objects neighbours in the graph to the
agent of a certain system identifier
Parameters
----------
agent_index : str
The agent identifier within the system, available from the
`agent_id_system` attribute
agents_only : bool, optional
If True, only the agents of the graph neighbours are returned,
otherwise the entire Node object of the neighbours
Returns
-------
agents_hood : list
Agents (or Nodes) directly adjacent to the given agent
'''
node_with_agent = self.node_from_agent_id_[agent_index]
node_neighbours = self.agents_graph.neighbors(node_with_agent)
node_neighbours = list(node_neighbours)
if agents_only:
ret_neighbours = [x.agent_content for x in node_neighbours]
else:
ret_neighbours = node_neighbours
return ret_neighbours
def edge_property(self, agent_index_1, agent_index_2):
'''Determine property of edge, including if there is an edge at all
Parameters
----------
agent_index_1 : str
Index of first agent in system
agent_index_2 : str
Index of second agent in system
Returns
-------
'''
node_agent_1 = self.node_from_agent_id_[agent_index_1]
node_agent_2 = self.node_from_agent_id_[agent_index_2]
there_is_edge = (node_agent_1, node_agent_2) in self.agents_graph.edges
if not there_is_edge:
return there_is_edge, None
else:
edge_attribute = self.agents_graph[node_agent_1][node_agent_2]
return there_is_edge, edge_attribute
def edge_edit(self, agent_index_1, agent_index_2,
delete=False, add=False, weight=None):
'''Edit the agent graph
Notes
-----
The method does not verify if the graph operation makes sense given the
current topology. If not, exceptions are raised in graph library
method.
Parameters
----------
agent_index_1 : str
Index of first agent in system
agent_index_2 : str
Index of second agent in system
delete : bool, optional
If True, delete edge between the two agents
add : bool, optional
If True, add edge between the two agents
weight : optional
Add or set weight attribute of edge, either previously existing or
just added
Raises
------
NetworkXError
If operations on the graph are meaningless in current topology
'''
node_agent_1 = self.node_from_agent_id_[agent_index_1]
node_agent_2 = self.node_from_agent_id_[agent_index_2]
if delete:
self.agents_graph.remove_edge(node_agent_1, node_agent_2)
if add:
self.agents_graph.add_edge(node_agent_1, node_agent_2)
if not weight is None:
raise NotImplementedError('Weighted graphs not fully implemented')
self.agents_graph.edges[node_agent_1, node_agent_2]['weight'] = weight
def _shuffle_items(self, items, max_iter, replace):
'''Shuffled iterator over agent graph items.
Notes
-----
This method of iteration requires the graph to remain as within the
iteration. Nodes can therefore not be added or removed inside the loop,
nor can agents be added or removed from nodes. For cases like that use
an iteration over some variant of `_choice_items`.
Parameters
----------
items : Iterable
The container of graph items, such as Nodes or Agents
max_iter : int
The number of entries the iterator should yield before termination.
If set to None, the iteration is infinite.
replace : bool, optional
If True, the iterator selects items randomly with replacement such
that an uneven sampling of the items can be generated. If False,
the iterator selects items in random order, but guaranteed to
generate an even sampling of the items.
Yields
------
item
One item of the input items, that is a Node or Agent
'''
def _terminator(counter):
if max_iter is None:
return True
elif counter < max_iter:
return True
else:
return False
shuffled = []
counter = 0
while _terminator(counter):
counter += 1
if len(shuffled) == 0:
shuffled = list(np.array(items)[np.random.choice(len(items),
size=len(items),
replace=replace)])
entry = shuffled.pop(0)
yield entry
def _cycle_items(self, items, max_iter):
'''Ordered iterator over agent graph items
Notes
-----
This method of iteration requires the graph to remain as within the
iteration. Nodes can therefore not be added or removed inside the loop,
nor can agents be added or removed from nodes. For cases like that use
an iteration over some variant of `_choice_items`.
Parameters
----------
items : Iterable
The container of graph items, such as Nodes or Agents
max_iter : int
The number of entries the iterator should yield before termination.
If set to None, the iteration is infinite.
Yields
------
item
One item of the input items, that is a Node or Agent
'''
if max_iter is None:
return itertools.cycle(items)
else:
for n, element in enumerate(itertools.cycle(items)):
if n < max_iter:
yield element
else:
break
def _choice_items(self, items):
'''Select random item from the agent graph items
Parameters
----------
items : Iterable
The container of graph items, such as Nodes or Agents
Returns
-------
items
One random item of the input items, that is a Node or Agent
'''
return items[np.random.randint(len(items))]
def _strip_node_agent(self, agents_only, subset):
'''Retrieve node graph items
Parameters
----------
agents_only : bool
If True, extract Agents from graph. If False, extract Nodes
subset : Iterable
A subset of the graph to extract items from. If None, entire graph
Returns
-------
items
Collection of agents or nodes from the graph
'''
if subset is None:
items = list(self.agents_graph.nodes)
else:
items = list(subset)
if agents_only:
items = []
for x in map(lambda x: x.agent_content, self.agents_graph.nodes):
if not x is None:
items.append(x)
return items
def _strip_edge_agent(self, agents_only, subset):
'''Retrieve edge graph items
Parameters
----------
agents_only : bool
If True, extract Agent-Agent edge pairs from graph. If False,
extract Node-Node edge pairs
subset : Iterable
A subset of the graph to extract items from. If None, entire graph
Returns
-------
items
Collection of agent-agent or node-node edge pairs from the graph
'''
if subset is None:
items = list(self.agents_graph.edges)
else:
items = list(subset)
if agents_only:
items = []
for x in map(lambda x: (x[0].agent_content, x[1].agent_content),
self.agents_graph.edges):
if not None in x:
items.append(x)
return items
def shuffle_nodes(self, agents_only, max_iter, replace, subset=None):
'''Shuffled iterator over agent graph nodes
Notes
-----
This method of iteration requires the graph to remain as within the
iteration. Nodes can therefore not be added or removed inside the loop,
nor can agents be added or removed from nodes. For cases like that use
an iteration over `choice_nodes`.
Parameters
----------
agents_only : bool
If True, extract Agents from graph. If False, extract Nodes from
graph
max_iter : int
The number of entries the iterator should yield before termination.
If set to None, the iteration is infinite.
replace : bool, optional
If True, the iterator selects nodes randomly with replacement such
that an uneven sampling of the nodes can be generated. If False,
the iterator selects nodes in random order, but guaranteed to
generate an even sampling of the nodes.
subset : Iterable
A subset of the graph to extract items from. If None, entire graph
Yields
------
item
Node or Agent of graph
'''
return self._shuffle_items(self._strip_node_agent(agents_only, subset),
max_iter, replace)
def shuffle_edges(self, agents_only, max_iter, replace, subset=None):
'''Shuffled iterator over agent graph edges
Notes
-----
This method of iteration requires the graph to remain as within the
iteration. Nodes can therefore not be added or removed inside the loop,
nor can | |
its vertices.
"""
pass
    def setFaceVertexColor(*args, **kwargs):
        """
        setFaceVertexColor(color, faceId, vertexId, modifier=None, rep=kRGBA) -> self

        Sets the color of the specified face/vertex pair.

        If 'modifier' (MDGModifier) is provided then the operation will be
        added to the modifier and will not take effect until the modifier's
        doIt() is called. Otherwise it will take effect immediately.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setFaceVertexColors(*args, **kwargs):
        """
        setFaceVertexColors(colors, faceIds, vertexIds, modifier=None, rep=kRGBA) -> self

        Sets the colors of the specified face/vertex pairs.

        If 'modifier' (MDGModifier) is provided then the operation will be
        added to the modifier and will not take effect until the modifier's
        doIt() is called. Otherwise it will take effect immediately.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setFaceVertexNormal(*args, **kwargs):
        """
        setFaceVertexNormal(normal, faceId, vertexId, space=MSpace.kObject, modifier=None) -> self

        Sets a face-specific normal at a vertex.

        If 'modifier' (MDGModifier) is provided then the operation will be
        added to the modifier and will not take effect until the modifier's
        doIt() is called. Otherwise it will take effect immediately.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setFaceVertexNormals(*args, **kwargs):
        """
        setFaceVertexNormals(normals, faceIds, vertexIds, space=MSpace.kObject) -> self

        Sets normals for the given face/vertex pairs.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setFloatBlindData(*args, **kwargs):
        """
        setFloatBlindData(compId, compType, blindDataId, attr, data) -> self
        setFloatBlindData(seq of compId, compType, blindDataId, attr, data) -> self

        The first version sets the value of a 'float' blind data attribute
        on a single component of the mesh. The data must be a single float.

        The second version sets the value of a 'float' blind data attribute
        on multiple components of the mesh. If the data is a sequence of
        floats then it must provide a value for each component in compIds.
        If it is a single float then all of the specified components will
        have their blind data set to that value.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setIntBlindData(*args, **kwargs):
        """
        setIntBlindData(compId, compType, blindDataId, attr, data) -> self
        setIntBlindData(seq of compId, compType, blindDataId, attr, data) -> self

        The first version sets the value of an 'int' blind data attribute
        on a single component of the mesh. The data must be a single int.

        The second version sets the value of an 'int' blind data attribute
        on multiple components of the mesh. If the data is a sequence of
        ints then it must provide a value for each component in compIds.
        If it is a single int then all of the specified components will
        have their blind data set to that value.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setInvisibleFaces(*args, **kwargs):
        """
        setInvisibleFaces(faceIds, makeVisible=False) -> self

        Sets the specified faces of the mesh to be visible or invisible. See
        the getInvisibleFaces() method for a description of invisible faces.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setIsColorClamped(*args, **kwargs):
        """
        setIsColorClamped(colorSet, clamped) -> self

        Sets whether the color set's RGBA components should be clamped to the
        range 0 to 1.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setNormals(*args, **kwargs):
        """
        setNormals(normals, space=MSpace.kObject) -> self

        Sets the mesh's normals (user normals).
        """
        # Signature stub; no runtime behavior here.
        pass
    def setPoint(*args, **kwargs):
        """
        setPoint(vertexId, MPoint, space=MSpace.kObject) -> self

        Sets the position of specified vertex.

        Note that if you modify the position of a vertex for a mesh node (as
        opposed to mesh data), a tweak will be created. If you have a node
        with no history, the first time that a tweak is created, the
        underlying pointers under the MFnMesh object may change. You will
        need to call syncObject() to make sure that the object is valid.
        Subsequent calls to setPoint() on the same object do not require a
        syncObject() call.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setPoints(*args, **kwargs):
        """
        setPoints(points, space=MSpace.kObject) -> self

        Sets the positions of the mesh's vertices. The positions may be
        given as a sequence of MFloatPoint's or a sequence of MPoint's, but
        not a mix of the two.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setSmoothMeshDisplayOptions(*args, **kwargs):
        """
        setSmoothMeshDisplayOptions(MMeshSmoothOptions) -> self

        Sets the options to use when smoothing the mesh for display.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setSomeColors(*args, **kwargs):
        """
        setSomeColors(colorIds, colors, colorSet='', rep=kRGBA) -> self

        Sets specific colors in a colorSet.

        If the largest colorId in the sequence is larger than numColors()
        then the colorSet will be grown to accommodate the new color values.
        If you have added new colorIds, you can call assignColors to assign
        the colorIds to the geometry. If you are modifying existing colors,
        they will already be referenced by the existing mesh data.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setSomeUVs(*args, **kwargs):
        """
        setSomeUVs(uvIds, uValues, vValues, uvSet='') -> self

        Sets the specified texture coordinates (uv's) for this mesh. The uv
        value sequences and the uvIds sequence must all be of equal size. If
        the largest uvId in the array is larger than numUVs() then the uv
        list for this mesh will be grown to accommodate the new uv values.
        If a named uv set is given, the array will be grown when the largest
        uvId is larger than numUVs(uvSet).

        If you have added new uvIds, you must call one of the assignUV
        methods to assign the uvIds to the geometry. If you are modifying
        existing UVs, you do not need to call one of the assignUV methods.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setStringBlindData(*args, **kwargs):
        """
        setStringBlindData(compId, compType, blindDataId, attr, data) -> self
        setStringBlindData(seq of compId, compType, blindDataId, attr, data) -> self

        The first version sets the value of a 'string' blind data attribute
        on a single component of the mesh. The data must be a single string.

        The second version sets the value of a 'string' blind data attribute
        on multiple components of the mesh. If the data is a sequence of
        strings then it must provide a value for each component in compIds.
        If it is a single string then all of the specified components will
        have their blind data set to that value.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setUV(*args, **kwargs):
        """
        setUV(uvId, u, v, uvSet='') -> self

        Sets the specified texture coordinate.

        The uvId is the element in the uv list that will be set. If the uvId
        is greater than or equal to numUVs() then the uv list will be grown
        to accommodate the specified uv. If the UV being added is new, then
        you must call one of the assignUV methods in order to update the
        geometry.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setUVs(*args, **kwargs):
        """
        setUVs(uValues, vValues, uvSet='') -> self

        Sets all of the texture coordinates (uv's) for this mesh. The uv
        value sequences must be of equal size and must be at least as large
        as the current UV set size. You can determine the UV set size by
        calling numUVs() for the default UV set, or numUVs(uvSet) for a
        named UV set.

        If the sequences are larger than the UV set size, then the uv list
        for this mesh will be grown to accommodate the new uv values.

        After using this method to set the UV values, you must call one of
        the assignUV methods to assign the corresponding UV ids to the
        geometry.

        In order to shrink the uvs array, do the following: clearUVs(),
        setUVs(...), assignUVs(). These steps will let you create an
        array of uvs which is smaller than the original one.
        """
        # Signature stub; no runtime behavior here.
        pass
    def setVertexColor(*args, **kwargs):
        """
        setVertexColor(color, vertexId, modifier=None, rep=kRGBA) -> self

        Sets the color for a vertex in all the faces which share it.

        If 'modifier' (MDGModifier) is provided then the operation will be
        added to the modifier and will not take effect until the modifier's
        doIt() is called. Otherwise it will take effect immediately.
        """
        # Signature stub; no runtime behavior here.
        pass
| |
<filename>prmaya/scripts/prDeformPaint.py
"""
SOURCE
https://github.com/parzival-roethlein/prmaya
DESCRIPTION
Modeling brushes, mainly for blendshape targets (similar to DPK_paintDeform.mel)
Operations:
- Smooth delta: average the delta (vertex vector from target to painted mesh)
with its neighbors. Reasons to do so:
- preserve local surface details
- stay "on model"
- skin sliding
- smooth/relax on compression/extension
- Copy vertex: move painted mesh vertex to the target vertex position
- Closest point: move the painted mesh vertex to the closest point on surface
of the target mesh
- Closest vertex: move the painted mesh vertex to the closest vertex of the
target mesh
- Average vertex: move a vertex to the average position of its neighbors. It's
the same behavior as Mayas "Relax Tool" and "Average vertices"
Right click popup menus:
- "Target: [...]" area to help with common target use cases
- Operations area (Smooth delta, Copy vertex, ...) to toggle settings menuBar
Same deformation strength no matter what the edited blendshape target weight and
envelope values are
USAGE
import prDeformPaint
prDeformPaint.Ui()
INSTALLATION
Your Maya environment has to be able to access the folders of:
(MAYA_PLUG_IN_PATH:)
.../prmaya/plugins/prMovePointsCmd.py
(MAYA_SCRIPT_PATH:)
.../prmaya/scripts/prDeformPaintBrush.mel
.../prmaya/scripts/prDeformPaint.py
LINKS
- Demo: TODO
- Making-of:
https://pazrot3d.blogspot.com/2020/01/prdeformpaintpy-making-of.html
MOTIVATION
- Replace DPK_paintDeform.mel, because:
- It permanently breaks meshes that are in edit blendshape target mode
- It crashes Maya when flooding twice in a row (workaround is re-enter tool
between each flood)
- It is slow (MEL)
- Partially unpredictable deformation when deforming multiple vertices,
because the deltas are calculated just in time, while vertices are moving
- Is useful in addition to Mayas sculpt brushes because:
- they do not support flooding vertex selection
- they do not support viewport "isolate selected" mesh components
- they are buggy (erase delta not really deleting deltas, ...)
- missing "average delta" operation
- New operations: Closest point, Closest vertex
TODO
- fix blendshapeMultiplier for inbetween shapes
- (maybe) from edge-neighbor-vertices to face-neighbor-vertices ?
- save target with mc.fileInfo (maybe initialization has to be adjusted)
- merge UI into "tool settings" window with "paint scripts tool"
(+selection context for target, so no tool switch is required)
"""
from itertools import izip
from collections import defaultdict
import maya.cmds as mc
import maya.api.OpenMaya as om
import pymel.core as pm
import maya.mel as mm
class Ui(pm.uitypes.Window):
    """Settings window for the prDeformPaint brush.

    Wraps a pymel window and mirrors every UI control into global MEL
    variables ($prDP_*) that the paint brush script reads.
    """
    # Window object name; also used as the window title.
    _TITLE = 'prDeformPaint_100'

    def __new__(cls):
        """ delete possible old window and create new instance """
        if pm.window(cls._TITLE, exists=True):
            pm.deleteUI(cls._TITLE)
        self = pm.window(cls._TITLE, title=cls._TITLE)
        return pm.uitypes.Window.__new__(cls, self)

    def __init__(self, operation=0, setTargetFromSelection=True,
                 menuBarVisible=True, space=om.MSpace.kObject,
                 minDeltaLength=0.00001, templateDuplicate=True,
                 visibleDuplicate=True):
        """
        :param operation: (int) 0=Smooth delta, 1=Copy vertex, 2=Closest point,
        3=Closest vertex, 4=Average vertex
        :param setTargetFromSelection: (bool) if there is no previous target
        stored (=first time running the UI in the maya instance) use the current
        selection
        :param space: (int) om.MSpace.k___
        :param menuBarVisible: (bool) settings menuBar visibility
        :param minDeltaLength: (float) deltas shorter than this are ignored
        :param templateDuplicate: (bool) duplicate.template=___
        :param visibleDuplicate: (bool) duplicate.visibility=___
        """
        # Make sure plugin and MEL brush script are available before building UI
        initializeMaya()
        self.minDeltaLengthDefault = minDeltaLength
        with pm.verticalLayout() as mainLayout:
            # --- menu bar: space / settings / help ---
            with pm.menuBarLayout() as self.menuBar:
                self.space = pm.menu(label='Space', tearOff=True)
                pm.radioMenuItemCollection()
                self.spaces = []
                for name, value in self.getSpaceStrings(space):
                    self.spaces.append(
                        pm.menuItem(label=name, radioButton=value,
                                    command=pm.Callback(self.syncMelVariablesWithUi))
                    )
                pm.menu(label='Settings')
                self.templateDuplicate = pm.menuItem(
                    label='template DUPLICATE',
                    checkBox=templateDuplicate)
                self.visibleDuplicate = pm.menuItem(
                    label='visible DUPLICATE',
                    checkBox=visibleDuplicate)
                # minDeltaLength value is stored in this item's label text
                self.minDeltaLength = pm.menuItem(
                    label='minDeltaLength: {}'.format(minDeltaLength),
                    command=pm.Callback(self.setMinDeltaLengthDialog))
                pm.menu(label='Help')
                # pm.menuItem(label='TODO: demo video (vimeo)')
                # pm.menuItem(label='TODO: latest installer (...)')
                pm.menuItem(label='latest version (github)',
                            command=pm.Callback(self.getLatestVersion))
            self.menuBar.setMenuBarVisible(menuBarVisible)
            # --- target row: button + read-only text field ---
            with pm.horizontalLayout() as targetLayout:
                pm.button(l='Target:', c=pm.Callback(self.setTargetFromSelection))
                self.target = pm.textField(en=False)
                # restore previously stored target from the MEL globals, if any
                variableTest = mm.eval('whatIs "$prDP_operation"')
                if variableTest != 'Unknown':
                    self.target.setText(mm.eval('$tempMelVar=$prDP_driver'))
                targetLayout.redistribute(0, 1)
            pm.popupMenu(parent=targetLayout, button=3)
            pm.menuItem(label='intermediate of selection',
                        c=pm.Callback(self.setTargetFromSelectionIntermediate))
            pm.menuItem(label='DUPLICATE of selection',
                        c=pm.Callback(self.setTargetFromDuplicateOfSelection))
            # --- operation radio buttons (three shared-collection rows) ---
            with pm.verticalLayout() as operationLayout:
                self.operation1 = pm.radioButtonGrp(
                    labelArray2=['Smooth delta', 'Copy vertex'],
                    numberOfRadioButtons=2,
                    columnWidth2=[110, 110],
                    columnAlign2=['left', 'left'],
                    changeCommand=pm.Callback(self.syncMelVariablesWithUi))
                self.operation1.setSelect(operation + 1)
                self.operation2 = pm.radioButtonGrp(
                    shareCollection=self.operation1,
                    labelArray2=['Closest point', 'Closest vertex'],
                    numberOfRadioButtons=2,
                    columnWidth2=[110, 110],
                    columnAlign2=['left', 'left'],
                    changeCommand=pm.Callback(self.syncMelVariablesWithUi))
                pm.separator()
                self.operation3 = pm.radioButtonGrp(
                    shareCollection=self.operation1,
                    label1='Average vertex',
                    numberOfRadioButtons=1,
                    columnWidth=[1, 110],
                    columnAlign=[1, 'left'],
                    changeCommand=pm.Callback(self.syncMelVariablesWithUi))
                operationLayout.redistribute(5, 5, 1, 5)
            pm.popupMenu(parent=operationLayout, button=3)
            pm.menuItem(label='toggle menuBar',
                        c=pm.Callback(self.toggleMenuBar))
            # --- tool buttons ---
            with pm.horizontalLayout() as toolLayout:
                pm.button(label='Enter Tool', command=pm.Callback(self.enterTool))
                pm.button(label='Close', command=pm.Callback(self.close))
                toolLayout.redistribute()
            mainLayout.redistribute(0, 0, 0, 1)
        if setTargetFromSelection and not self.target.getText():
            self.setTargetFromSelection()
        self.show()
        self.syncMelVariablesWithUi()

    def syncMelVariablesWithUi(self):
        # Push current UI state into the global MEL variables read by the brush
        mm.eval('$prDP_driver = "{}"'.format(self.getTarget()))
        mm.eval('$prDP_operation = {}'.format(self.getOperation()))
        mm.eval('$prDP_space = {}'.format(self.getSpace()))
        mm.eval('$prDP_minDeltaLength = {}'.format(self.getMinDeltaLength()))

    def enterTool(self):
        # Activate the MEL paint tool, then sync the UI settings into it
        mm.eval('prDeformPaint_initialize();')
        self.syncMelVariablesWithUi()

    def close(self):
        # Delete the window UI
        pm.deleteUI(self._TITLE)

    def toggleMenuBar(self):
        # Flip visibility of the settings menu bar
        self.menuBar.setMenuBarVisible(not self.menuBar.getMenuBarVisible())

    def getSpace(self):
        # Index of the checked space radio item; raises if none is checked
        for x, space in enumerate(self.spaces):
            if pm.menuItem(space, q=True, radioButton=True):
                return x
        raise ValueError('Invalid space')

    @staticmethod
    def getSpaceStrings(defaultSpace):
        # Build [label, isDefault] pairs for every om.MSpace constant,
        # grouping constant names that share the same integer value.
        # NOTE(review): assumes the enumeration order of spaceDict.values()
        # lines up with the MSpace integer values -- confirm.
        spaceDict = defaultdict(list)
        for attr in dir(om.MSpace):
            if attr.startswith('__'):
                continue
            spaceDict[getattr(om.MSpace, attr)].append(attr)
        spaces = []
        for x, space in enumerate([str(s) for s in spaceDict.values()]):
            space = space.replace("'", "").replace('[', '').replace(']', '')
            spaceValue = False
            if x == defaultSpace:
                space += ' (default)'
                spaceValue = True
            spaces.append([space, spaceValue])
        return spaces

    def getTarget(self):
        # Current target mesh/transform name shown in the text field
        return self.target.getText()

    def setTarget(self, mayaObject):
        # Store target in the UI and mirror it into the MEL globals
        self.target.setText(mayaObject)
        self.syncMelVariablesWithUi()

    def setTargetFromSelection(self):
        # Use the first selected transform/mesh as target ('' if none)
        self.setTarget((mc.ls(sl=True, type=['transform', 'mesh']) or [''])[0])

    def setTargetFromSelectionIntermediate(self):
        # Use the intermediate (orig) shape of the current selection as target
        target = (mc.ls(sl=True, type=['transform', 'mesh']) or [''])[0]
        if mc.ls(target, type='mesh'):
            target = mc.listRelatives(target, parent=True)[0]
        if target:
            children = mc.listRelatives(target, children=True)
            target = (mc.ls(children, intermediateObjects=True) or [''])[-1]
        self.setTarget(target)

    def setTargetFromDuplicateOfSelection(self):
        # Duplicate the selection, unlock/disconnect its transform attributes,
        # parent it to world and use the duplicate as target
        selection = (mc.ls(sl=True, type=['transform', 'mesh']) or [None])[0]
        if not selection:
            raise ValueError('Nothing selected to duplicate')
        duplicate = mc.duplicate(selection)[0]
        duplicate = mc.rename(duplicate, duplicate+'_prDP_DUPLICATE')
        for attr in ['t', 'tx', 'ty', 'tz',
                     'r', 'rx', 'ry', 'rz',
                     's', 'sx', 'sy', 'sz',
                     'v', 'template']:
            fullAttr = '{}.{}'.format(duplicate, attr)
            mc.setAttr(fullAttr, lock=False)
            for attrInput in mc.listConnections(fullAttr, source=True,
                                                destination=False, p=True) or []:
                mc.disconnectAttr(attrInput, fullAttr)
        if mc.listRelatives(duplicate, parent=True):
            mc.parent(duplicate, world=True)
        # Apply the template/visibility settings from the Settings menu
        templateValue = pm.menuItem(self.templateDuplicate, q=True, checkBox=True)
        mc.setAttr('{}.template'.format(duplicate), templateValue)
        visibilityValue = pm.menuItem(self.visibleDuplicate, q=True, checkBox=True)
        mc.setAttr('{}.visibility'.format(duplicate), visibilityValue)
        self.setTarget(duplicate)
        mc.select(selection)

    def getOperation(self):
        # Map the three radio rows to a single operation index:
        # row1 -> 0/1, row2 -> 2/3, row3 -> 4
        firstRow = self.operation1.getSelect()
        if firstRow != 0:
            return firstRow - 1
        secondRow = self.operation2.getSelect()
        if secondRow != 0:
            return secondRow + 1
        thirdRow = self.operation3.getSelect()
        if thirdRow != 0:
            return thirdRow + 3
        raise ValueError('Unknown operation')

    def setMinDeltaLengthDialog(self):
        # Prompt the user for a new minDeltaLength value
        result = pm.promptDialog(
            title='minDeltaLength',
            message='Enter new minimum delta length value:\n'
                    'default = "{0}"'.format(self.minDeltaLengthDefault),
            button=['OK', 'Cancel'],
            defaultButton='OK',
            cancelButton='Cancel',
            dismissString='Cancel',
            text=self.getMinDeltaLength())
        if result == 'OK':
            self.setMinDeltaLength(pm.promptDialog(query=True, text=True))

    def setMinDeltaLength(self, value):
        # Validate and store the value in the menu item label, then sync
        try:
            value = float(value)
        except ValueError:
            raise ValueError('Given length must be a number: "{}"'.format(value))
        if value < 0.0:
            raise ValueError('Given length must be greater or equal to 0.0')
        self.minDeltaLength.setLabel('minDeltaLength: {}'.format(value))
        self.syncMelVariablesWithUi()

    def getMinDeltaLength(self):
        # Parse the float value back out of the menu item label
        label = self.minDeltaLength.getLabel()
        value = float(label.replace('minDeltaLength: ', ''))
        return value

    @staticmethod
    def getLatestVersion():
        # Open the project page in the default web browser
        mc.launch(web="https://github.com/parzival-roethlein/prmaya")
def initializeMaya(prMovePointsCmdPath='prMovePointsCmd',
                   prDeformPaintBrushPath='prDeformPaintBrush.mel'):
    """
    load the required plugin and mel script
    manual usage example:
    prDeformPaint.initializeMaya('/home/prthlein/private/code/prmaya/prmaya/plugins/prMovePointsCmd.py',
                                 '/home/prthlein/private/code/prmaya/prmaya/scripts/prDeformPaintBrush.mel')
    :param prMovePointsCmdPath: only required if it's not in a MAYA_PLUG_IN_PATH
    :param prDeformPaintBrushPath: only required if it's not in a MAYA_SCRIPT_PATH
    :return:
    """
    pluginLoaded = mc.pluginInfo(prMovePointsCmdPath, q=True, loaded=True)
    if not pluginLoaded:
        mc.loadPlugin(prMovePointsCmdPath)
    # The MEL script defines $prDP_operation; if that global is unknown the
    # script has not been sourced yet in this Maya session.
    melMissing = mm.eval('whatIs "$prDP_operation"') == 'Unknown'
    if melMissing:
        mm.eval('source "{}";'.format(prDeformPaintBrushPath))
def reinitializeMaya(*args, **kwargs):
    """reload plugin and mel script

    Forwards *args / **kwargs to initializeMaya (plugin path and brush
    script path).
    """
    # NOTE(review): this first call looks redundant -- it is immediately
    # followed by an unload / re-source cycle and a second call; confirm intent.
    initializeMaya(*args, **kwargs)
    mc.unloadPlugin('prMovePointsCmd')
    mm.eval('rehash;')
    # NOTE(review): sources the script by name only, ignoring any path passed
    # in kwargs -- relies on the script being on the MAYA_SCRIPT_PATH.
    mm.eval('source prDeformPaintBrush;')
    initializeMaya(*args, **kwargs)
def getBlendshapeFromMesh(mesh):
    """
    Find the blendShape node deforming the given mesh, if any.

    :param mesh: mesh or transform (with mesh child)
    :return: blendShape node name, or None if there is no intermediate
             shape or no blendShape in its future history
    """
    if mc.ls(mesh, type='transform'):
        transform = mesh
    else:
        transform = mc.listRelatives(mesh, parent=True)
    children = mc.listRelatives(transform, children=True)
    intermediates = mc.ls(children, intermediateObjects=True)
    if not intermediates:
        return None
    # Walk forward from the orig shape to find the deformer
    future = mc.listHistory(intermediates, future=True, groupLevels=True,
                            pruneDagObjects=True)
    blendshapes = mc.ls(future, type='blendShape')
    return blendshapes[0] if blendshapes else None
def getEditBlendshapeMultiplier(mesh, cacheValue=None):
    """
    get a multiplier to normalize the deformation (same deformation strength,
    no matter what the edited target and envelope values are)

    :param mesh: mesh or transform (with mesh child)
    :param cacheValue: previously computed multiplier; returned unchanged
                       when given
    :return: float multiplier
    """
    if cacheValue is not None:
        return cacheValue
    blendshape = getBlendshapeFromMesh(mesh)
    if not blendshape:
        return 1.0
    sculptIndex = mc.getAttr('{}.inputTarget[0].sculptTargetIndex'.format(blendshape))
    if sculptIndex == -1:
        # no target is currently in edit mode
        return 1.0
    allWeights = mc.getAttr('{}.weight'.format(blendshape))[0]
    indices = mc.getAttr('{}.weight'.format(blendshape), multiIndices=True)
    targetWeight = allWeights[indices.index(sculptIndex)]
    if targetWeight < 0.0001:
        raise ValueError('Edit blendshape weight is too small: {}'.format(targetWeight))
    envelope = mc.getAttr('{}.envelope'.format(blendshape))
    if envelope < 0.0001:
        raise ValueError('Envelope is too small: {}'.format(envelope))
    return 1.0 / (targetWeight * envelope)
def getMItMeshVertex(meshName):
    """Return an om.MItMeshVertex iterator for the named mesh.

    :param meshName: name of an existing mesh
    :raises ValueError: if no object of that name exists
    """
    if not mc.objExists(meshName):
        raise ValueError('object does not exist: "{}"'.format(meshName))
    selectionList = om.MSelectionList()
    selectionList.add(meshName)
    return om.MItMeshVertex(selectionList.getDagPath(0))
def getVertexPositions(meshName, vertexIds=None, space=om.MSpace.kObject):
    """
    :param meshName: 'myMeshShape'
    :param vertexIds: [2, 3, ...] or all if None
    :param space: om.MSpace.k___
    :return: MPointArray
    """
    vertexIter = getMItMeshVertex(meshName)
    ids = range(len(vertexIter)) if vertexIds is None else vertexIds
    positions = om.MPointArray()
    for vertexId in ids:
        vertexIter.setIndex(vertexId)
        positions.append(vertexIter.position(space))
    return positions
def copyVertex(driverMesh, drivenMesh, minDeltaLength, vertexIds, vertexWeights,
multiplier=None, space=om.MSpace.kObject):
"""
copy vertex position from driver to driven
:param driverMesh: 'driverMeshName'
:param drivenMesh: 'drivenMeshName'
:param minDeltaLength: float
:param vertexIds: [0, 1, ...]
:param vertexWeights: [1.0, 0.5, ...]
:param multiplier: float or detect if None
:param space: om.MSpace.k___
:return:
"""
multiplier = getEditBlendshapeMultiplier(drivenMesh, multiplier)
deltas = []
for vertexId, weight, driverPosition, drivenPosition in izip(
vertexIds,
vertexWeights,
getVertexPositions(driverMesh, vertexIds, space),
getVertexPositions(drivenMesh, vertexIds, space)):
deltas.append((driverPosition - drivenPosition) * weight * | |
import string, re
def processpif(file):
    # Expand all <pif stmt="..."> ... [<pelse>...</pelse>] ... </pif> tags in
    # the page text: the stmt is executed as a Python "if" and the tag is
    # replaced by its body (condition true) or by the <pelse> body (false).
    # Innermost tags are handled first so pifs can be nested.
    # SECURITY NOTE(review): the stmt is run through `exec` with only a naive
    # substring blacklist ('import', 'os.', must start with 'if') -- this is
    # not a safe sandbox for untrusted input.
    n = 0
    remembern = 0 #used for nested tags
    while n <> -1:
        piferr = '' #clear out these fields so they don't carry over.
        pifelse = ''
        n = string.find(file, "<pif ", n)
        if n <> -1:
            n2 = string.find(file, "</pif>", n) + 6
            # NOTE(review): when "</pif>" is missing, find() returns -1 and
            # n2 becomes 5, so this test is always true and the else branch
            # below ("pif has no closing tag") looks unreachable -- confirm.
            if n2 <> -1:
                #right here is where to add nested pif testing.
                nestn = string.find(file[n+4:n2], "<pif ")
                remembern = 0 + n  # remember outermost start to rescan from
                while nestn != -1: #find the innermost starting pif
                    n = n + 4 + nestn
                    nestn = string.find(file[n+4:n2], "<pif ")
                piftag = file[n:n2]
                # print 'the piftag:%s\n<br>\n the file-remembern:n2: %s \n<br>\n' % (piftag, file[remembern:n2])
                # Extract the statement between stmt=" and "> (or " >)
                s1 = string.find(piftag, 'stmt="', 0)
                s2 = string.find(piftag, '">', s1)
                if s2 == -1:
                    s2 = string.find(piftag, '" >', s1) # this adds a space after the dbl quote
                if s1 == -1:
                    piferr = piferr + 'parse error: no stmt=" found in pif'
                elif s2 == -1:
                    piferr = piferr + 'parse error: pif tag has no close eg. stmt="blahblah">:'
                pifstmt = piftag[s1+6:s2]
                #test for import here - do other security tests to prevent unauthorized code execution
                if string.find(pifstmt, 'import') != -1 or string.find(pifstmt, 'os.') != -1 or pifstmt[0:2] != 'if':
                    piferr = "PIF statement error - reserved word (eg. import, ':')"
                if piferr == '':
                    # Body between the closing '">' of the open tag and </pif>
                    pifdata = file[n+s2+2:n2-6]
                    i = string.find(pifdata, '<pelse>')
                    if i <> -1:
                        i1 = string.find(pifdata, '</pelse>', i)
                        if i1 <> -1:
                            pifelse = pifdata[i+7:i1]
                            pifdata = string.replace(pifdata, pifdata[i:i1+8], '')
                        else:
                            piferr = piferr + 'parse error: pif-pelse has no closing /pelse tag:'
                    # Evaluate the condition: append a body that flags success
                    answer = 'n'
                    pifexec = pifstmt + ': ' + 'answer = "y"'
                    try:
                        exec pifexec
                        if answer == 'y':
                            file = string.replace(file, piftag, pifdata)
                        else:
                            file = string.replace(file, piftag, pifelse)
                    except SyntaxError:
                        piferr = piferr + '<br>error with pif stmt: %s ' % (pifstmt)
                        piferr = piferr + '<br>try making it more like this: "if 9 <= 10" where you replace the 9 or 10 with a #pinp.value# <br> Remember python if = is if =='
                        file = string.replace(file, piftag, piferr)
                    except NameError:
                        piferr = piferr + '<br>error with pif stmt: %s ' % (pifstmt)
                        piferr = piferr + '<br>You may not be putting your pif in qoutes...."#pif#"'
                        file = string.replace(file, piftag, piferr)
                else:
                    # Parse/security error: replace the tag with the message
                    file = string.replace(file, piftag, piferr)
                    # file = re.sub(piftag, piferr, file, count = 1)
                # Rescan from the remembered outer position to pick up
                # enclosing pifs that are now innermost
                n = 0 + remembern
            else:
                piferr = piferr + '<b>pif has no closing tag </pif><b>'
                file = file[0:n] + piferr + file[n:len(file)]
                n = 0 + remembern
    # print 'n = %s \n<br>\n' % (n)
    return file
def processpvif(file):
n = 0
remembern = 0 #used for nested tags
while n <> -1:
pviferr = ''
pvifelse = ''
pvifdata = ''
n = string.find(file, "<pvif ", n)
if n <> -1:
n2 = string.find(file, "</pvif>", n) + 7
if n2 <> -1:
#right here is where to add nested testing.
nestn = string.find(file[n+5:n2], "<pvif ")
remembern = 0 + n
while nestn != -1: #find the innermost starting pvif
n = n + 5 + nestn
nestn = string.find(file[n+5:n2], "<pvif ")
pviftag = file[n:n2]
s1 = string.find(pviftag, 'stmt="', 0)
if s1 == -1:
pviferr = pviferr + 'parse error: no stmt=" found in pvif'
else:
s2 = string.find(pviftag, '">', s1)
if s2 == -1:
s2 = string.find(pviftag, '" >', s1) # this adds a space after the dbl quote
elif s2 == -1:
pviferr = pviferr + 'parse error: pvif tag has no close eg. stmt="blahblah">:'
pvifstmt = pviftag[s1+6:s2]
#test for import here - do other security tests to prevent unauthorized code execution
if string.find(pvifstmt, ':') != -1 or string.find(pvifstmt, 'import') != -1 or string.find(pvifstmt, 'os.') != -1 or pvifstmt[0:2] != 'if':
piferr = "PVIF statement error - reserved word (eg. import, ':') or does not begin with if"
if pviferr == '':
pvifdata = file[n+s2+2:n2-7]
i = string.find(pvifdata, '<pvelse>')
if i <> -1:
i1 = string.find(pvifdata, '</pvelse>', i)
if i1 <> -1:
pvifelse = pvifdata[i+8:i1]
pvifdata = string.replace(pvifdata, pvifdata[i:i1+9], '') #blank out the pvelse part of pvifdata
else:
pviferr = pviferr + 'parse error: pvif-pvelse has no closing /pvelse tag:'
answer = "n"
pvifexec = pvifstmt + ': ' + 'answer = "y"'
try:
exec pvifexec
if answer == 'y':
file = string.replace(file, pviftag, pvifdata)
else:
file = string.replace(file, pviftag, pvifelse)
except SyntaxError:
pvifstmtfix = string.replace(pvifstmt, '<', '<')
pvifstmtfix = string.replace(pvifstmt, '>', '>')
pviferr = pviferr + '<br>SyntaxError with pvif stmt: %s ' % (pvifstmtfix)
pviferr = pviferr + '<br>try making it more like this: "if 9 <= 10" where you replace the 9 or 10 with a pval.pobjid.var.no <br>'
pviferr = pviferr + 'other things to consider, is your pval returning a string instead of a number (or vice versa)?<br>'
pviferr = pviferr + 'Remember python if = is if =='
file = string.replace(file, pviftag, pviferr, 1)
except NameError:
pviferr = pviferr + '<br>error with pvif stmt: %s ' % (pvifstmt)
pviferr = pviferr + '<br>You may not be putting your pvar in qoutes...."#pvar#"'
file = string.replace(file, pviftag, pviferr, 1)
else:
file = string.replace(file, pviftag, pviferr, 1)
n = 0 + remembern
else:
pviferr = pviferr + '<b>pvif has no closing tag </pvif><b>'
file = file[0:n] + pviferr + file[n:len(file)]
n = 0 + remembern
return file
def processpmath(file, end):
    """Evaluate <pmath stmt="..."> tags and substitute the numeric result.

    Each tag's 'stmt' attribute is evaluated via a Python 2 'exec' of
    "answer = (<stmt>)" and the tag is replaced with str(answer).  Tags that
    still contain unresolved '<pval' or '#' placeholders are skipped, so this
    can be called repeatedly during page processing; errors are only reported
    when 'end' equals 'END' (the final pass).  Returns the rewritten string.

    NOTE(review): the statement is executed unguarded -- input must be
    trusted.  Requires Python 2 ('<>', 'exec' statement, 'string' module).
    """
    n = 0
    while n <> -1:
        pmatherr = '' #clear out these fields so they don't carry over.
        n = string.find(file, "<pmath ", n)
        if n <> -1:
            n2 = string.find(file, ">", n )+ 1
            if n2 <> -1:
                # NOTE(review): find() returns -1 on failure, making n2 == 0
                # here, so this condition is always true and the 'else'
                # branch below looks unreachable -- confirm.
                pmathtag = file[n:n2]
                if string.find(pmathtag, '<pval') == -1 and string.find(pmathtag, '#') == -1: #this is to get passed pmaths with pvals in them.
#                    print 'pval?' + str(string.find(pmathtag, '<pval')) +pmathtag[1:len(pmathtag)-1] + "end:" + end #testing line
                    s1 = string.find(pmathtag, 'stmt="', 0)
                    s2 = string.find(pmathtag, '">', s1)
                    if s2 == -1:
                        s2 = string.find(pmathtag, '" >', s1) # this adds a space after the dbl quote
                    if s1 == -1:
                        pmatherr = pmatherr + 'parse error: no stmt=" found in pmath'
                    elif s2 == -1:
                        pmatherr = pmatherr + 'parse error: pmath tag has no close eg. stmt="blahblah"> '
                    if pmatherr == '':
                        pmathstmt = pmathtag[s1+6:s2]
#                        print pmathstmt #testing line
                        # Evaluate "answer = (<stmt>)"; 'Error' sentinel
                        # detects a statement that did not assign.
                        answer = "Error"
                        pmathexec = 'answer = (' + pmathstmt + ')'
                        try:
                            exec pmathexec
                            if answer <> 'Error':
                                file = string.replace(file, pmathtag, str(answer))
                            else:
                                file = string.replace(file, pmathtag, 'pio Error in pmath')
                        except SyntaxError:
                            if end.upper() == 'END':
                                pmatherr = pmatherr + '<br>error with pmath stmt: %s ' % (pmathstmt)
#                                file = re.sub(pmathtag, pmatherr, file, count = 1)
                                file = string.replace(file, pmathtag, pmatherr)
                        except NameError:
                            if end.upper() == 'END':
                                pmatherr = pmatherr + '<br>error with pmath stmt: %s ' % (pmathstmt)
                                file = string.replace(file, pmathtag, pmatherr)
#                                file = re.sub(pmathtag, pmatherr, file, count = 1)
                        except TypeError:
                            if end.upper() == 'END':
                                pmatherr = pmatherr + '<br>error with pmath stmt: %s ...possible empty values' % (pmathstmt)
                                file = string.replace(file, pmathtag, pmatherr)
                                # NOTE(review): this re.sub repeats the
                                # replacement just done above -- appears
                                # redundant; confirm before removing.
                                file = re.sub(pmathtag, pmatherr, file, count = 1)
#                            else:
#                                file = re.sub(pmathtag, pmatherr, file, count = 1)
                            # NOTE(review): runs on every TypeError; when
                            # end != 'END', pmatherr is '' so this strips the
                            # tag from the page -- confirm intent.
                            file = string.replace(file, pmathtag, pmatherr)
            else:
                pmatherr = pmatherr + '<b>pmath has no closing tag </pmath><b>'
                file = file[0:n] + pmatherr + file[n:len(file)]
            n = n + 6 #to get next pmathtag...if looping...look here at the file re.subs - string.replaces (re sucks and breaks)
    return file
def pstring(file):
#pstring must be formatted like <pstring function="replace('sometext', 'newtext')">the string to run a string function</pstring>
#the <pstring> tag is processed and leaves in the resulting value in it's place
#it should be possible to nest pstrings, so <pstring> tags can contain <pstring> tags. It looks bad in the html, but it should work just fine.
#pstrings are processed at the end of page processing
n = 0
remembern = 0 #used for nested tags
while n <> -1:
ptagerr = ''
pstringfunction = ''
pstring = ''
n = string.find(file, "<pstring ", n)
if n <> -1:
n2 = string.find(file, "</pstring>", n) + 10
if n2 <> -1:
#right here is where to add nested testing.
nestn = string.find(file[n+8:n2], "<pstring ")
remembern = 0 + n
while nestn != -1: #find the innermost starting pstring
n = n + 8 + nestn
nestn = string.find(file[n+8:n2], "<pstring ")
ptag = file[n:n2]
s1 = string.find(ptag, 'function="', 0)
if s1 == -1:
ptagerr = ptagerr + 'parse error: no function=" found in pstring'
else:
s2 = string.find(ptag, '">', s1)
if s2 == -1:
s2 = string.find(ptag, '" >', s1) # this adds a space after the dbl quote
elif s2 == -1:
ptagerr = ptagerr + 'parse error: pstring tag has no close ">:" eg. function="blahblah">:'
pstringfunction = ptag[s1+10:s2]
if ptagerr == '':
pstring = file[n+s2+2:n2-10]
newpstring = ''
pstringexec = 'newpstring = pstring.' + pstringfunction
try:
exec pstringexec
file = string.replace(file, ptag, newpstring)
except SyntaxError:
ptagerr = ptagerr + '<br>SyntaxError with pstring function: %s ' % (pstringfunction)
ptagerr = ptagerr + '<br>try your function in python and see if it works.'
file = string.replace(file, ptag, ptagerr, 1)
except AttributeError, e:
ptagerr = ptagerr + '<br>AttributeError with pstring function: %s ' % (pstringfunction)
ptagerr = ptagerr + '<br>%s ' % str(e)
file = string.replace(file, ptag, ptagerr, 1)
else:
file = string.replace(file, ptag, ptagerr, 1)
n = 0 + remembern
else:
ptagerr = ptagerr + '<b>pstring has no closing tag </pstring><b>'
file = file[0:n] + ptagerr + file[n:len(file)]
n | |
<reponame>alex-bv/envysec
import logging
import os
import pathlib
import queue
import subprocess # WARNING, POSSIBLE SECURITY ISSUE: Bandit report: 'Consider possible security implications associated with subprocess module.'
import threading
class ClamAV():
""" ClamAV command class. This is not a stand-alone scanner.
It depends on original ClamAV and used to perform an easier-control.
Available methods:
public: scan, update
private: __scan, __update, __call_proc, __resolve_path
Required packages (dependencies):
built-in: logging, os, pathlib, queue, subprocess, threading
3-d party: -
To perform a scan, it uses sys.Popen to call for a ClamAV bin with a customized args.
ClamAV official site (2018): www.clamav.net
Cisco (ClamAV owner and maintainer) official site (2018): www.cisco.com
"""
def __init__(self, config: dict, logging_level = 30):
""" ClamAV class used to control ClamAV app.
'config' - dictionary with paths to ClamAV bins (freshclam & clamscan);
'logging_level' - verbosity of logging:
0 - debug,
30 - warnings,
50 - critical.
See 'logging' docs;
"""
logging.basicConfig(level = logging_level,
filemode = 'a',
format=f"%(asctime)s - [%(levelname)s] - %(name)s - (%(filename)s).%(funcName)s(%(lineno)d) - %(message)s",
datefmt='%d.%m.%Y %H:%M:%S')
self.ClamLog = logging.getLogger('ClamAV')
self.ClamLog.debug('Initializing class...')
self.configuration = config
self.clamav_queue = queue.Queue()
self.ClamLog.debug('Class initialized.')
def scan(self, targets: list, args = ['-i', '-r', '--no-summary', '--alert-exceeds-max=no'], exclude = None) -> str:
""" Method used to perform a ClamAV scan.
'targets' - list of paths to be scanned;
'args' - list of arguments to be sent to ClamAV;
'exclude' - list of paths not to be scanned.
Return False if file/dir (target) does not exists, might not be accessed
or in exclude list (see config).
If target exist and not in exclude list, it will call for ClamAV
and yield it\'s output.
Argument 'target' is a file or dir to be scanned.
'Args' are arguments list to be sent to ClamAV bin
(see ClamAV documentations for more).
Argument 'Exclude' is a list with valid paths not to be scanned.
Every in 'Exclude' will be formated individually (file or dir type definition, formation to '--exclude' or '--exclude-dir')
Default scanner behaveour is (arguments descriptions):
show only infected files (-i). It also will show all files, that might not be accessed by ClamAV;
scan recursively (-r). It usefull for scanning whole dir;
do not show summary at the end of scan (--no-summary);
do not show 'exceeds max' errors (--alert-exceeds-max=no).
"""
self.ClamLog.debug('Starting scan.')
def __parse_line(line: str) -> bool:
""" Check if 'line' report thread found.
'line' must ends with 'FOUND' and starts with path to infected file.
Return True if both conditions are met;
Return False if one of conditions was not met.
"""
self.ClamLog.debug('Checking {}'.format(line))
if line.strip().endswith(' FOUND') is True and os.path.exists(line.split(': ')[0]) is True:
self.ClamLog.debug('{} met conditions, return True.'.format(line))
return True
else:
self.ClamLog.debug('{} have not met conditions, return False.'.format(line))
return False
self.ClamLog.debug('Retrieving exceptions...')
if exclude is not None and exclude != []:
exception_list = str() # Used to translate exclude elements to string; example: exclude = ['a/b/c'] -> except_list = '--exclude=a/b/c'
for exception in exclude:
exception_path = self.__resolve_path(exception)
if os.path.isdir(exception_path) is True:
exception_list += '--exclude-dir={}'.format(exception_path)
elif os.path.isfile(exception_path) is True:
exception_list += '--exclude={}'.format(exception_path)
elif os.path.islink(exception_path) is True:
self.ClamLog.info('{} is a symbolic link, trying to follow...'.format(exception_path))
exception_list += '--exclude={}'.format(exception_path)
elif os.path.ismount(exception_path) is True:
self.ClamLog.info('{} is a mount point, trying to continue...'.format(exception_path))
exception_list += '--exclude={}'.format(exception_path)
else:
self.ClamLog.warning('type of {} is not defined, trying to continue...'.format(exception_path))
exception_list += '--exclude={}'.format(exception_path)
exception_list += ' ' # Add space, ClamAV does\'nt support comma-separated lists.
args.append(exception_list.strip()) # Strip whitespace at the end;
self.ClamLog.debug('Checking targets...')
targets = [self.__resolve_path(target) for target in targets]
_targets = list() # Prevent empty 'targets' list to be insert in 'args'.
for target in targets:
if os.path.exists(target) is False:
self.ClamLog.info('{} does not exists, so could not be scanned.'.format(target))
elif target in exclude:
self.ClamLog.info('{} is in exclude list, so will not be scanned.'.format(target))
else:
self.ClamLog.debug('{} added to scan list.'.format(target))
_targets.append(target)
if len(_targets) > 0: # Prevent empty 'targets' list to be insert in 'args'.
for target in _targets:
args.insert(0, target)
else:
self.ClamLog.error('No targets to be scanned has been specified!')
self.ClamLog.info('Maybe target in exclude list or does not exists?')
raise ValueError('''
No targets to be scanned has been specified!
Maybe targets in exclude list or not exists?
''')
self.ClamLog.debug('Starting work...')
for line in self.__call_proc(self.__scan, args = args):
self.ClamLog.debug('Init __parse_line...')
if __parse_line(line) is True:
self.ClamLog.debug('line reports True.')
self.ClamLog.warning('FOUND: {}'.format(str(line)))
yield line
else:
self.ClamLog.debug('line reports False.')
self.ClamLog.warning('unknown line: {}'.format(str(line)))
def update(self, args = ['--stdout', '--show-progress']) -> str:
""" Method used to perform a ClamAV database update.
It yield\'s ClamAV Update output.
Some Linux systems don\'t require manual update.
(see 'freshclamd' state)
'args' are arguments list to be sent to ClamAV bin
(see ClamAV documentations for more).
For more information about ClamAV, see ClamAV documentations.
Default update behaveour is (arguments descriptions):
out any lines to stdout, not to stderr (--stdout);
show update progress (--show-progress).
"""
self.ClamLog.info('ClamAV Update started.')
for line in self.__call_proc(self.__update, args=args):
self.ClamLog.info(line.strip())
yield line
    def __scan(self, *args) -> bool:
        """ 'Lower-level' method (module) of scan.

        Method used to call for ClamAV scanner bin.
        It fact, it used to call for ClamAV bin (for example: clamscan.exe on Windows)
        and put it\'s output to pipe (each stdout line goes into
        self.clamav_queue for the consumer thread).

        Return True if scan complete successfully.
        Raise OSError if OS or memory errors occurred.
        Raise ValueError if wrong internal arguments or wrong bin\'s path received.

        Args are arguments list to be sent to ClamAV bin.
        Available arguments might be found at ClamAV scan documentations or by using --help.
        """
        self.ClamLog.debug('Scan started.')
        args = list(args)
        try: # Bandit report: 'subprocess call - check for execution of untrusted input.', see line 7.
            # Binary path comes from the user-supplied configuration dict.
            with subprocess.Popen([self.configuration["Scanner"]] + args, stdout=subprocess.PIPE) as scanp:
                self.ClamLog.debug('Subprocess opened. (subprocess.Popen)')
                # Stream stdout line by line into the shared queue until EOF (b'').
                for line in iter(scanp.stdout.readline, b''):
                    self.clamav_queue.put(line)
        except MemoryError as memory_err:
            self.ClamLog.critical('Failed to perform __scan. Probably not enough memory.')
            self.ClamLog.debug('MemoryError arguments: {}'.format(str(memory_err.args)))
            raise OSError('System may not perform scan, probably not enough memory.', memory_err.args)
        except OSError as os_err:
            self.ClamLog.critical("""Failed to call for __scan. Probably, module subprocess.Popen
                                received wrong bin\'s filename.""")
            self.ClamLog.debug('OSError arguments: {}'.format(str(os_err.args)))
            raise ValueError('System may not perform scan, probably not system error raised.', os_err.args)
        except ValueError as value_err:
            self.ClamLog.critical("""Failed to call for __scan. Probably, module subprocess.Popen
                                called with invalid arguments.""")
            self.ClamLog.debug('ValueError arguments: {}'.format(str(value_err.args)))
            raise ValueError('Failed to spawn process, probably wrong internal arguments received.', value_err.args)
        else:
            self.ClamLog.debug('Scan done.')
            return True
    def __update(self, *args) -> bool:
        """ 'Lower-level' database (signatures) update method.

        It call for update bin, bin's path taken from configuration.
        It fact, it used to call for ClamAV bin (for example: freshclam.exe on Windows)
        and put it\'s output to pipe (each stdout line goes into
        self.clamav_queue for the consumer thread).

        Return True if update complete successfully.
        Raise OSError if OS or memory errors occurred.
        Raise ValueError if wrong internal arguments or wrong bin\'s path received.

        Args are arguments list to be sent to ClamAV bin.
        Available arguments might be found at ClamAV update documentations or by using --help.
        """
        self.ClamLog.debug('Update in fact started.')
        args = list(args)
        try: # WARN: Bandit report: 'subprocess call - check for execution of untrusted input.', see line 7.
            # Binary path comes from the user-supplied configuration dict.
            with subprocess.Popen([self.configuration["Updater"]] + args, stdout=subprocess.PIPE) as updatep:
                self.ClamLog.debug('Subprocess opened. (subprocess.Popen)')
                # Stream stdout line by line into the shared queue until EOF (b'').
                for line in iter(updatep.stdout.readline, b''):
                    self.clamav_queue.put(line)
        except OSError as os_err:
            self.ClamLog.critical("""Failed to call for __update. Probably, module subprocess.Popen
                                received wrong bin\'s filename.""")
            self.ClamLog.debug('OSError arguments: {}'.format(str(os_err.args)))
            raise ValueError('Failed to spawn process, probably wrong bin\'s filename received.', os_err.args)
        except ValueError as value_err:
            self.ClamLog.critical("""Failed to call for __update. Probably, module subprocess.Popen
                                called with invalid arguments.""")
            self.ClamLog.debug('ValueError arguments: {}'.format(str(value_err.args)))
            raise ValueError('Failed to spawn process, probably wrong internal arguments received.', value_err.args)
        except MemoryError as memory_err:
            self.ClamLog.critical('Failed to perform __update. Probably not enough memory.')
            self.ClamLog.debug('MemoryError arguments: {}'.format(str(memory_err.args)))
            raise MemoryError('System may not perform update, probably not enough memory.', memory_err.args)
        else:
            self.ClamLog.debug('Update done.')
            return True
def __call_proc(self, work: 'function', args = None) -> str:
""" Initialize main work thread.
It used to call for main working function (like scan or update).
'work' - name of function to be called.
'args' - list of arguments to be sent to work function.
Yield work\'s function output.
"""
self.ClamLog.debug('Initialize work thread.')
if args | |
= []
visible_token_buffer = []
first_invis_token = None
first_vis_token = None
for token in page.tokens:
if token.token == PAGE_BEGIN or token.token == PAGE_END:
continue
if token.visible:
if token.whitespace_text and previous_visible:
visible_token_buffer.append(' ')
visible_token_buffer.append(token.token)
if first_vis_token is None:
first_vis_token = token
previous_visible = True
elif previous_visible:
previous_visible = False
if data_as_strings:
datas.append({"page_id": page_id, "visible_token_buffer": ''.join(visible_token_buffer),
"invisible_token_buffer_before": ''.join(invisible_token_buffer_before),
"first_vis_token": first_vis_token, "first_invis_token": first_invis_token})
else:
datas.append({"page_id": page_id, "visible_token_buffer": visible_token_buffer,
"invisible_token_buffer_before": invisible_token_buffer_before,
"first_vis_token": first_vis_token, "first_invis_token": first_invis_token})
invisible_token_buffer_before = []
visible_token_buffer = []
first_invis_token = None
first_vis_token = None
if token.whitespace_text and not previous_visible:
invisible_token_buffer_before.append(' ')
invisible_token_buffer_before.append(token.token)
if first_invis_token is None:
first_invis_token = token
else:
if token.whitespace_text and not previous_visible:
invisible_token_buffer_before.append(' ')
invisible_token_buffer_before.append(token.token)
if first_invis_token is None:
first_invis_token = token
return datas
def getVisibleTokenBuffer(self, page_id):
page = self.getPage(page_id)
previous_visible = False
token_buffer = ""
for token in page.tokens:
if token.token == PAGE_BEGIN or token.token == PAGE_END:
continue
# token_buffer += token.token + " - " + str(token.visible) + "\n"
if token.visible:
if token.whitespace_text and previous_visible:
token_buffer += ' '
token_buffer += token.token
previous_visible = True
elif previous_visible:
previous_visible = False
token_buffer += "\n-------------\n"
return token_buffer
def getStripeFragmentsForSlot(self, start_stripe, direction = 'begin'):
stripe_fragments = []
stripes_to_check = []
if direction == 'begin':
stripes_to_check = reversed(self._stripes[:self._stripes.index(start_stripe)+1])
elif direction == 'end':
stripes_to_check = self._stripes[self._stripes.index(start_stripe):]
previous_loc = start_stripe['page_locations'][self.seed_page_id] + start_stripe['tuple_size']
for stripe in stripes_to_check:
if stripe['page_locations'][self.seed_page_id] + stripe['tuple_size'] == previous_loc:
stripe_fragments.insert(0, stripe)
previous_loc = stripe['page_locations'][self.seed_page_id]
else:
break
return stripe_fragments
    def getDebugOutputBuffer(self, page_id):
        """Render the page as HTML with each learned stripe wrapped in a
        styled <pre> block (opacity keyed to the stripe's level) for visual
        debugging.

        Builds a one-stripe rule for every stripe and searches for it in the
        not-yet-consumed remainder of the page string, advancing past each
        match so stripes are highlighted in order.
        """
        counter = 0;
        start_index = 0;
        page = self.getPage(page_id)
        # Strip the sentinel tokens before rendering.
        page_string = page.getString().replace(PAGE_BEGIN, '').replace(PAGE_END, '')
        output_buffer = ''
        for stripe in self._stripes:
            # Search only in the portion after the previous match.
            test_page = page_string[start_index:]
            test_stripes = []
            test_stripes.append(stripe)
            test_rule = self.buildRule(test_stripes)
            finder = re.compile(test_rule, re.S)
            match = finder.search(test_page)
            if match and match.start() >= 0:
                # NOTE(review): replace('<', '<') is a no-op -- the
                # replacement was probably '&lt;' (HTML-escaping) before an
                # entity-mangling pass; confirm against upstream history.
                output_buffer = output_buffer + test_page[0:match.start()].replace('<', '<')
                opacity = self.max_level/(1.0 * stripe['level'] * self.max_level)
                output_buffer = output_buffer + "<pre class='stripe' style='opacity:"+ str(opacity)+ ";' title='Stripe "+str(counter)+" / Level "+str(stripe['level'])+"'>"
                output_buffer = output_buffer + test_page[match.start():match.end()].replace('<', '<')
                output_buffer = output_buffer + "</pre>"
                start_index = start_index + match.end()
            counter = counter + 1
        # Append whatever trails the final matched stripe.
        output_buffer = output_buffer + page_string[start_index:].replace('<', '<')
        return output_buffer
    def learnListMarkups(self):
        """Detect HTML lists on the seed page, align them across pages, and
        learn per-row extraction rules.

        Returns (markup, list_names):
          markup     - {page_id: {list_name: {'extract': ..., 'sequence': [...]}}}
          list_names - {list_name: [slot names learned for that list's rows]}

        Pipeline: (1) scan the seed page tokens for any opening tag in
        list_tags and find its matching span; (2) map those spans onto every
        page via __trim_and_update_list_locations; (3) split each list into
        rows, record row extracts, and learn rules over the rows with a
        throwaway PageManager.
        """
        list_start_tags = list_tags.keys()
        list_locations = []
        seed_page = self.getPage(self.seed_page_id)
        loc = 0
        while loc <= len(seed_page.tokens):
            next_loc = loc + 1
            for list_start_tag in list_start_tags:
                if self.__next_loc_equals(loc, seed_page.tokens, list_start_tag):
                    logger.info('found ' + list_start_tag + ' at ' + str(loc) + " on " + self.seed_page_id)
                    end_tag = list_tags[list_start_tag][0]
                    end = self.__find_list_span(loc+1, seed_page.tokens, list_start_tag, end_tag)
                    if end > 0:
                        list_info = {}
                        list_info['tag'] = list_start_tag
                        list_info['pages'] = {}
                        list_info['pages'][self.seed_page_id] = {}
                        list_info['pages'][self.seed_page_id]['location'] = (loc, end)
                        list_locations.append( list_info )
                        logger.info('found ' + end_tag + ' at ' + str(loc) + " on " + self.seed_page_id)
                        # Jump past the whole list span.
                        next_loc = end
            loc = next_loc
        # Project the seed-page list spans onto every other page.
        list_locations = self.__trim_and_update_list_locations(list_locations)
        markup = {}
        list_names = {}
        count = 1
        for list_location in list_locations:
            list_name = '_list'+format(count, '04')
            count += 1
            # Fresh sub-manager: each row becomes a "page" whose shared
            # structure we learn rules from.
            row_page_manager = PageManager(self._WRITE_DEBUG_FILES)
            for page_id in list_location['pages'].keys():
                if page_id not in markup:
                    markup[page_id] = {}
                if list_name not in markup[page_id]:
                    markup[page_id][list_name] = {}
                (start, end) = list_location['pages'][page_id]['location']
                page = self.getPage(page_id)
                list_text = page.tokens.getTokensAsString(start, end, True)
                markup[page_id][list_name]['extract'] = list_text
                rows = self.__get_list_rows(page_id, list_location)
                list_location['pages'][page_id]['rows'] = rows
                for row_info in list_location['pages'][page_id]['rows']:
                    if 'sequence' not in markup[page_id][list_name]:
                        markup[page_id][list_name]['sequence'] = []
                    row_markup = {}
                    row_markup['sequence_number'] = row_info['sequence_num']
                    (row_start, row_end) = row_info['location']
                    ## Trim off the start and end tags so we can learn things
                    row_text_offset_start = 0
                    for token in page.tokens[row_start:row_end]:
                        row_text_offset_start += 1
                        if token.token == '>':
                            break
                    row_text_offset_end = 0
                    for token in reversed(page.tokens[row_start:row_end]):
                        row_text_offset_end += 1
                        if token.token == '<':
                            break
                    row_text = page.tokens.getTokensAsString(row_start+row_text_offset_start, row_end-row_text_offset_end, True)
                    row_markup['extract'] = row_text
                    markup[page_id][list_name]['sequence'].append(row_markup)
                page = self.getPage(page_id)
                for row in rows:
                    (start, end) = row['location']
                    # Synthetic page id: page + row sequence number.
                    page_name = page_id + str(row['sequence_num'])
                    row_page_manager.addPage(list_name+page_name, page.tokens.getTokensAsString(start, end, True), False)
            row_page_manager.learnStripes()
            row_rules = row_page_manager.learnAllRules(in_list=True)
            if len(row_rules.rules) > 1:
                row_markups, names = row_page_manager.rulesToMarkup(row_rules, remove_html=True)
                list_names[list_name] = names
                for markup_page in row_markups.keys():
                    # Undo the synthetic naming: recover page id and row number.
                    page_id = markup_page.split('.html')[0][len(list_name):] + '.html'
                    sequence_num = markup_page.split('.html')[-1]
                    for name in names:
                        if name in row_markups[markup_page]:
                            markup[page_id][list_name]['sequence'][int(sequence_num)-1][name] = row_markups[markup_page][name]
        return markup, list_names
def __get_list_rows(self, page_id, list_location):
row_infos = []
(start, end) = list_location['pages'][page_id]['location']
page = self.getPage(page_id)
list_text = page.tokens.getTokensAsString(start, end, True)
list_tokens = tokenize(list_text)
start_tag = list_tags[list_location['tag']][1]
end_tag = list_tags[list_location['tag']][2]
loc = 0
sequence = 1
while loc <= len(list_tokens):
next_loc = loc + 1
if self.__next_loc_equals(loc, list_tokens, start_tag):
logger.info('found ' + start_tag + ' at ' + str(loc+start) + " on " + page_id)
end = self.__find_list_span(loc+1, list_tokens, start_tag, end_tag)
if end > 0:
row_info = {}
row_info['start_tag'] = start_tag
row_info['end_tag'] = end_tag
row_info['location'] = (loc+start, end+start)
row_info['sequence_num'] = sequence
sequence += 1
row_infos.append( row_info )
next_loc = end
logger.info('found ' + end_tag + ' at ' + str(end+start) + " on " + page_id)
loc = next_loc
return row_infos
    def __trim_and_update_list_locations(self, list_locations):
        """Keep only lists that are NOT fully covered by the page template,
        and project each surviving list's location onto every page.

        A list whose seed-page span falls entirely inside one contiguous run
        of stripe locations is template text and is filtered out.  For kept
        lists, the location on each page is derived from the start/end
        stripes' page_locations plus the offsets within those stripes.

        Requires Python 2 (tuple-unpacking lambda below).
        """
        new_list_locations = []
        for list_loc in list_locations:
            (start_seed_loc, end_seed_loc) = list_loc['pages'][self.seed_page_id]['location']
            start_stripe = None
            start_offset = 0
            end_stripe = None
            end_offset = 0
            locations = []
            for stripe in self._stripes:
                seed_page_stripe_range = range(stripe['page_locations'][self.seed_page_id], stripe['page_locations'][self.seed_page_id] + stripe['tuple_size'])
                if start_seed_loc in seed_page_stripe_range:
                    start_stripe = stripe
                    start_offset = start_seed_loc - stripe['page_locations'][self.seed_page_id]
                if start_stripe:
                    # Accumulate every stripe location between start and end
                    # so we can test the span for contiguity below.
                    locations.extend(range(stripe['page_locations'][self.seed_page_id], stripe['page_locations'][self.seed_page_id]+stripe['tuple_size']))
                if end_seed_loc in seed_page_stripe_range:
                    end_stripe = stripe
                    end_offset = end_seed_loc - stripe['page_locations'][self.seed_page_id]
                    break
            if start_stripe and end_stripe:
                # Classic groupby trick: consecutive integers share i-x.
                continuous_items = []
                for k, g in groupby(enumerate(locations), lambda (i, x): i-x):
                    continuous_items.append(map(itemgetter(1), g))
                if len(continuous_items) > 1:
                    start_stripe_locations = start_stripe['page_locations']
                    end_stripe_locations = end_stripe['page_locations']
                    for page_id in start_stripe['page_locations'].keys():
                        list_loc['pages'][page_id] = {}
                        list_loc['pages'][page_id]['location'] = {}
                        list_loc['pages'][page_id]['location'] = (start_stripe_locations[page_id]+start_offset, end_stripe_locations[page_id]+end_offset)
                        # NOTE(review): appending inside the per-page loop
                        # adds the same list_loc once per page -- looks like
                        # it belongs after the loop; confirm.
                        new_list_locations.append(list_loc)
                else:
                    logger.info('Filtered out (' + str(start_seed_loc) + ', ' + str(end_seed_loc) + ') because in the template.')
        return new_list_locations
def __next_loc_equals(self, loc, seed_tokens, marker):
tokens = tokenize(marker)
for index in range(0, len(tokens)):
if len(seed_tokens)-1 < (loc+index) or tokens[index].token != seed_tokens[loc+index].token:
return False
return True
def __find_list_span(self, loc, seed_tokens, start_marker, end_marker):
while loc <= len(seed_tokens):
next_loc = loc + 1
if self.__next_loc_equals(loc, seed_tokens, start_marker):
next_loc = self.__find_list_span(loc+1, seed_tokens, start_marker, end_marker)
elif self.__next_loc_equals(loc, seed_tokens, end_marker):
tokens = tokenize(end_marker)
return loc+len(tokens)
loc = next_loc
return -1
    def learnStripes(self, markups = {}):
        """Learn the template "stripes" shared by all pages and store them in
        self._stripes.

        'markups' maps page_id -> slot -> {'extract': ...}; token locations of
        already-extracted values are blacklisted so they cannot become part of
        the template.  Date-like tokens are always blacklisted.  Stripes are
        created recursively over the page intervals, sorted, merged, and given
        sequential ids; optional per-page debug HTML is written when
        self._WRITE_DEBUG_FILES is set.

        NOTE(review): 'markups = {}' is a shared mutable default; it is only
        read here, but a None default would be safer.
        """
        start_time = time.time()
        self.blacklist_locations = {}
        for page_id in markups:
            if page_id not in self.blacklist_locations:
                self.blacklist_locations[page_id] = []
            for markup in markups[page_id]:
                #TODO: ADD starting_token_location and end_token_location
                # Only plain single-value slots; list slots carry 'sequence'.
                if 'extract' in markups[page_id][markup] and 'sequence' not in markups[page_id][markup]:
                    shortest_pairs = self.getPossibleLocations(page_id, markups[page_id][markup]['extract'])
                    if not shortest_pairs:
                        logger.info("Unable to find markup for %s on page %s: %s", markup, page_id, markups[page_id][markup]['extract'])
                    for pair in shortest_pairs:
                        self.blacklist_locations[page_id].extend(range(pair[0], pair[1]+1))
        logger.info("--- BLACKLIST LOCATION SETUP: %s seconds ---" % (time.time() - start_time))
        # Always keep date-like tokens out of the learned template.
        special_blacklist_tokens = ['2014',
                                    '2015',
                                    '2016',
                                    'January',
                                    'February',
                                    'March',
                                    'April',
                                    'May',
                                    'June',
                                    'July',
                                    'August',
                                    'September',
                                    'October',
                                    'November',
                                    'December',
                                    'Jan',
                                    'Feb',
                                    'Mar',
                                    'Apr',
                                    'May',
                                    'Jun',
                                    'Jul',
                                    'Aug',
                                    'Sept',
                                    'Sep',
                                    'Oct',
                                    'Nov',
                                    'Dec'
                                    ]
        intervals = {}
        for page in self._pages:
            # Full token interval of each page, as [start, end].
            intervals[page] = [0, len(self._pages[page].tuples_by_size[1])]
            #ADDING special characters
            if page not in self.blacklist_locations:
                self.blacklist_locations[page] = []
            for special_blacklist_token in special_blacklist_tokens:
                shortest_pairs = self.getPossibleLocations(page, special_blacklist_token)
                for pair in shortest_pairs:
                    self.blacklist_locations[page].extend(range(pair[0], pair[1]+1))
        start_time = time.time()
        unsorted_stripes = self.__create_stripes_recurse__(intervals, self.largest_tuple_size)
        logger.info("--- RECURSIVE CREATE STRIPES: %s seconds ---" % (time.time() - start_time))
        if not unsorted_stripes:
            self._stripes = []
            return
        start_time = time.time()
        sorted_unmerged_stripes = []
        for item in sorted(unsorted_stripes.items()):
            sorted_unmerged_stripes.append(item[1])
        logger.info("--- SORT STRIPES: %s seconds ---" % (time.time() - start_time))
        start_time = time.time()
        merged_stripes = self.__merge_stripes__(sorted_unmerged_stripes)
        logger.info("--- MERGE STRIPES: %s seconds ---" % (time.time() - start_time))
        counter = 0;
        for s in merged_stripes:
            s['id'] = counter
            logger.info("Stripe %s: %s", counter, s)
            counter = counter + 1
        self._stripes = merged_stripes
        if self._WRITE_DEBUG_FILES:
            for page in self._pages:
                with codecs.open('debug'+page+'.html', "w", "utf-8") as myfile:
                    output_html = DEBUG_HTML.replace('PAGE_ID', page).replace('DEBUG_HTML', self.getDebugOutputBuffer(page))
                    myfile.write(output_html)
                    myfile.close()
def listRulesToMarkup(self, rule_set, remove_html = False):
markup = {}
names = {}
for page_id in self._pages:
markup[page_id] = {}
for page_id in self._pages:
page_string = self.getPage(page_id).getString()
extraction = rule_set.extract(page_string)
markup[page_id] = | |
"""Common plot functions."""
import logging
import os
from copy import deepcopy
import cartopy.crs as ccrs
import iris.quickplot
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import yaml
logger = logging.getLogger(__name__)
def _process_axes_functions(axes, axes_functions):
"""Process axes functions of the form `axes.functions(*args, **kwargs)."""
if axes_functions is None:
return None
output = None
for (func, attr) in axes_functions.items():
axes_function = getattr(axes, func)
# Simple functions (argument directly given)
if not isinstance(attr, dict):
try:
out = axes_function(*attr)
except TypeError:
out = axes_function(attr)
# More complicated functions (args and kwargs given)
else:
args = attr.get('args', [])
kwargs = attr.get('kwargs', {})
# Process 'transform' kwargs
if 'transform' in kwargs:
kwargs['transform'] = getattr(axes, kwargs['transform'])
out = axes_function(*args, **kwargs)
# Return legend if possible
if func == 'legend':
output = out
return output
def _check_size_of_parameters(*args):
"""Check if the size of (array-like) args is identical."""
if len(args) < 2:
logger.warning("Less than two arguments given, comparing not possible")
return
arg_0 = args[0]
for arg in args:
try:
if len(arg_0) != len(arg):
raise ValueError("Invalid input: array-like parameters need "
"to have the same size")
except TypeError:
raise TypeError("Invalid input: some parameters are not "
"array-like")
return
def get_path_to_mpl_style(style_file=None):
    """Return the absolute path of a matplotlib style file.

    Falls back to ``default.mplstyle`` when no file is given and appends
    the ``.mplstyle`` suffix when it is missing.  The file is looked up in
    the ``styles_python/matplotlib`` directory next to this module.
    """
    if style_file is None:
        style_file = 'default.mplstyle'
    if not style_file.endswith('.mplstyle'):
        style_file = style_file + '.mplstyle'
    style_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'styles_python', 'matplotlib')
    filepath = os.path.join(style_dir, style_file)
    logger.debug("Using matplotlib style: %s", filepath)
    return filepath
def get_dataset_style(dataset, style_file=None):
    """Retrieve the style information for the given dataset.

    Parameters
    ----------
    dataset : str
        Name of the dataset.
    style_file : str, optional
        Name of a style file (``.yml``) located in the ``styles_python``
        directory next to this module; defaults to ``cmip5.yml``.

    Returns
    -------
    dict
        Style options (``color``, ``dash``, ``thick``, ``mark``,
        ``avgstd``, ``facecolor``) for the dataset.  Missing options (or
        an unknown dataset) fall back to the ``default`` section.

    Raises
    ------
    FileNotFoundError
        The style file cannot be found.
    ValueError
        The style file lacks the ``default`` section or a compulsory
        option within it.
    """
    if style_file is None:
        style_file = 'cmip5.yml'
        # Fixed: this message used to be a plain string containing the
        # literal text '{style_file}' (missing 'f' prefix); use lazy
        # %-style formatting as recommended for logging calls.
        logger.debug("Using default style file %s", style_file)
    if not style_file.endswith('.yml'):
        style_file += '.yml'
    base_dir = os.path.dirname(os.path.realpath(__file__))
    default_dir = os.path.join(base_dir, 'styles_python')
    # Check if style_file is valid
    filepath = os.path.join(default_dir, style_file)
    if os.path.isfile(filepath):
        with open(filepath, 'r') as infile:
            style = yaml.safe_load(infile)
    else:
        raise FileNotFoundError(f"Cannot open style file {filepath}")
    logger.debug("Using style file %s for dataset %s", filepath, dataset)
    # Check if file has entry for unknown dataset
    default_dataset = 'default'
    options = ['color', 'dash', 'thick', 'mark', 'avgstd', 'facecolor']
    if default_dataset not in style:
        raise ValueError(f"Style file {filepath} does not contain section "
                         f"[{default_dataset}] (used for unknown datasets)")
    for option in options:
        if option not in style[default_dataset]:
            raise ValueError(
                f"Style file {filepath} does not contain default information "
                f"for '{option}' (under section [{default_dataset}])")
    # Check if dataset is available
    if not style.get(dataset):
        logger.warning(
            "Dataset '%s' not found in style file, using default "
            "entry", dataset)
        return style[default_dataset]
    # Get compulsory information, filling gaps from the default section
    for option in options:
        if option not in style[dataset]:
            default_option = style[default_dataset][option]
            logger.warning(
                "No style information '%s' found for dataset '%s', using "
                "default value '%s' for unknown datasets", option, dataset,
                default_option)
            style[dataset][option] = default_option
    return style[dataset]
def _check_cube(cube):
    """Ensure ``cube`` has 'latitude' and 'longitude' dimensional coords.

    Any additional dimensional coordinate is collapsed by averaging so the
    result is suitable for a global 2D plot.
    """
    dim_coord_names = [coord.name() for coord in cube.coords(dim_coords=True)]
    for required in ('latitude', 'longitude'):
        if required not in dim_coord_names:
            raise iris.exceptions.CoordinateNotFoundError(
                f"Cube {cube.summary(shorten=True)} does not contain "
                f"necessary coordinate '{required}' for plotting global "
                f"filled contour plot")
        dim_coord_names.remove(required)
    # Whatever dimensional coordinates remain (e.g. time) are averaged out
    if dim_coord_names:
        logger.debug("Collapsing coordinates %s by calculating mean",
                     dim_coord_names)
        cube = cube.collapsed(dim_coord_names, iris.analysis.MEAN)
    return cube
def _truncate_colormap(cmap_name, minval=0.0, maxval=1.0, n_colors=100):
    """Return a copy of ``cmap_name`` restricted to ``[minval, maxval]``."""
    base_cmap = plt.get_cmap(cmap_name)
    # Sample n_colors evenly spaced colors from the requested sub-range
    sampled_colors = base_cmap(np.linspace(minval, maxval, n_colors))
    return colors.LinearSegmentedColormap.from_list(
        f'trunc({cmap_name},{minval:.2f},{maxval:.2f})', sampled_colors)
def _get_centered_cmap(cmap_name, vmin, vmax, center, n_colors=100):
"""Get centered colormap."""
if not vmin < center < vmax:
raise ValueError(
f"Expected monotonic increase vmin < center < vmax, got vmin = "
f"{vmin}, vmax = {vmax}, center = {center}")
if center - vmin > vmax - center:
minval = 0.0
maxval = 0.5 + (vmax - center) / (center - vmin) / 2.0
else:
minval = 0.5 - (center - vmin) / (vmax - center) / 2.0
maxval = 1.0
return _truncate_colormap(cmap_name, minval, maxval, n_colors)
def global_contourf(cube,
                    cbar_center=None,
                    cbar_label=None,
                    cbar_range=None,
                    cbar_ticks=None,
                    **kwargs):
    """Plot global filled contour plot.

    Note
    ----
    This is only possible if the cube has the coordinates `latitude` and
    `longitude`. A mean is performed over excessive coordinates.

    Parameters
    ----------
    cube : iris.cube.Cube
        Cube to plot.
    cbar_center : float, optional
        Central value for the colormap, useful for diverging colormaps. Can
        only be used if ``cbar_range`` is given.
    cbar_label : str, optional
        Label for the colorbar.
    cbar_range : list of float, optional
        Range of the colorbar (first and second list element) and number of
        distinct colors (third element). See :mod:`numpy.linspace`.
    cbar_ticks : list, optional
        Ticks for the colorbar.
    **kwargs
        Keyword argument for :mod:`iris.plot.contourf()`.

    Returns
    -------
    matplotlib.contour.QuadContourSet
        Plot object.

    Raises
    ------
    iris.exceptions.CoordinateNotFoundError
        :class:`iris.cube.Cube` does not contain necessary coordinates
        ``'latitude'`` and ``'longitude'``.
    """
    # Work on a copy so the caller's kwargs are not mutated below
    kwargs = deepcopy(kwargs)
    logger.debug("Plotting global filled contour plot for cube %s",
                 cube.summary(shorten=True))
    # Collapse any coordinate other than latitude/longitude by averaging
    cube = _check_cube(cube)
    # Adapt colormap if necessary
    if cbar_center is not None:
        if cbar_range is None:
            raise ValueError(
                "'cbar_center' can only be used if 'cbar_range' is given")
        cmap = kwargs.get('cmap', plt.get_cmap())
        # Third element of cbar_range (if present) is the number of colors
        n_colors = cbar_range[2] if len(cbar_range) > 2 else 100
        cmap = _get_centered_cmap(cmap, cbar_range[0], cbar_range[1],
                                  cbar_center, n_colors)
        kwargs['cmap'] = cmap
    # Create plot
    if cbar_range is not None:
        levels = np.linspace(*cbar_range)
        kwargs['levels'] = levels
    # Robinson projection centered at 10 degrees east longitude
    axes = plt.axes(projection=ccrs.Robinson(central_longitude=10))
    plt.sca(axes)
    map_plot = iris.plot.contourf(cube, **kwargs)
    # Appearance
    axes.gridlines(color='lightgrey', alpha=0.5)
    axes.coastlines()
    axes.set_global()
    colorbar = plt.colorbar(orientation='horizontal', aspect=30)
    if cbar_ticks is not None:
        colorbar.set_ticks(cbar_ticks)
        colorbar.set_ticklabels([str(tick) for tick in cbar_ticks])
    elif cbar_range is not None:
        # NOTE(review): ticks inherit the dtype of cbar_range[0], so an
        # integer lower bound yields integer tick labels; endpoint=False
        # omits the maximum value from the ticks — confirm both intended.
        ticks = np.linspace(*cbar_range[:2],
                            10,
                            endpoint=False,
                            dtype=type(cbar_range[0]))
        colorbar.set_ticks(ticks)
        colorbar.set_ticklabels([str(tick) for tick in ticks])
    if cbar_label is not None:
        colorbar.set_label(cbar_label)
    return map_plot
def global_pcolormesh(cube,
                      cbar_center=None,
                      cbar_label=None,
                      cbar_ticks=None,
                      **kwargs):
    """Plot global color mesh.

    Note
    ----
    This is only possible if the cube has the coordinates `latitude` and
    `longitude`. A mean is performed over excessive coordinates.

    Parameters
    ----------
    cube : iris.cube.Cube
        Cube to plot.
    cbar_center : float, optional
        Central value for the colormap, useful for diverging colormaps. Can
        only be used if ``vmin`` and ``vmax`` are given.
    cbar_label : str, optional
        Label for the colorbar.
    cbar_ticks : list, optional
        Ticks for the colorbar.
    **kwargs
        Keyword argument for :mod:`iris.plot.pcolormesh()`.

    Returns
    -------
    matplotlib.collections.QuadMesh
        Plot object.

    Raises
    ------
    iris.exceptions.CoordinateNotFoundError
        :class:`iris.cube.Cube` does not contain necessary coordinates
        ``'latitude'`` and ``'longitude'``.
    """
    # Work on a copy so the caller's kwargs are not mutated below
    kwargs = deepcopy(kwargs)
    # Fixed: the message previously said "filled contour plot" (copied
    # from global_contourf); this function plots a color mesh.
    logger.debug("Plotting global color mesh for cube %s",
                 cube.summary(shorten=True))
    # Collapse any coordinate other than latitude/longitude by averaging
    cube = _check_cube(cube)
    # Adapt colormap if necessary
    if cbar_center is not None:
        if not ('vmin' in kwargs and 'vmax' in kwargs):
            raise ValueError(
                "'cbar_center' can only be used if 'vmin' and 'vmax' are "
                "given")
        cmap = kwargs.get('cmap', plt.get_cmap())
        cmap = _get_centered_cmap(cmap, kwargs['vmin'], kwargs['vmax'],
                                  cbar_center)
        kwargs['cmap'] = cmap
    # Create plot
    # Robinson projection centered at 10 degrees east longitude
    axes = plt.axes(projection=ccrs.Robinson(central_longitude=10))
    plt.sca(axes)
    map_plot = iris.plot.pcolormesh(cube, **kwargs)
    # Appearance
    axes.gridlines(color='lightgrey', alpha=0.5)
    axes.coastlines()
    axes.set_global()
    colorbar = plt.colorbar(orientation='horizontal', aspect=30)
    if cbar_ticks is not None:
        colorbar.set_ticks(cbar_ticks)
        colorbar.set_ticklabels([str(tick) for tick in cbar_ticks])
    if cbar_label is not None:
        colorbar.set_label(cbar_label)
    return map_plot
def quickplot(cube, plot_type, filename=None, **kwargs):
    """Plot a cube with the :mod:`iris.quickplot` function ``plot_type``.

    The figure is saved to ``filename`` if one is given, and returned.
    """
    logger.debug("Creating '%s' plot %s", plot_type, filename)
    # Resolve the requested quickplot function by name (e.g. 'contourf')
    plot_func = getattr(iris.quickplot, plot_type)
    figure = plt.figure()
    plot_func(cube, **kwargs)
    if filename:
        figure.savefig(filename)
    return figure
def multi_dataset_scatterplot(x_data, y_data, datasets, filepath, **kwargs):
"""Plot a multi dataset scatterplot.
Notes
-----
Allowed keyword arguments:
* `mpl_style_file` (:obj:`str`): Path to the matplotlib style file.
* `dataset_style_file` (:obj:`str`): Path to the dataset style file.
* `plot_kwargs` (`array-like`): Keyword arguments for the plot (e.g.
`label`, `makersize`, etc.).
* `save_kwargs` (:obj:`dict`): Keyword arguments for saving the plot.
* `axes_functions` (:obj:`dict`): Arbitrary functions for axes, i.e.
`axes.set_title('title')`.
Parameters
----------
x_data : array-like
x data of each dataset.
y_data : array-like
y data of each dataset.
datasets : array-like
Names of the datasets.
filepath : str
Path to which plot is written.
**kwargs
Keyword arguments.
Raises
------
TypeError
A non-valid keyword argument is given or `x_data`, `y_data`, `datasets`
or (if given) `plot_kwargs` is not array-like.
ValueError
`x_data`, `y_data`, `datasets` or `plot_kwargs` do not have the same
size.
"""
# Allowed kwargs
allowed_kwargs = [
'mpl_style_file',
'dataset_style_file',
'plot_kwargs',
'save_kwargs',
'axes_functions',
]
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError("{} is not a valid keyword argument".format(kwarg))
# Check parameters
_check_size_of_parameters(x_data, y_data, datasets,
kwargs.get('plot_kwargs', x_data))
empty_dict = [{} for _ in x_data]
# Create matplotlib instances
plt.style.use(get_path_to_mpl_style(kwargs.get('mpl_style_file')))
(fig, axes) = plt.subplots()
# Plot data
for (idx, dataset) in enumerate(datasets):
style = get_dataset_style(dataset, kwargs.get('dataset_style_file'))
# Fix problem when plotting ps file
facecolor = style['color'] if filepath.endswith('ps') else \
| |
<filename>py/desitarget/tychomatch.py<gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
=====================
desitarget.tychomatch
=====================
Useful Tycho catalog matching and manipulation routines.
"""
import os
import numpy as np
import fitsio
import requests
import pickle
from datetime import datetime
from pkg_resources import resource_filename
from time import time
from astropy.io import ascii
from glob import glob
import healpy as hp
from desitarget import io
from desitarget.internal import sharedmem
from desimodel.footprint import radec2pix
from desitarget.geomask import add_hp_neighbors, radec_match_to, nside2nside
# ADM set up the DESI default logger
from desiutil.log import get_logger
log = get_logger()
# ADM start the clock
start = time()
# ADM columns contained in our version of the Tycho fits files.
tychodatamodel = np.array([], dtype=[
    ('TYC1', '>i2'), ('TYC2', '>i2'), ('TYC3', '|u1'),
    # NOTE(review): presumably RA/DEC are the observed positions and
    # MEAN_RA/MEAN_DEC the mean positions — confirm against Tycho-2 docs.
    ('RA', '>f8'), ('DEC', '>f8'),
    ('MEAN_RA', '>f8'), ('MEAN_DEC', '>f8'),
    ('SIGMA_RA', '>f4'), ('SIGMA_DEC', '>f4'),
    # ADM these are converted to be in mas/yr for consistency with Gaia.
    ('PM_RA', '>f4'), ('PM_DEC', '>f4'),
    ('SIGMA_PM_RA', '>f4'), ('SIGMA_PM_DEC', '>f4'),
    ('EPOCH_RA', '>f4'), ('EPOCH_DEC', '>f4'),
    ('MAG_BT', '>f4'), ('MAG_VT', '>f4'), ('MAG_HP', '>f4'), ('ISGALAXY', '|u1'),
    # NOTE(review): JMAG/HMAG/KMAG look like infrared magnitudes and
    # ZGUESS a redshift estimate — verify their provenance upstream.
    ('JMAG', '>f4'), ('HMAG', '>f4'), ('KMAG', '>f4'), ('ZGUESS', '>f4')
])
def get_tycho_dir():
    """Convenience function to grab the Tycho environment variable.

    Returns
    -------
    :class:`str`
        The directory stored in the $TYCHO_DIR environment variable.

    Raises
    ------
    ValueError
        If $TYCHO_DIR is not set.
    """
    directory = os.environ.get('TYCHO_DIR')
    # ADM fail loudly if the environment variable has not been set.
    if directory is None:
        message = "Set $TYCHO_DIR environment variable!"
        log.critical(message)
        raise ValueError(message)
    return directory
def get_tycho_nside():
    """Grab the HEALPixel nside to be used throughout this module.

    Returns
    -------
    :class:`int`
        The HEALPixel nside number for Tycho file creation and retrieval.
    """
    # ADM all Tycho HEALPix files are stored at this fixed resolution.
    return 4
def grab_tycho(cosmodir="/global/cfs/cdirs/cosmo/staging/tycho2/"):
    """Retrieve the cosmo versions of the Tycho files at NERSC.

    Parameters
    ----------
    cosmodir : :class:`str`
        The NERSC directory that hosts the Tycho files.

    Returns
    -------
    Nothing
        But the Tycho fits file, README are written to $TYCHO_DIR/fits.

    Notes
    -----
        - The environment variable $TYCHO_DIR must be set.
        - The fits file is "cleaned up" to conform to DESI Data Systems
          standards (e.g. all columns are converted to upper-case).
    """
    # ADM check that the TYCHO_DIR is set and retrieve it.
    tychodir = get_tycho_dir()
    # ADM construct the directory to which to write files.
    fitsdir = os.path.join(tychodir, 'fits')
    # ADM the directory better be empty for the copy!
    if os.path.exists(fitsdir):
        if len(os.listdir(fitsdir)) > 0:
            msg = "{} should be empty to get TYCHO FITS file!".format(fitsdir)
            log.critical(msg)
            raise ValueError(msg)
    # ADM make the directory, if needed.
    else:
        log.info('Making TYCHO directory for storing FITS files')
        os.makedirs(fitsdir)
    # ADM the actual name of the Tycho file and the associated README.
    tychofn = "tycho2.kd.fits"
    cosmofile = os.path.join(cosmodir, tychofn)
    rfile = os.path.join(cosmodir, "README")
    # ADM the associated output files.
    outfile = os.path.join(fitsdir, tychofn)
    routfile = os.path.join(fitsdir, "README")
    # ADM read in the Tycho file and header in upper-case.
    objs, hdr = fitsio.read(cosmofile, header=True, upper=True)
    nobjs = len(objs)
    # ADM copy the input columns into the DESI data model, column-by-column.
    done = np.zeros(nobjs, dtype=tychodatamodel.dtype)
    for col in tychodatamodel.dtype.names:
        # ADM proper motions need converted to mas/yr.
        # NOTE(review): the "PM" substring test also scales the
        # SIGMA_PM_* columns; the factor of 1000 assumes the source
        # units are 1000x smaller than mas/yr — confirm upstream units.
        if "PM" in col:
            done[col] = objs[col]*1000
        else:
            done[col] = objs[col]
    # ADM add some information to the header (provenance and copy date).
    copydate = datetime.utcnow().isoformat(timespec='seconds')
    hdr["COSMODIR"] = cosmodir
    hdr["COPYDATE"] = copydate
    # ADM write the data.
    fitsio.write(outfile, done, extname='TYCHOFITS', header=hdr)
    # ADM also update the README: append provenance to the original text.
    msg = "\nCopied from: {}\non: {}\nthe specific file being: {}\n".format(
        cosmodir, copydate, cosmofile)
    with open(rfile) as f:
        readme = f.read()
    with open(routfile, 'w') as f:
        f.write(readme+msg)
    log.info('Wrote Tycho FITS file...t={:.1f}s'.format(time()-start))
    return
def tycho_fits_to_healpix():
    """Convert files in $TYCHO_DIR/fits to files in $TYCHO_DIR/healpix.

    Returns
    -------
    Nothing
        But the archived Tycho FITS files in $TYCHO_DIR/fits are
        rearranged by HEALPixel in the directory $TYCHO_DIR/healpix.
        The HEALPixel sense is nested with nside=get_tycho_nside(), and
        each file in $TYCHO_DIR/healpix is called healpix-xxxxx.fits,
        where xxxxx corresponds to the HEALPixel number.

    Notes
    -----
        - The environment variable $TYCHO_DIR must be set.
    """
    # ADM the resolution at which the Tycho HEALPix files are stored.
    nside = get_tycho_nside()
    npix = hp.nside2npix(nside)
    # ADM check that the TYCHO_DIR is set.
    tychodir = get_tycho_dir()
    # ADM construct the directories for reading/writing files.
    fitsdir = os.path.join(tychodir, "fits")
    tychofn = os.path.join(fitsdir, "tycho2.kd.fits")
    hpxdir = os.path.join(tychodir, "healpix")
    # ADM make sure the output directory is empty.
    if os.path.exists(hpxdir):
        if len(os.listdir(hpxdir)) > 0:
            msg = "{} must be empty to make Tycho HEALPix files!".format(hpxdir)
            log.critical(msg)
            raise ValueError(msg)
    # ADM make the output directory, if needed.
    else:
        log.info("Making Tycho directory for storing HEALPix files")
        os.makedirs(hpxdir)
    # ADM read in the Tycho file and assign Tycho objects to HEALPixels.
    objs, allhdr = fitsio.read(tychofn, header=True, upper=True)
    pix = radec2pix(nside, objs["RA"], objs["DEC"])
    # ADM loop through the pixels and write out the files.
    # NOTE(review): a file is written for EVERY pixel on the sky, even
    # pixels that contain no objects (an empty array is written).
    for pixnum in range(npix):
        # ADM construct the name of the output file.
        outfilename = io.hpx_filename(pixnum)
        outfile = os.path.join(hpxdir, outfilename)
        # ADM update the header with new information.
        hdr = dict(allhdr).copy()
        hdr["HPXNSIDE"] = nside
        hdr["HPXNEST"] = True
        hdr["HPXDATE"] = datetime.utcnow().isoformat(timespec='seconds')
        # ADM determine which objects are in this pixel and write out.
        done = objs[pix == pixnum]
        fitsio.write(outfile, done, extname="TYCHOHPX", header=hdr)
    log.info('Wrote Tycho HEALPix files...t={:.1f}s'.format(time()-start))
    return
def make_tycho_files():
    """Make the HEALPix-split Tycho files in one fell swoop.

    Returns
    -------
    Nothing
        But produces:
        - A FITS file with appropriate header and columns from
          `tychodatamodel`, and a README in $TYCHO_DIR/fits.
        - FITS files reorganized by HEALPixel in $TYCHO_DIR/healpix.
        The HEALPixel sense is nested with nside=get_tycho_nside(), and
        each file in $TYCHO_DIR/healpix is called healpix-xxxxx.fits,
        where xxxxx corresponds to the HEALPixel number.

    Notes
    -----
        - The environment variable $TYCHO_DIR must be set.
    """
    t0 = time()
    log.info('Begin making Tycho files...t={:.1f}s'.format(time()-t0))
    # ADM check that the TYCHO_DIR is set.
    tychodir = get_tycho_dir()
    # ADM fail early if the fits or healpix directories are populated,
    # ADM before embarking on the slower parts of the code.
    for subdir in ('fits', 'healpix'):
        direc = os.path.join(tychodir, subdir)
        if os.path.exists(direc) and len(os.listdir(direc)) > 0:
            msg = "{} should be empty to make Tycho files!".format(direc)
            log.critical(msg)
            raise ValueError(msg)
    grab_tycho()
    log.info('Copied Tycho FITS file from cosmo...t={:.1f}s'.format(time()-t0))
    tycho_fits_to_healpix()
    log.info('Rearranged FITS files by HEALPixel...t={:.1f}s'.format(time()-t0))
    return
def find_tycho_files(objs, neighbors=True, radec=False):
    """Find full paths to Tycho healpix files for objects by RA/Dec.

    Parameters
    ----------
    objs : :class:`~numpy.ndarray`
        Array of objects. Must contain the columns "RA" and "DEC".
    neighbors : :class:`bool`, optional, defaults to ``True``
        Also return all pixels that touch the files of interest
        to prevent edge effects (e.g. if a Tycho source is 1 arcsec
        away from a primary source and so in an adjacent pixel).
    radec : :class:`bool`, optional, defaults to ``False``
        If ``True`` then the passed `objs` is an [RA, Dec] list
        instead of a rec array that contains "RA" and "DEC".

    Returns
    -------
    :class:`list`
        A list of all Tycho files to read to account for objects at
        the passed locations.

    Notes
    -----
        - The environment variable $TYCHO_DIR must be set.
    """
    # ADM the Tycho HEALPix files live under $TYCHO_DIR/healpix at the
    # ADM fixed module-wide nside; delegate the lookup to desitarget.io.
    hpxdir = os.path.join(get_tycho_dir(), 'healpix')
    return io.find_star_files(objs, hpxdir, get_tycho_nside(),
                              neighbors=neighbors, radec=radec)
def find_tycho_files_hp(nside, pixlist, neighbors=True):
"""Find full paths to Tycho healpix files in a set of HEALPixels.
Parameters
----------
nside : :class:`int`
(NESTED) HEALPixel nside.
pixlist : :class:`list` or `int`
A set of HEALPixels at `nside`.
neighbors : :class:`bool`, optional, defaults to ``True``
Also return files corresponding to all neighbors that touch the
pixels in `pixlist` to prevent edge effects (e.g. a Tycho source
is 1 arcsec outside of `pixlist` and so in an adjacent pixel).
Returns
-------
:class:`list`
A list of all Tycho files that need to be read in to account for
objects in the passed list of pixels.
Notes
-----
- The environment variable $TYCHO_DIR must be set.
"""
# ADM the resolution at which the healpix files are stored.
filenside = get_tycho_nside()
# ADM check that the TYCHO_DIR is set and retrieve it.
tychodir = get_tycho_dir()
hpxdir = os.path.join(tychodir, 'healpix')
# ADM work with pixlist as an array.
pixlist = np.atleast_1d(pixlist)
# ADM determine the pixels that touch the passed pixlist.
pixnum = nside2nside(nside, filenside, pixlist)
# ADM if neighbors was sent, then retrieve | |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
"""Implementation of the various template directives."""
from genshi.core import QName, Stream
from genshi.path import Path
from genshi.template.base import TemplateRuntimeError, TemplateSyntaxError, \
EXPR, _apply_directives, _eval_expr
from genshi.template.eval import Expression, ExpressionASTTransformer, \
_ast, _parse
__all__ = ['AttrsDirective', 'ChooseDirective', 'ContentDirective',
'DefDirective', 'ForDirective', 'IfDirective', 'MatchDirective',
'OtherwiseDirective', 'ReplaceDirective', 'StripDirective',
'WhenDirective', 'WithDirective']
__docformat__ = 'restructuredtext en'
class DirectiveMeta(type):
    """Meta class for template directives.

    Derives the directive's tag name from the class name: a class called
    ``FooDirective`` is registered under the tag name ``foo``.
    """
    def __new__(mcs, name, bases, namespace):
        namespace['tagname'] = name.lower().replace('directive', '')
        return type.__new__(mcs, name, bases, namespace)
class Directive(object):
    """Abstract base class for template directives.

    A directive is basically a callable that takes three positional arguments:
    ``ctxt`` is the template data context, ``stream`` is an iterable over the
    events that the directive applies to, and ``directives`` is a list of
    other directives on the same stream that need to be applied.

    Directives can be "anonymous" or "registered". Registered directives can be
    applied by the template author using an XML attribute with the
    corresponding name in the template. Such directives should be subclasses of
    this base class that can be instantiated with the value of the directive
    attribute as parameter.

    Anonymous directives are simply functions conforming to the protocol
    described above, and can only be applied programmatically (for example by
    template filters).
    """
    # NOTE: Python 2 metaclass declaration (this module targets Python 2);
    # DirectiveMeta derives `tagname` from the subclass name.
    __metaclass__ = DirectiveMeta
    __slots__ = ['expr']

    def __init__(self, value, template=None, namespaces=None, lineno=-1,
                 offset=-1):
        # Parse the directive attribute value into an Expression (or None).
        self.expr = self._parse_expr(value, template, lineno, offset)

    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        """Called after the template stream has been completely parsed.

        :param template: the `Template` object
        :param stream: the event stream associated with the directive
        :param value: the argument value for the directive; if the directive was
                      specified as an element, this will be an `Attrs` instance
                      with all specified attributes, otherwise it will be a
                      `unicode` object with just the attribute value
        :param namespaces: a mapping of namespace URIs to prefixes
        :param pos: a ``(filename, lineno, offset)`` tuple describing the
                    location where the directive was found in the source

        This class method should return a ``(directive, stream)`` tuple. If
        ``directive`` is not ``None``, it should be an instance of the `Directive`
        class, and gets added to the list of directives applied to the substream
        at runtime. `stream` is an event stream that replaces the original
        stream associated with the directive.
        """
        return cls(value, template, namespaces, *pos[1:]), stream

    def __call__(self, stream, directives, ctxt, **vars):
        """Apply the directive to the given stream.

        :param stream: the event stream
        :param directives: a list of the remaining directives that should
                           process the stream
        :param ctxt: the context data
        :param vars: additional variables that should be made available when
                     Python code is executed
        """
        raise NotImplementedError

    def __repr__(self):
        expr = ''
        if getattr(self, 'expr', None) is not None:
            expr = ' "%s"' % self.expr.source
        return '<%s%s>' % (type(self).__name__, expr)

    @classmethod
    def _parse_expr(cls, expr, template, lineno=-1, offset=-1):
        """Parses the given expression, raising a useful error message when a
        syntax error is encountered.
        """
        try:
            # An empty/None expression yields None rather than an Expression.
            return expr and Expression(expr, template.filepath, lineno,
                                       lookup=template.lookup) or None
        # NOTE: Python-2-only `except ..., err` syntax.
        except SyntaxError, err:
            # Enrich the error with the directive context before re-raising.
            err.msg += ' in expression "%s" of "%s" directive' % (expr,
                                                                  cls.tagname)
            raise TemplateSyntaxError(err, template.filepath, lineno,
                                      offset + (err.offset or 0))
def _assignment(ast):
"""Takes the AST representation of an assignment, and returns a
function that applies the assignment of a given value to a dictionary.
"""
def _names(node):
if isinstance(node, _ast.Tuple):
return tuple([_names(child) for child in node.elts])
elif isinstance(node, _ast.Name):
return node.id
def _assign(data, value, names=_names(ast)):
if type(names) is tuple:
for idx in range(len(names)):
_assign(data, value[idx], names[idx])
else:
data[names] = value
return _assign
class AttrsDirective(Directive):
    """Implementation of the ``py:attrs`` template directive.

    The value of the ``py:attrs`` attribute should be a dictionary or a sequence
    of ``(name, value)`` tuples. The items in that dictionary or sequence are
    added as attributes to the element:

    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<ul xmlns:py="http://genshi.edgewall.org/">
    ...   <li py:attrs="foo">Bar</li>
    ... </ul>''')
    >>> print(tmpl.generate(foo={'class': 'collapse'}))
    <ul>
      <li class="collapse">Bar</li>
    </ul>
    >>> print(tmpl.generate(foo=[('class', 'collapse')]))
    <ul>
      <li class="collapse">Bar</li>
    </ul>

    If the value evaluates to ``None`` (or any other non-truth value), no
    attributes are added:

    >>> print(tmpl.generate(foo=None))
    <ul>
      <li>Bar</li>
    </ul>
    """
    __slots__ = []

    def __call__(self, stream, directives, ctxt, **vars):
        def _generate():
            # NOTE: Python 2 iterator protocol (.next()); the first event
            # is the opening tag whose attributes are to be amended.
            kind, (tag, attrib), pos = stream.next()
            attrs = _eval_expr(self.expr, ctxt, vars)
            if attrs:
                # A Stream result contributes only its first event.
                if isinstance(attrs, Stream):
                    try:
                        attrs = iter(attrs).next()
                    except StopIteration:
                        attrs = []
                elif not isinstance(attrs, list):  # assume it's a dict
                    attrs = attrs.items()
                # Merge into the Attrs instance; empty/whitespace-only
                # values are mapped to None.
                # NOTE(review): presumably a None value removes the
                # attribute via Attrs.__or__ — confirm in genshi.core.
                attrib |= [
                    (QName(n), v is not None and unicode(v).strip() or None)
                    for n, v in attrs
                ]
            yield kind, (tag, attrib), pos
            # Pass the remaining events through unchanged.
            for event in stream:
                yield event
        return _apply_directives(_generate(), directives, ctxt, vars)
class ContentDirective(Directive):
    """Implementation of the ``py:content`` template directive.

    This directive replaces the content of the element with the result of
    evaluating the value of the ``py:content`` attribute:

    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<ul xmlns:py="http://genshi.edgewall.org/">
    ...   <li py:content="bar">Hello</li>
    ... </ul>''')
    >>> print(tmpl.generate(bar='Bye'))
    <ul>
      <li>Bye</li>
    </ul>
    """
    __slots__ = []

    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        # As an element the directive would receive an `Attrs` mapping
        # instead of an attribute string, which makes no sense here.
        if type(value) is dict:
            raise TemplateSyntaxError(
                'The content directive can not be used as an element',
                template.filepath, *pos[1:])
        # Keep only the opening and closing events of the original stream
        # and put the evaluated expression between them.
        expression = cls._parse_expr(value, template, *pos[1:])
        return None, [stream[0], (EXPR, expression, pos), stream[-1]]
class DefDirective(Directive):
    """Implementation of the ``py:def`` template directive.

    This directive can be used to create "Named Template Functions", which
    are template snippets that are not actually output during normal
    processing, but rather can be expanded from expressions in other places
    in the template.

    A named template function can be used just like a normal Python function
    from template expressions:

    >>> from genshi.template import MarkupTemplate
    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <p py:def="echo(greeting, name='world')" class="message">
    ...     ${greeting}, ${name}!
    ...   </p>
    ...   ${echo('Hi', name='you')}
    ... </div>''')
    >>> print(tmpl.generate(bar='Bye'))
    <div>
      <p class="message">
        Hi, you!
      </p>
    </div>

    If a function does not require parameters, the parenthesis can be omitted
    in the definition:

    >>> tmpl = MarkupTemplate('''<div xmlns:py="http://genshi.edgewall.org/">
    ...   <p py:def="helloworld" class="message">
    ...     Hello, world!
    ...   </p>
    ...   ${helloworld()}
    ... </div>''')
    >>> print(tmpl.generate(bar='Bye'))
    <div>
      <p class="message">
        Hello, world!
      </p>
    </div>
    """
    __slots__ = ['name', 'args', 'star_args', 'dstar_args', 'defaults']

    def __init__(self, args, template, namespaces=None, lineno=-1, offset=-1):
        Directive.__init__(self, None, template, namespaces, lineno, offset)
        # The directive value is itself parsed as Python: either a bare
        # name ("helloworld") or a call signature ("echo(a, b='c')").
        ast = _parse(args).body
        self.args = []
        self.star_args = None
        self.dstar_args = None
        self.defaults = {}
        if isinstance(ast, _ast.Call):
            self.name = ast.func.id
            for arg in ast.args:
                # only names
                self.args.append(arg.id)
            for kwd in ast.keywords:
                # Keyword arguments define defaults; the default value is
                # kept as an unevaluated Expression (evaluated at call time).
                self.args.append(kwd.arg)
                exp = Expression(kwd.value, template.filepath,
                                 lineno, lookup=template.lookup)
                self.defaults[kwd.arg] = exp
            if getattr(ast, 'starargs', None):
                self.star_args = ast.starargs.id
            if getattr(ast, 'kwargs', None):
                self.dstar_args = ast.kwargs.id
        else:
            # Bare name: a template function without parameters.
            self.name = ast.id

    @classmethod
    def attach(cls, template, stream, value, namespaces, pos):
        # When used as an element, the signature comes from the
        # 'function' attribute rather than the directive value itself.
        if type(value) is dict:
            value = value.get('function')
        return super(DefDirective, cls).attach(template, stream, value,
                                               namespaces, pos)

    def __call__(self, stream, directives, ctxt, **vars):
        # Materialize the substream so the function can be replayed on
        # every invocation.
        stream = list(stream)

        def function(*args, **kwargs):
            scope = {}
            args = list(args)  # make mutable
            # Bind positional arguments first, then keywords, then
            # lazily-evaluated defaults for anything still missing.
            for name in self.args:
                if args:
                    scope[name] = args.pop(0)
                else:
                    if name in kwargs:
                        val = kwargs.pop(name)
                    else:
                        val = _eval_expr(self.defaults.get(name), ctxt, vars)
                    scope[name] = val
            # Leftover positionals/keywords go to *args/**kwargs if declared.
            if not self.star_args is None:
                scope[self.star_args] = args
            if not self.dstar_args is None:
                scope[self.dstar_args] = kwargs
            ctxt.push(scope)
            for event in _apply_directives(stream, directives, ctxt, vars):
                yield event
            ctxt.pop()
        function.__name__ = self.name

        # Store the function reference in the bottom context frame so that it
        # doesn't get popped off before processing the template has finished
        # FIXME: this makes context data mutable as a side-effect
        ctxt.frames[-1][self.name] = function

        # The definition itself produces no output events.
        return []

    def __repr__(self):
        return '<%s "%s">' % (type(self).__name__, self.name)
class ForDirective(Directive):
"""Implementation of the ``py:for`` template directive for repeating an
element based on an iterable in the context data.
>>> from genshi.template import | |
information about colors, line widths, fonts, and other graphic parameters that can be drawn on a canvas.
Nidium is currently supporting the following context types:
* 2D Context is based on the [2DContext API](http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/).
* A WebGL Context is based on the [WebGL API](https://www.khronos.org/registry/webgl/specs/1.0/).
>Info :
This operation is slow the first time the method is called, and the context can't be changed once it's created.""",
SeesDocs( "document.canvas|Canvas|Canvas.getContext|Canvas.setContext|Canvas.clear" ),
[ExampleDoc("""var canvas = new Canvas(200, 100);
var context = canvas.getContext("2d");
context.fillStyle = "red";
context.fillRect(0, 0, 200, 100);""")],
IS_Dynamic, IS_Public, IS_Slow,
[ ParamDoc( "mode", "Context mode: `2d` or `webgl`", "string", NO_Default, IS_Obligated) ],
ReturnDoc( "The context or null", "CanvasRenderingContext2D|WebGLRenderingContext", nullable=True )
)
# Documentation entry: assign a pre-existing rendering context to a canvas.
FunctionDoc( "Canvas.setContext", "Sets the canvas context.",
SeesDocs( "Canvas.getContext|Canvas.setContext|Canvas.clear" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_Fast,
[ ParamDoc( "obj", "Context object", "CanvasRenderingContext2D", NO_Default, IS_Obligated) ],
NO_Returns
)
# Fixed typos in the emitted documentation text: "bellow" -> "below",
# "his parent" -> "its parent".
FieldDoc( "Canvas.position", """Set the coordinate model that is used for the drawing of the canvas layout.
This may be any of:
* 'absolute' Placement is relative to the top-left corner of the application window.
* 'fixed' Placement is relative to the parent canvas but not affected by the scroll position of the parent.
* 'inline' Placement is relative to the previous sibling. The canvas will be placed to the right of the previous sibling.
* 'inline-break' Same as inline, but if the canvas does not fit inside its parent, the new canvas will be pushed below.
* 'relative' Placement is relative to the parent-canvas.
""",
NO_Sees,
[ExampleDoc("""var canvas = new Canvas(200, 400);
var ctx = canvas.getContext("2d");
document.canvas.add(canvas);
ctx.fillStyle = "red";
ctx.fillRect(0, 0, 100, 100);
canvas.left = 100; // 100px relative to window.canvas
canvas.top = 50; // 50px relative to window.canvas
// at this stage, canvas is now positioned at (100,50) relative to its parent (window.canvas)
canvas.position = "absolute";
// at this stage, canvas is now positioned at (100,50) relative to the application window""")],
IS_Dynamic, IS_Public, IS_ReadWrite,
"string",
"relative"
)
# Removed a stray '",' that leaked into the emitted documentation text
# (first line of the description ended with: Canvas.",).
FieldDoc( "Canvas.width", """Get or set the width of the Canvas.
The operation is slow when the canvas' context is already instantiated with 'Canvas.getContext', because it needs to restructure several things internally.
If the canvas' context did not have a 'Canvas.getContext' call yet, it is a fast method.""",
SeesDocs( "Canvas.minWidth|Canvas.width|Canvas.maxWidth" ),
[ExampleDoc("""var canvas = new Canvas(100, 100);
canvas.width = 150;""")],
IS_Dynamic, IS_Public, IS_ReadWrite,
"integer",
NO_Default
)
# Width constraint properties (companions of Canvas.width).
FieldDoc( "Canvas.minWidth", "Get or set the minimal width of the Canvas.",
SeesDocs( "Canvas.minWidth|Canvas.width|Canvas.maxWidth" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"integer",
NO_Default
)
FieldDoc( "Canvas.maxWidth", "Get or set the maximal width of the Canvas.",
SeesDocs( "Canvas.minWidth|Canvas.width|Canvas.maxWidth" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"integer",
NO_Default
)
# Height property plus its min/max constraints (mirrors the width trio).
FieldDoc( "Canvas.height", """Get or set the height of the Canvas.
The operation is slow when the canvas' context is already instantiated with 'Canvas.getContext', because it needs to restructure several things internally.
If the canvas' context did not have a 'Canvas.getContext' call yet, it is a fast method.""",
SeesDocs( "Canvas.minHeight|Canvas.height|Canvas.maxHeight" ),
[ExampleDoc("""var canvas = new Canvas(100, 100);
canvas.height = 150;""")],
IS_Dynamic, IS_Public, IS_ReadWrite,
"integer",
NO_Default
)
FieldDoc( "Canvas.minHeight", "Get or set the minimal height of the Canvas.",
SeesDocs( "Canvas.minHeight|Canvas.height|Canvas.maxHeight" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"integer",
NO_Default
)
FieldDoc( "Canvas.maxHeight", "Get or set the maximum height of the Canvas.",
SeesDocs( "Canvas.minHeight|Canvas.height|Canvas.maxHeight" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"integer",
NO_Default
)
# Edge-position properties; their meaning depends on Canvas.position.
FieldDoc( "Canvas.left", """Get or set the position of the top-left corner of the canvas.
The behavior depends on the value of the 'Canvas.position' property.""",
SeesDocs( "Canvas.position|Canvas.left|Canvas.right|Canvas.marginLeft|Canvas.marginRight|Canvas.staticLeft|Canvas.staticRight" ),
[ExampleDoc("""var canvas = new Canvas(200, 400);
var ctx = canvas.getContext("2d");
document.canvas.add(canvas);
ctx.fillStyle = "red";
ctx.fillRect(0, 0, 100, 100);
canvas.left = 100;"""
)],
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
FieldDoc( "Canvas.right", """Get or set the position of the top-right corner of the canvas.
The behavior depends on the value of the `Canvas.position` property.""",
SeesDocs( "Canvas.left|Canvas.right|Canvas.marginLeft|Canvas.marginRight|Canvas.staticLeft|Canvas.staticRight" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
FieldDoc( "Canvas.top", """Get or set the top position of the top-left corner of the canvas.
The behavior depends on the value of the `Canvas.position` property.""",
SeesDocs( "Canvas.top|Canvas.bottom|Canvas.marginTop|Canvas.marginBottom|Canvas.staticTop|Canvas.staticBottom" ),
[ExampleDoc("""var canvas = new Canvas(200, 400);
var ctx = canvas.getContext("2d");
document.canvas.add(canvas);
ctx.fillStyle = "red";
ctx.fillRect(0, 0, 100, 100);
canvas.top = 100;
""") ],
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
FieldDoc( "Canvas.bottom", """Get or set the bottom position of the canvas.
The behavior depends on the value of the `Canvas.position` property.""",
SeesDocs( "Canvas.top|Canvas.bottom|Canvas.marginTop|Canvas.marginBottom|Canvas.staticTop|Canvas.staticBottom" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
# Repaired a garbled sentence ("the left property of all with a `fixed` or
# `absolute` position" lacked its noun); wording now mirrors
# Canvas.scrollTop, which documents the same mechanism for the other axis.
# NOTE(review): the example sets document.canvas.offsetLeft — confirm it
# should not read canvas.scrollLeft instead.
FieldDoc( "Canvas.scrollLeft", """Get or set the horizontal scroll position of the canvas.
The offset is applied to the left property of all children.
Children with a 'fixed' 'Canvas.position' or an 'absolute' 'Canvas.position' are not impacted.
This property is useful to implement mouse scrolling or to move multiple canvases at once.
The value can't be negative unless the property 'Canvas.allowNegativeScroll' is set to 'true'.""",
SeesDocs( "Canvas.scrollTop|Canvas.scrollLeft|Canvas.scrollBottom|Canvas.allowNegativeScroll" ),
[ExampleDoc("""var canvas = new Canvas(200, 400);
var ctx = canvas.getContext("2d");
document.canvas.add(canvas)
ctx.fillStyle = "red";
ctx.fillRect(0, 0, 100, 100);
canvas.left = 100;
document.canvas.offsetLeft = 10;""")],
IS_Dynamic, IS_Public, IS_ReadWrite,
"integer",
NO_Default
)
# Removed a stray '",' that leaked into the emitted documentation text
# (first line of the description ended with: canvas.",).
FieldDoc( "Canvas.scrollTop", """Get or set the vertical scroll position of the canvas.
The offset is applied to the left property of all children.
Children with a 'fixed' 'Canvas.position' or an 'absolute' 'Canvas.position' are not impacted.
This property is useful to implement mouse scrolling or to move multiple canvases at once.
The value can't be negative unless the property 'Canvas.allowNegativeScroll' is set to 'true'. """,
SeesDocs( "Canvas.scrollTop|Canvas.scrollLeft|Canvas.scrollBottom|Canvas.allowNegativeScroll" ),
[ExampleDoc("""var canvas = new Canvas(200, 400);
var ctx = canvas.getContext("2d");
document.canvas.add(canvas)
ctx.fillStyle = "red";
ctx.fillRect(0, 0, 100, 100);
canvas.top = 100;
document.canvas.offsetTop = 10;""")],
IS_Dynamic, IS_Public, IS_ReadWrite,
"integer",
NO_Default
)
# Scrolling permission flag plus basic visibility/clipping properties.
FieldDoc( "Canvas.allowNegativeScroll", "Get or set if negative scrolling is allowed.",
SeesDocs( "Canvas.scrollTop|Canvas.scrollLeft|Canvas.scrollBottom|Canvas.allowNegativeScroll" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
FieldDoc( "Canvas.visible", "Get or set the visibility of a canvas.",
SeesDocs( "Canvas.visible|Canvas.opacity|Canvas.overflow|Canvas.coating" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
FieldDoc( "Canvas.overflow", """Get or set the overflow of a canvas.
This property defines whether children will be clipped inside the canvas boundaries or not.""",
SeesDocs( "Canvas.visible|Canvas.opacity|Canvas.overflow|Canvas.coating" ),
[ExampleDoc("""var canvas = new Canvas(100, 100);
document.canvas.add(canvas);
var canvas2 = new Canvas(50, 50);
canvas.add(canvas2);
canvas2.left = 50;
canvas2.top = -10;
canvas.overflow = false; """)],
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
# Scrollability switches (global and per-axis), drawing coating and opacity.
FieldDoc("Canvas.scrollable", "Enable or disable scrolling on a canvas.",
SeesDocs("Canvas.scrollableX|Canvas.scrollableY"),
[ExampleDoc("""var c = new Canvas(200, 400);
c.overflow = false;
c.scrollableY = true;
var c2 = new Canvas(200, 1000);
c2.scrollable = false;
var ctx = c2.getContext("2d");
var grd = ctx.createLinearGradient(0,0,0,1000);
grd.addColorStop(0,"blue");
grd.addColorStop(1,"red");
ctx.fillStyle = grd;
ctx.fillRect(0, 0, 200, 1000);
c.add(c2);
document.canvas.add(c);""")],
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
"false"
)
FieldDoc("Canvas.scrollableX", "Enable or disable scrolling on X axis for a canvas.",
SeesDocs("Canvas.scrollable|Canvas.scrollableY"),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
"false"
)
FieldDoc("Canvas.scrollableY", "Enable or disable scrolling on Y axis for a canvas.",
SeesDocs("Canvas.scrollable|Canvas.scrollableX"),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
"false"
)
FieldDoc( "Canvas.coating", """Get or set the coating of a canvas.
Coating is always surrounding the canvas and does not change its size.
Setting a coating greater than 0 allows you to draw at negative coordinates, beyond the bounding rectangle of the canvas.""",
SeesDocs( "Canvas.visible|Canvas.opacity|Canvas.overflow|Canvas.coating" ),
[ExampleDoc("""var canvas = new Canvas(100, 100);
canvas.coating = 100;
canvas.left = 300;
canvas.top = 300;
var ctx = canvas.getContext("2d");
ctx.fillStyle = "red";
ctx.fillRect(0, 0, canvas.width, canvas.height);
ctx.strokeStyle = "blue";
ctx.strokeRect(-100, -100, canvas.width+200, canvas.height+200);
document.canvas.add(canvas);""")],
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
FieldDoc( "Canvas.opacity", """Get or set the opacity of a canvas.
The children of the canvas are affected as well.
The value must be between '0.0' and '1.0'.""",
SeesDocs( "Canvas.visible|Canvas.opacity|Canvas.overflow|Canvas.coating" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
# Fixed copy/paste descriptions: staticTop said "static-right", staticBottom
# said "static-top". Also made the SeesDocs lists self-consistent with the
# staticLeft/staticRight pair (each lists both flags of its axis).
FieldDoc( "Canvas.staticLeft", "Get or set the static-left flag of the canvas.",
SeesDocs( "Canvas.left|Canvas.right|Canvas.marginLeft|Canvas.marginRight|Canvas.staticLeft|Canvas.staticRight" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
FieldDoc( "Canvas.staticRight", "Get or set the static-right flag of the canvas.",
SeesDocs( "Canvas.left|Canvas.right|Canvas.marginLeft|Canvas.marginRight|Canvas.staticLeft|Canvas.staticRight" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
FieldDoc( "Canvas.staticTop", "Get or set the static-top flag of the canvas.",
SeesDocs( "Canvas.top|Canvas.bottom|Canvas.marginTop|Canvas.marginBottom|Canvas.staticTop|Canvas.staticBottom" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
FieldDoc( "Canvas.staticBottom", "Get or set the static-bottom flag of the canvas.",
SeesDocs( "Canvas.top|Canvas.bottom|Canvas.marginTop|Canvas.marginBottom|Canvas.staticTop|Canvas.staticBottom" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
# Fluid-layout flags and the canvas identifier.
FieldDoc( "Canvas.fluidHeight", "Get or set the fluid-height flag of the canvas.",
SeesDocs( "Canvas.fluidHeight|Canvas.fluidWidth" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
FieldDoc( "Canvas.fluidWidth", "Get or set the fluid-width flag of the canvas.",
SeesDocs( "Canvas.fluidHeight" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"boolean",
NO_Default
)
FieldDoc( "Canvas.id", "Get or set the id for this canvas.",
NO_Sees,
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"string",
NO_Default
)
# Fixed declared types: marginTop/marginBottom were "boolean" while the
# sibling marginLeft/marginRight margins are "float" — margins are lengths.
FieldDoc( "Canvas.marginLeft", "Get or set the left margin for the canvas.",
SeesDocs( "Canvas.left|Canvas.right|Canvas.marginRight|Canvas.staticLeft|Canvas.staticRight" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
FieldDoc( "Canvas.marginRight", "Get or set the right margin for the canvas.",
SeesDocs( "Canvas.left|Canvas.right|Canvas.marginLeft|Canvas.staticLeft|Canvas.staticRight" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
FieldDoc( "Canvas.marginTop", "Get or set the top margin for the canvas.",
SeesDocs( "Canvas.top|Canvas.bottom|Canvas.marginBottom|Canvas.staticTop|Canvas.staticBottom" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
FieldDoc( "Canvas.marginBottom", "Get or set the bottom margin for the canvas.",
SeesDocs( "Canvas.top|Canvas.bottom|Canvas.marginTop|Canvas.staticTop|Canvas.staticBottom" ),
NO_Examples,
IS_Dynamic, IS_Public, IS_ReadWrite,
"float",
NO_Default
)
FunctionDoc( "Canvas.cursor", """Set the cursor for this canvas.
The cursortype may be any of "default"|"arrow"|"beam"|"text"|"pointer"|"grabbing"|"drag"|"hidden"|"none"|"col-resize".""",
| |
attributes: attributes of dimensions, details, measures
and aggregates. Use this method if you need to prepare structures for
any kind of query. For attributes for more specific types of queries
refer to :meth:`Cube.all_fact_attributes` and
:meth:`Cube.all_aggregate_attributes`.
.. versionchanged:: 1.1
Returns all attributes, including aggregates. Original
functionality is available as `all_fact_attributes()`
"""
attributes = []
for dim in self.dimensions:
attributes += dim.attributes
attributes += self.details
attributes += self.measures
attributes += self.aggregates
return attributes
@property
def base_attributes(self):
    """Return attributes that are not derived from other attributes.

    An attribute with any expression (even a constant one) counts as
    derived; everything else is returned, including aggregate
    attributes that are base (e.g. a pre-aggregated table column).

    .. versionadded:: 1.1
    """
    base = []
    for candidate in self.all_attributes:
        if candidate.is_base:
            base.append(candidate)
    return base
@property
def all_fact_attributes(self):
    """All fact-level attributes: dimension attributes, details and
    measures.

    .. versionadded:: 1.1
    """
    collected = []
    for dimension in self.dimensions:
        collected.extend(dimension.attributes)
    collected.extend(self.details)
    collected.extend(self.measures)
    return collected
@property
def attribute_dependencies(self):
    """Dictionary mapping attribute references to the references of the
    attributes they depend on. For an attribute `a` with expression
    `b + c` the entry is ``{"a": ["b", "c"]}``. Covers all cube
    attributes and aggregates.

    .. versionadded:: 1.1
    """
    deps = {}
    for attr in self.all_attributes + self.all_aggregate_attributes:
        deps[attr.ref] = attr.dependencies
    return deps
@property
def all_aggregate_attributes(self):
    """All attributes usable for aggregation: dimension attributes plus
    the cube's aggregates."""
    collected = []
    for dimension in self.dimensions:
        collected.extend(dimension.attributes)
    collected.extend(self.aggregates)
    return collected
def attribute(self, attribute):
    """Return the attribute object named by *attribute* — a dimension
    attribute, a measure or a detail. Raises `NoSuchAttributeError`
    when no attribute of that name exists."""
    # TODO: This should be a dictionary once the Cube object becomes
    # immutable
    name = str(attribute)
    for dimension in self.dimensions:
        try:
            return dimension.attribute(name, by_ref=True)
        except KeyError:
            pass
    for candidate in self.details:
        if candidate.name == name:
            return candidate
    for candidate in self.measures:
        if candidate.name == name:
            return candidate
    raise NoSuchAttributeError("Cube '%s' has no attribute '%s'"
                               % (self.name, attribute))
def get_attributes(self, attributes=None, aggregated=False):
    """Return a list of the cube's attribute objects for the names in
    `attributes`.

    When `aggregated` is true, post-aggregation attributes (dimension
    attributes and aggregates) are considered; otherwise fact
    attributes (dimension attributes, details and measures). When
    `attributes` is empty or ``None``, all attributes of the chosen
    kind are returned. Unknown names raise `NoSuchAttributeError`."""
    # TODO: this should be a dictionary created in __init__ once this
    # class becomes immutable
    if not attributes:
        return (self.all_aggregate_attributes if aggregated
                else self.all_fact_attributes)
    lookup = object_dict(self.all_attributes, True)
    result = []
    for name in (str(attr) for attr in attributes):
        try:
            result.append(lookup[name])
        except KeyError:
            raise NoSuchAttributeError("Unknown attribute '{}' in cube "
                                       "'{}'".format(name, self.name))
    return result
def collect_dependencies(self, attributes):
    """Collect all original and dependent cube attributes for
    `attributes`, sorted by dependency: attributes that depend on
    nothing come first. For example, if `attributes` is [a, b] and
    a = c * 2, the result is [b, c, a] or [c, b, a].

    Intended for backends that handle attribute expressions: it is safe
    to build a logical-to-physical reference mapping from expressions
    in the order of the returned list.

    .. versionadded:: 1.1
    """
    # Resolves to the module-level collect_dependencies() helper, not
    # this method.
    ordered = collect_dependencies(attributes, self.all_attributes)
    return self.get_attributes(ordered)
def link_dimension(self, dimension):
    """Link `dimension` (or a clone customised by the cube's dimension
    link for that name) into the cube. See :meth:`Dimension.clone` for
    details on cloning."""
    spec = self.dimension_links.get(dimension.name)
    self._add_dimension(dimension.clone(**spec) if spec else dimension)
# TODO: this method should be used only during object initialization
def _add_dimension(self, dimension):
    """Add `dimension` to the cube, replacing any existing dimension of
    the same name. Raises `ArgumentError` when `dimension` is ``None``
    or not a `Dimension` instance."""
    if not dimension:
        raise ArgumentError("Trying to add None dimension to cube '%s'."
                            % self.name)
    if not isinstance(dimension, Dimension):
        raise ArgumentError("Dimension added to cube '%s' is not a "
                            "Dimension instance. It is '%s'"
                            % (self.name, type(dimension)))
    self._dimensions[dimension.name] = dimension
@property
def dimensions(self):
    # List of the cube's Dimension objects in insertion order.
    return list(self._dimensions.values())
def dimension(self, obj):
    """Return a dimension of the cube. `obj` may be a dimension name or
    a dimension object belonging to the cube.

    Raises `NoSuchDimensionError` when there is no such dimension.
    """
    # FIXME: raise better exception if dimension does not exist, but is in
    # the list of required dimensions
    if not obj:
        raise NoSuchDimensionError("Requested dimension should not be "
                                   "none (cube '{}')".format(self.name))
    name = str(obj)
    try:
        return self._dimensions[name]
    except KeyError:
        raise NoSuchDimensionError("cube '{}' has no dimension '{}'"
                                   .format(self.name, name))
@property
def distilled_hierarchies(self):
    """Dictionary keyed by (dimension name, hierarchy name) with lists
    of the hierarchy's level-key attribute references as values. The
    default hierarchy is additionally stored under
    (dimension name, None).

    .. warning::

        This method might change in the future. Consider experimental."""
    result = {}
    for dimension in self.dimensions:
        for hierarchy in dimension.hierarchies:
            level_refs = [level_key.ref for level_key in hierarchy.keys()]
            result[(dimension.name, hierarchy.name)] = level_refs
            if dimension.default_hierarchy_name == hierarchy.name:
                result[(dimension.name, None)] = level_refs
    return result
def to_dict(self, **options):
    """Convert to a dictionary. If `with_mappings` is ``True`` (which is
    default) then `joins`, `mappings`, `fact` and `options` are included.
    Should be set to ``False`` when returning a dictionary that will be
    provided in an user interface or through server API.
    """
    out = super(Cube, self).to_dict(**options)
    out["locale"] = self.locale
    out["category"] = self.category
    # Serialize the attribute collections with the same options.
    aggregates = [m.to_dict(**options) for m in self.aggregates]
    out["aggregates"] = aggregates
    measures = [m.to_dict(**options) for m in self.measures]
    out["measures"] = measures
    details = [a.to_dict(**options) for a in self.details]
    out["details"] = details
    if options.get("expand_dimensions"):
        # Per-dimension hierarchy depth limits: {dim: {hier: level}}.
        limits = defaultdict(dict)
        # TODO: move this to metadata as strip_hierarchies()
        hierarchy_limits = options.get("hierarchy_limits")
        hierarchy_limits = hierarchy_limits or []
        for dim, hier, level in hierarchy_limits:
            limits[dim][hier] = level
        dims = []
        for dim in self.dimensions:
            limit = limits.get(dim.name)
            info = dim.to_dict(hierarchy_limits=limit)
            dims.append(info)
    else:
        # Compact form: dimension names only.
        dims = [dim.name for dim in self.dimensions]
    out["dimensions"] = dims
    if options.get("with_mappings"):
        # Physical/backend details — omitted for UI/server responses.
        out["mappings"] = self.mappings
        out["fact"] = self.fact
        out["joins"] = self.joins
        out["browser_options"] = self.browser_options
    out["key"] = self.key
    return out
def __eq__(self, other):
if other is None or type(other) != type(self):
return False
if self.name != other.name or self.label != other.label \
or self.description != other.description:
return False
elif self.dimensions != other.dimensions \
or self.measures != other.measures \
or self.aggregates != other.aggregates \
or self.details != other.details \
or self.mappings != other.mappings \
or self.joins != other.joins \
or self.browser_options != other.browser_options \
or self.info != other.info:
return False
return True
def validate(self):
    """Validate cube. See Model.validate() for more information.

    Returns a list of ``(severity, message)`` tuples; empty when the
    cube's measures and details are consistent."""
    results = []
    # Internal consistency check: all measures, details and keys must be
    # Attribute instances.
    measures = set()
    for measure in self.measures:
        if not isinstance(measure, Attribute):
            # Fixed message: implicit literal concatenation previously
            # produced "is not instanceof Attribute" (missing space).
            results.append(('error',
                            "Measure '%s' in cube '%s' is not instance "
                            "of Attribute" % (measure, self.name)))
        else:
            measures.add(str(measure))
    details = set()
    for detail in self.details:
        if not isinstance(detail, Attribute):
            results.append(('error', "Detail '%s' in cube '%s' is not "
                            "instance of Attribute"
                            % (detail, self.name)))
        if str(detail) in details:
            results.append(('error', "Duplicate detail '%s' in cube '%s'"
                            % (detail, self.name)))
        elif str(detail) in measures:
            results.append(('error', "Duplicate detail '%s' in cube '%s'"
                            " - specified also as measure"
                            % (detail, self.name)))
        else:
            details.add(str(detail))
    # 2. check whether dimension attributes are unique
    return results
def localize(self, trans):
    # Apply the localization dictionary `trans` to the cube's own
    # metadata and to its measures, aggregates and details.
    # NOTE(review): the superclass call below invokes `localized()` while
    # this method (and the per-attribute calls) are named `localize()` —
    # confirm the parent class really defines `localized`; this looks
    # like a possible typo.
    super(Cube, self).localized(trans)
    self.category = trans.get("category", self.category)
    # Each attribute collection has its own sub-dictionary of
    # per-attribute translations keyed by attribute name.
    attr_trans = trans.get("measures", {})
    for attrib in self.measures:
        attrib.localize(attr_trans.get(attrib.name, {}))
    attr_trans = trans.get("aggregates", {})
    for attrib in self.aggregates:
        attrib.localize(attr_trans.get(attrib.name, {}))
    attr_trans = trans.get("details", {})
    for attrib in self.details:
        attrib.localize(attr_trans.get(attrib.name, {}))
def localizable_dictionary(self):
# FIXME: this needs revision/testing – it might be broken
locale = {}
locale.update(get_localizable_attributes(self))
mdict = {}
locale["measures"] = mdict
for measure in self.measures:
| |
<gh_stars>0
# The MIT License (MIT)
#
# Copyright (c) 2015-2017 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Communicate with an OpenEVSE equipment
All available methods are described in the BaseOpenEVSE class documentation.
On the UART port
================
Create an instance of the SerialOpenEVSE class to initialize.
This method needs pyserial.
Example:
>>> import openevse
>>> o = openevse.SerialOpenEVSE('/dev/ttyS0')
>>> print o.current_capacity()
Please note OpenEVSE object initialization disables the echo (``$SE 0``).
Usage 1 : on-demand
-------------------
Call any of the "request" methods of ``SerialOpenEVSE``, you get a response.
Periodically call ``SerialOpenEVSE.get_status_change()`` to see if there has been a
status change since the last time it was called.
Please note that you only get the last change: if two changes occurred between
two calls, the first one is ignored.
Usage 2 : callback
------------------
You may define a callback function to receive status changes.
The callback function is called automatically if there has been a status change
before a request.
You may call ``SerialOpenEVSE.get_status_change()`` to trigger a check and
execute the callback function if necessary.
Please note that the callback function is executed when the status change is
received, not when it is sent by the OpenEVSE board.
Usage 3 : callback and thread
-----------------------------
When a callback has been defined, you may execute ``SerialOpenEVSE.run_sync()``.
By doing so, a new thread is started in order to receive status changes
synchronously.
This way, the callback thread is executed as soon as a status change is sent
by the OpenEVSE board.
In this case, you cannot use ``SerialOpenEVSE.get_status_change()``.
Use ``SerialOpenEVSE.stop_sync()`` to stop the thread.
Using the wifi kit
==================
Create an instance of the WifiOpenEVSE class to initialize.
Example:
>>> import openevse
>>> o = openevse.WifiOpenEVSE('192.168.42.42')
>>> print o.current_capacity()
"""
import base64
import datetime
import re
try:
import serial
SERIAL = True
except ImportError:
SERIAL = False
import threading
import time
import urllib2
# Library version.
_version = '0.4'
# EVSE state codes reported by the controller, mapped to readable names.
states = {
    0: 'unknown',
    1: 'not connected',
    2: 'connected',
    3: 'charging',
    4: 'vent required',
    5: 'diode check failed',
    6: 'gfci fault',
    7: 'no ground',
    8: 'stuck relay',
    9: 'gfci self-test failure',
    10: 'over temperature',
    254: 'sleeping',
    255: 'disabled'
}
# LCD backlight colors; the list index is the color code sent to the EVSE.
_lcd_colors = ['off', 'red', 'green', 'yellow', 'blue', 'violet', 'teal', 'white']
# Status actions mapped to their RAPI function codes.
_status_functions = {'disable': 'FD', 'enable': 'FE', 'sleep': 'FS'}
_lcd_types = ['monochrome', 'rgb']
_service_levels = ['A', '1', '2']
# Timeouts in seconds
STANDARD_SERIAL_TIMEOUT = 0.5
RESET_SERIAL_TIMEOUT = 10
STATUS_SERIAL_TIMEOUT = 0
SYNC_SERIAL_TIMEOUT = 0.5
NEWLINE_MAX_AGE = 5
# Every well-formed RAPI response line starts with one of these prefixes.
CORRECT_RESPONSE_PREFIXES = ('$OK', '$NK')
class EvseError(Exception):
    """Base error for all OpenEVSE communication failures."""
    pass
class EvseTimeoutError(EvseError):
    """No (complete) response arrived within the allotted timeout."""
    pass
class NoClock(EvseError):
    """The EVSE's RTC is unset (it returned the magic 'no clock' value)."""
    pass
class NotCharging(EvseError):
    """An operation requiring an active charge was attempted while idle."""
    pass
class BaseOpenEVSE:
"""Inherit from this class"""
def _silent_request(self, *args):
    """Send a request and ignore its response
    (abstract — transport subclasses must override)."""
    raise NotImplementedError
def _request(self, *args):
    """Send a request and return its response
    (abstract — transport subclasses must override; implementations in
    this module return a ``(done, data)`` tuple)."""
    raise NotImplementedError
def _reinitialize(self):
    """Reinitialize the connection after a reset.
    Overload this method if needed"""
    # Default: nothing to re-establish.
    return
def _flags(self):
"""Get EVSE controller flags
Specific values:
* service_level: 1 or 2
* lcd_type: 'monochrome' or 'rgb'
True for enabled, False for disabled:
* auto_service_level
* diode_check
* gfi_self_test
* ground_check
* stuck_relay_check
* vent_required
* auto_start
* serial_debug
"""
done, data = self._request('GE')
if done:
flags = int(data[1], 16)
else:
raise EvseError
return {
'service_level': (flags & 0x0001) + 1,
'diode_check': not flags & 0x0002,
'vent_required': not flags & 0x0004,
'ground_check': not flags & 0x0008,
'stuck_relay_check': not flags & 0x0010,
'auto_service_level': not flags & 0x0020,
'auto_start': not flags & 0x0040,
'serial_debug': not not flags & 0x0080,
'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',
'gfi_self_test': not flags & 0x0200
}
def reset(self):
    """Reset the OpenEVSE"""
    # $FR reboots the controller and produces no response, hence the
    # silent request followed by a transport re-initialization.
    self._silent_request('FR')
    self._reinitialize()
    time.sleep(1) # Let the OpenEVSE finish its boot sequence...
def lcd_backlight_color(self, color='off'):
    """Change the LCD backlight color ($FB).

    Allowed colors: 'off', 'red', 'green', 'yellow', 'blue', 'violet',
    'teal', 'white'. Default 'off' disables the backlight.

    Returns True on success; raises EvseError otherwise.
    """
    code = _lcd_colors.index(color)
    ok = self._request('FB', str(code))[0]
    if not ok:
        raise EvseError
    return True
def status(self, action=None):
    """Change or query the EVSE status.

    Allowed actions: 'enable', 'disable', 'sleep'. Without an action
    the current status is queried ($GS).

    Returns the status of the EVSE as a string; raises EvseError when a
    request fails.
    """
    if action:
        ok, payload = self._request(_status_functions[action])
        if not ok:
            raise EvseError
        if payload:
            return states[int(payload[0], 16)]
        # No payload in the reply: fall through to a plain status query.
    ok, payload = self._request('GS')
    if ok:
        return states[int(payload[0])]
    raise EvseError
def display_text(self, x, y, text):
    """Print *text* on the LCD with the cursor at (*x*, *y*) ($FP).

    Returns True on success; raises EvseError otherwise.
    """
    ok = self._request('FP', str(x), str(y), str(text))[0]
    if not ok:
        raise EvseError
    return True
def lcd_type(self, lcdtype=None):
    """Query or change the LCD type.

    Allowed types: 'monochrome', 'rgb'. When *lcdtype* is omitted the
    current type is read from the controller flags.

    Returns the LCD type; raises EvseError when setting fails.
    """
    if not lcdtype:
        return self._flags()['lcd_type']
    if self._request('S0', str(_lcd_types.index(lcdtype)))[0]:
        return lcdtype
    raise EvseError
def time(self, the_datetime=None):
    """Set ($S1) or get ($GT) the RTC time.

    With a datetime argument the clock is set and the same datetime is
    returned. Without one, the current OpenEVSE clock is returned as a
    datetime. Raises NoClock when the RTC is unset and EvseError on
    request failure.
    """
    if the_datetime:
        ok = self._request(
            'S1',
            the_datetime.strftime('%y'), str(the_datetime.month), str(the_datetime.day),
            str(the_datetime.hour), str(the_datetime.minute), str(the_datetime.second)
        )[0]
        if not ok:
            raise EvseError
        return the_datetime
    ok, payload = self._request('GT')
    if not ok:
        raise EvseError
    # The controller reports this magic tuple when the clock was never set.
    if payload == ['165', '165', '165', '165', '165', '85']:
        raise NoClock
    return datetime.datetime(
        year=int(payload[0]) + 2000, month=int(payload[1]), day=int(payload[2]),
        hour=int(payload[3]), minute=int(payload[4]), second=int(payload[5])
    )
def ammeter_calibration(self, enabled=True):
    """Enable or disable ammeter calibration mode ($S2).

    Returns True on success; raises EvseError otherwise.
    """
    ok = self._request('S2', str(int(enabled)))[0]
    if not ok:
        raise EvseError
    return True
def time_limit(self, limit=None):
    """Get or set the charge time limit, in minutes.
    This time is rounded to the nearest quarter hour.
    The maximum value is 3825 minutes.
    Returns the limit
    """
    # NOTE(review): units are asymmetric — the get path multiplies the
    # raw value by 15 (minutes), while the set path returns the rounded
    # quarter-hour count that was sent, not minutes. Confirm whether the
    # set path should return limit * 15 instead.
    if limit is None:
        done, data = self._request('G3')
        if done:
            return int(data[0])*15
    else:
        # Convert minutes to the controller's quarter-hour units.
        limit = int(round(limit/15.0))
        if self._request('S3', str(limit))[0]:
            return limit
    raise EvseError
def ammeter_settings(self, scalefactor=None, offset=None):
    """Set ($SA) or get ($GA) the ammeter settings.

    If either argument is None the current values are read instead of
    written. Returns a (scalefactor, offset) tuple; raises EvseError on
    failure.
    """
    if scalefactor is None or offset is None:
        ok, payload = self._request('GA')
        if ok:
            return int(payload[0]), int(payload[1])
        raise EvseError
    if self._request('SA', str(scalefactor), str(offset))[0]:
        return scalefactor, offset
    raise EvseError
def current_capacity(self, capacity=None):
    """Set ($SC) or get ($GE) the current capacity in amperes.

    A None or zero *capacity* reads the value; a truthy one writes it.
    Raises EvseError on failure.
    """
    if not capacity:
        ok, payload = self._request('GE')
        if ok:
            # First field of $GE is the capacity in amps.
            return int(payload[0])
        raise EvseError
    if self._request('SC', str(capacity))[0]:
        return capacity
    raise EvseError
def diode_check(self, enabled=None):
    """Query (enabled is None) or set the diode-check feature ($FF D).

    Returns the resulting status; raises EvseError when setting fails.
    """
    if enabled is None:
        return self._flags()['diode_check']
    flag = '1' if enabled else '0'
    ok = self._request('FF', 'D', flag)[0]
    if not ok:
        raise EvseError
    return enabled
def echo(self, enabled=True):
"""Enable or disable echo
THIS LIBRARY IS NOT MEANT TO BE USED WITH ECHO ENABLED
"""
if self._request('FF', 'E', '1' if enabled else '0')[0]:
return True
raise EvseError
def gfi_self_test(self, enabled=None):
"""
if enabled == True, enable the GFI self test
if enabled == False, disable the GFI self test
if enabled is not specified, request the GFI self test status
Returns the GFI self test status
"""
if enabled is None:
return self._flags()['gfi_self_test']
if self._request('FF', 'F', '1' if enabled else '0')[0]:
return enabled
raise EvseError
def ground_check(self, enabled=None):
"""
if enabled == True, enable the ground check
if enabled == False, disable the ground check
if enabled is not specified, request the | |
"set_project"
:param indata: data to be inserted
:param kwargs: used to override the indata descriptor
:param headers: http request headers
:return: _id: identity of the inserted data, operation _id (None)
"""
try:
content = BaseTopic._remove_envelop(indata)
# Override descriptor with query string kwargs
BaseTopic._update_input_with_kwargs(content, kwargs)
content = self._validate_input_new(content, session["force"])
self.check_conflict_on_new(session, content)
self.format_on_new(content, project_id=session["project_id"], make_public=session["public"])
_id = self.auth.create_project(content)
rollback.append({"topic": self.topic, "_id": _id})
# self._send_msg("created", content, not_send_msg=not_send_msg)
return _id, None
except ValidationError as e:
raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
def show(self, session, _id):
"""
Get complete information on an topic
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: server internal id
:return: dictionary, raise exception if not found.
"""
# Allow _id to be a name or uuid
filter_q = {self.id_field(self.topic, _id): _id}
# projects = self.auth.get_project_list(filter_q=filter_q)
projects = self.list(session, filter_q) # To allow default filtering (Bug 853)
if len(projects) == 1:
return projects[0]
elif len(projects) > 1:
raise EngineException("Too many projects found", HTTPStatus.CONFLICT)
else:
raise EngineException("Project not found", HTTPStatus.NOT_FOUND)
def list(self, session, filter_q=None, api_req=False):
"""
Get a list of the topic that matches a filter
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param filter_q: filter of data to be applied
:return: The list, it can be empty if no one match the filter.
"""
project_list = self.auth.get_project_list(filter_q)
if not session["allow_show_user_project_role"]:
# Bug 853 - Default filtering
user = self.auth.get_user(session["username"])
projects = [prm["project"] for prm in user["project_role_mappings"]]
project_list = [proj for proj in project_list if proj["_id"] in projects]
return project_list
def delete(self, session, _id, dry_run=False, not_send_msg=None):
"""
Delete item by its internal _id
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: server internal id
:param dry_run: make checking but do not delete
:param not_send_msg: To not send message (False) or store content (list) instead
:return: dictionary with deleted item _id. It raises EngineException on error: not found, conflict, ...
"""
# Allow _id to be a name or uuid
proj = self.auth.get_project(_id)
pid = proj["_id"]
self.check_conflict_on_del(session, pid, proj)
if not dry_run:
v = self.auth.delete_project(pid)
return v
return None
    def edit(self, session, _id, indata=None, kwargs=None, content=None):
        """
        Updates a project entry.
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param _id: server internal id (or name) of the project to update
        :param indata: data to be merged into the stored descriptor
        :param kwargs: used to override the indata descriptor
        :param content: current stored content; read back via show() when not provided
        :return: None. (The updated content is persisted through the auth
            backend; note that, unlike other topics, no _id is returned.)
        """
        indata = self._remove_envelop(indata)
        # Override descriptor with query string kwargs
        if kwargs:
            BaseTopic._update_input_with_kwargs(indata, kwargs)
        try:
            if not content:
                content = self.show(session, _id)
            indata = self._validate_input_edit(indata, content, force=session["force"])
            self.check_conflict_on_edit(session, content, indata, _id=_id)
            self.format_on_edit(content, indata)
            # RFC7396 merge-patch: apply "indata" on top of the stored content
            deep_update_rfc7396(content, indata)
            self.auth.update_project(content["_id"], content)
        except ValidationError as e:
            raise EngineException(e, HTTPStatus.UNPROCESSABLE_ENTITY)
class RoleTopicAuth(BaseTopic):
topic = "roles"
topic_msg = None # "roles"
schema_new = roles_new_schema
schema_edit = roles_edit_schema
multiproject = False
    def __init__(self, db, fs, msg, auth):
        """Role topic constructor.

        :param db: database backend
        :param fs: file storage backend
        :param msg: message bus backend
        :param auth: authentication/authorization backend; its
            role_permissions list drives role-definition validation
        """
        BaseTopic.__init__(self, db, fs, msg, auth)
        # self.auth = auth
        # Valid operations for permission validation come from the auth backend.
        self.operations = auth.role_permissions
        # self.topic = "roles_operations" if isinstance(auth, AuthconnKeystone) else "roles"
@staticmethod
def validate_role_definition(operations, role_definitions):
"""
Validates the role definition against the operations defined in
the resources to operations files.
:param operations: operations list
:param role_definitions: role definition to test
:return: None if ok, raises ValidationError exception on error
"""
if not role_definitions.get("permissions"):
return
ignore_fields = ["admin", "default"]
for role_def in role_definitions["permissions"].keys():
if role_def in ignore_fields:
continue
if role_def[-1] == ":":
raise ValidationError("Operation cannot end with ':'")
match = next((op for op in operations if op == role_def or op.startswith(role_def + ":")), None)
if not match:
raise ValidationError("Invalid permission '{}'".format(role_def))
def _validate_input_new(self, input, force=False):
"""
Validates input user content for a new entry.
:param input: user input content for the new topic
:param force: may be used for being more tolerant
:return: The same input content, or a changed version of it.
"""
if self.schema_new:
validate_input(input, self.schema_new)
self.validate_role_definition(self.operations, input)
return input
    def _validate_input_edit(self, input, content, force=False):
        """
        Validates input user content for updating an entry.
        :param input: user input content for the entry update
        :param content: current stored content of the entry (not used here)
        :param force: may be used for being more tolerant
        :return: The same input content, or a changed version of it.
        """
        if self.schema_edit:
            validate_input(input, self.schema_edit)
            # Also check that every permission key maps to a known operation.
            self.validate_role_definition(self.operations, input)
        return input
def check_conflict_on_new(self, session, indata):
"""
Check that the data to be inserted is valid
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param indata: data to be inserted
:return: None or raises EngineException
"""
# check name is not uuid
role_name = indata.get("name")
if is_valid_uuid(role_name):
raise EngineException("role name '{}' cannot have an uuid format".format(role_name),
HTTPStatus.UNPROCESSABLE_ENTITY)
# check name not exists
name = indata["name"]
# if self.db.get_one(self.topic, {"name": indata.get("name")}, fail_on_empty=False, fail_on_more=False):
if self.auth.get_role_list({"name": name}):
raise EngineException("role name '{}' exists".format(name), HTTPStatus.CONFLICT)
    def check_conflict_on_edit(self, session, final_content, edit_content, _id):
        """
        Check that the data to be edited/uploaded is valid
        :param session: contains "username", "admin", "force", "public", "project_id", "set_project"
        :param final_content: data once modified
        :param edit_content: incremental data that contains the modifications to apply
        :param _id: internal _id
        :return: None or raises EngineException
        """
        # Ensure the mandatory permission flags always exist after the edit.
        if "default" not in final_content["permissions"]:
            final_content["permissions"]["default"] = False
        if "admin" not in final_content["permissions"]:
            final_content["permissions"]["admin"] = False
        # check name is not uuid
        role_name = edit_content.get("name")
        if is_valid_uuid(role_name):
            raise EngineException("role name '{}' cannot have an uuid format".format(role_name),
                                  HTTPStatus.UNPROCESSABLE_ENTITY)
        # Check renaming of admin roles
        # NOTE(review): this rejects ANY edit of system_admin/project_admin,
        # even when edit_content does not include "name" — confirm whether
        # blocking non-rename edits of these roles is intended.
        role = self.auth.get_role(_id)
        if role["name"] in ["system_admin", "project_admin"]:
            raise EngineException("You cannot rename role '{}'".format(role["name"]), http_code=HTTPStatus.FORBIDDEN)
        # check name not exists
        if "name" in edit_content:
            role_name = edit_content["name"]
            # if self.db.get_one(self.topic, {"name":role_name,"_id.ne":_id}, fail_on_empty=False, fail_on_more=False):
            roles = self.auth.get_role_list({"name": role_name})
            # A hit is only a conflict when it belongs to a different role.
            if roles and roles[0][BaseTopic.id_field("roles", _id)] != _id:
                raise EngineException("role name '{}' exists".format(role_name), HTTPStatus.CONFLICT)
def check_conflict_on_del(self, session, _id, db_content):
"""
Check if deletion can be done because of dependencies if it is not force. To override
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: internal _id
:param db_content: The database content of this item _id
:return: None if ok or raises EngineException with the conflict
"""
role = self.auth.get_role(_id)
if role["name"] in ["system_admin", "project_admin"]:
raise EngineException("You cannot delete role '{}'".format(role["name"]), http_code=HTTPStatus.FORBIDDEN)
# If any user is using this role, raise CONFLICT exception
if not session["force"]:
for user in self.auth.get_user_list():
for prm in user.get("project_role_mappings"):
if prm["role"] == _id:
raise EngineException("Role '{}' ({}) is being used by user '{}'"
.format(role["name"], _id, user["username"]), HTTPStatus.CONFLICT)
@staticmethod
def format_on_new(content, project_id=None, make_public=False): # TO BE REMOVED ?
"""
Modifies content descriptor to include _admin
:param content: descriptor to be modified
:param project_id: if included, it add project read/write permissions
:param make_public: if included it is generated as public for reading.
:return: None, but content is modified
"""
now = time()
if "_admin" not in content:
content["_admin"] = {}
if not content["_admin"].get("created"):
content["_admin"]["created"] = now
content["_admin"]["modified"] = now
if "permissions" not in content:
content["permissions"] = {}
if "default" not in content["permissions"]:
content["permissions"]["default"] = False
if "admin" not in content["permissions"]:
content["permissions"]["admin"] = False
@staticmethod
def format_on_edit(final_content, edit_content):
"""
Modifies final_content descriptor to include the modified date.
:param final_content: final descriptor generated
:param edit_content: alterations to be include
:return: None, but final_content is modified
"""
if "_admin" in final_content:
final_content["_admin"]["modified"] = time()
if "permissions" not in final_content:
final_content["permissions"] = {}
if "default" not in final_content["permissions"]:
final_content["permissions"]["default"] = False
if "admin" not in final_content["permissions"]:
final_content["permissions"]["admin"] = False
return None
def show(self, session, _id):
"""
Get complete information on an topic
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param _id: server internal id
:return: dictionary, raise exception if not found.
"""
filter_q = {BaseTopic.id_field(self.topic, _id): _id}
# roles = self.auth.get_role_list(filter_q)
roles = self.list(session, filter_q) # To allow default filtering (Bug 853)
if not roles:
raise AuthconnNotFoundException("Not found any role with filter {}".format(filter_q))
elif len(roles) > 1:
raise AuthconnConflictException("Found more than one role with filter {}".format(filter_q))
return roles[0]
def list(self, session, filter_q=None, api_req=False):
"""
Get a list of the topic that matches a filter
:param session: contains "username", "admin", "force", "public", "project_id", "set_project"
:param filter_q: filter of data to be applied
:return: The list, it can be empty if no one match the filter.
"""
role_list = self.auth.get_role_list(filter_q)
if | |
<reponame>IRMVLab/ASTA3DConv
""" PointNet++ Layers
Author: <NAME>
Modified by <NAME>
Date: November 2019
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/sampling'))
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/grouping'))
sys.path.append(os.path.join(ROOT_DIR, 'tf_ops/3d_interpolation'))
from tf_sampling import farthest_point_sample, gather_point
from tf_grouping import query_ball_point, query_ball_point_var_rad, group_point, knn_point
from tf_interpolate import three_nn, three_interpolate
import tensorflow as tf
import numpy as np
import tf_util
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Sample npoint centroids with farthest point sampling and group each
    centroid's local neighbourhood (ball query or kNN).

    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        sample_idx: indices of the sampled centroids, as returned by
            farthest_point_sample
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    # Farthest point sampling picks npoint well-spread seed points.
    sample_idx = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, sample_idx)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)
    # Express neighbour coordinates relative to their centroid.
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, sample_idx, grouped_xyz
def sample_and_group_all(xyz, points, use_xyz=True):
    '''
    Group the whole point cloud into a single region centred at (0,0,0).

    Inputs:
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Outputs:
        new_xyz: (batch_size, 1, 3) as (0,0,0)
        new_points: (batch_size, 1, ndataset, 3+channel) TF tensor
        idx: (batch_size, 1, ndataset) constant tensor of indices 0..ndataset-1
        grouped_xyz: (batch_size, 1, ndataset, 3) TF tensor (xyz reshaped)
    Note:
        Equivalent to sample_and_group with npoint=1, radius=inf, use (0,0,0) as the centroid
    '''
    # TF1-style static shapes: batch size and point count must be known
    # at graph-construction time (".value" access).
    batch_size = xyz.get_shape()[0].value
    nsample = xyz.get_shape()[1].value
    new_xyz = tf.constant(np.tile(np.array([0,0,0]).reshape((1,1,3)), (batch_size,1,1)),dtype=tf.float32)
    # Every input point belongs to the single group.
    idx = tf.constant(np.tile(np.array(range(nsample)).reshape((1,1,nsample)), (batch_size,1,1)))
    grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3))
    if points is not None:
        if use_xyz:
            new_points = tf.concat([xyz, points], axis=2)
        else:
            new_points = points
        new_points = tf.expand_dims(new_points, 1)
    else:
        new_points = grouped_xyz
    return new_xyz, new_points, idx, grouped_xyz
def meteor_direct_module(xyz, time, points, num_point,npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope, bn=True, pooling='max', knn=False, use_xyz=True, use_nchw=False):
    '''
    Set-abstraction layer for 4D (space + time) point clouds: sample
    centroids, group neighbours with a radius that depends on the time
    difference to the centroid, embed with an MLP and max-pool.

    Input:
        xyz: (batch_size, ndataset, 3) TF tensor
        time: (batch_size, ndataset, 1) TF tensor
        points: (batch_size, ndataset, channel) TF tensor
        npoint: int32 -- #points sampled in farthest point sampling
        radius: list of float32 -- search radiuses in local region, indexed
            by the (integer-cast) time difference between centroid and point
        nsample: int32 -- how many points in each local region
        mlp: list of int32 -- output size for MLP on each point
        mlp2: list of int32 -- output size for MLP on each region (unused here)
        group_all: bool -- group all points into one PC if set true, OVERRIDE
            npoint, radius and nsample settings (unused here)
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
    Return:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_time: (batch_size, npoint, 1) TF tensor, times of the sampled centroids
        new_points: (batch_size, npoint, mlp[-1]) TF tensor
        idx: (batch_size, npoint, nsample) int32 -- indices for local regions
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    sample_idx = None
    batch_size = xyz.get_shape()[0].value
    with tf.variable_scope(scope) as sc:
        ##### sample and group with variable radius
        sample_idx = farthest_point_sample(npoint, xyz)
        new_xyz = gather_point(xyz, sample_idx)
        new_time = gather_point(time, sample_idx)
        time_ = tf.reshape(time, [batch_size, 1, -1])
        # Per-pair absolute time difference between centroid and point;
        # used as an index into the radius list (larger gap -> other radius).
        new_time_ = tf.abs(new_time - time_)
        radius_ = tf.gather(radius, tf.cast(new_time_, tf.int32))
        idx, pts_cnt = query_ball_point_var_rad(radius_, nsample, xyz, new_xyz)
        grouped_xyz = group_point(xyz, idx)
        grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
        if points is not None:
            grouped_points = group_point(points, idx)
            grouped_time = group_point(time, idx)
            if use_xyz:
                # Relative coords + features + raw times per neighbour.
                new_points = tf.concat([grouped_xyz, grouped_points, grouped_time], axis=-1)
            else:
                new_points = grouped_points
        else:
            new_points = grouped_xyz
        # Point Feature Embedding
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points, num_out_channel, [1,1],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, is_training=is_training,
                                        scope='conv%d'%(i), bn_decay=bn_decay,
                                        data_format=data_format)
        # Symmetric max-pool over the nsample neighbours of each region.
        new_points = tf.reduce_max(new_points, axis=[2], name='maxpool')
        return new_xyz, new_time, new_points, idx
def meteor_direct_module8(xyz, time, points, num_point,npoint, radius, nsample, mlp, mlp2, group_all, is_training, bn_decay, scope,
                          bn=True,side=1,side_scale=1, radius_scale=0.25, pooling='max', knn=False, use_xyz=True, use_nchw=False):
    '''
    Anchor-based spatio-temporal set abstraction: each sampled centroid is
    surrounded by 4 anchor points (tetrahedron vertices); neighbours are
    gathered around every anchor with a time-dependent radius, attention-
    weighted, pooled per anchor and finally aggregated across the 4 anchors.

    Input:
        xyz: (batch_size, ndataset, 3) TF tensor
        time: (batch_size, ndataset, 1) TF tensor
        points: (batch_size, ndataset, channel) TF tensor
        npoint: int32 -- #points sampled in farthest point sampling
        radius: list of float32 -- search radiuses in local region, scaled
            by radius_scale and indexed by centroid/point time difference
        nsample: int32 -- how many points in each local region (NOTE: the
            anchor query below uses a hard-coded 8 instead)
        mlp: list of int32 -- output size for MLP on each point
        mlp2: list of int32 -- output size for MLP on each region (anchor aggregation)
        group_all: bool -- group all points into one PC if set true, OVERRIDE
            npoint, radius and nsample settings (unused here)
        side, side_scale: anchor geometry parameters (side is documented as
            cube side; only the tetrahedron offsets below are used)
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
        use_nchw: bool, if True, use NCHW data format for conv2d, which is usually faster than NHWC format
    Return:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_time: (batch_size, npoint, 1) TF tensor
        new_points: (batch_size, npoint, mlp2[-1]) TF tensor
        idx: (batch_size, npoint*4, 8) int32 -- indices for local regions (per anchor)
    '''
    data_format = 'NCHW' if use_nchw else 'NHWC'
    print(xyz)
    sample_idx = None
    batch_size = xyz.get_shape()[0].value
    with tf.variable_scope(scope) as sc:
        ##### sample and group with variable radius
        # <------My Code: Find 2048 * 27
        # Unit offsets of a regular tetrahedron around each centroid.
        delta_xyz = [[0.4714, -0.8165, -0.3333], [0.4714, 0.8165, -0.3333], [-0.9428, 0.0, -0.3333], [0.0, 0.0, 1.0]]
        standard=radius[0]
        radius=(radius*radius_scale)
        standard=standard*radius_scale
        delta_xyz = tf.reshape(tf.convert_to_tensor(delta_xyz), [1, 1, 4, 3]) # inside_covert [27,3] outside_reshape
        sample_idx = farthest_point_sample(npoint, xyz) # index of 2048 from current frame
        new_xyz_core = gather_point(xyz, sample_idx)
        new_xyz = tf.expand_dims(new_xyz_core, 2)
        # side=1: side of cube
        # Place 4 anchors per centroid, scaled by the base radius.
        new_anchor_xyz = tf.reshape(new_xyz + delta_xyz*standard, [batch_size, npoint * 4, 3])
        new_time_core = gather_point(time, sample_idx)
        # Each anchor inherits its centroid's timestamp.
        new_time = tf.tile(new_time_core, [1, 4, 1])
        time_a = tf.tile(new_time, [1, 1, 8])
        time_b = tf.reshape(time_a, [batch_size, npoint * 4, 8, 1])
        time_ = tf.reshape(time, [batch_size, 1, -1])
        new_time_ = tf.abs(new_time - time_)
        radius_ = tf.gather(radius, tf.cast(new_time_, tf.int32))
        # End My Code: Find 2048 * 27 ------>
        # <-----My query_ball
        # 9 represents another sample
        idx, pts_cnt = query_ball_point_var_rad(radius_, 8, xyz, new_anchor_xyz)
        # Mask out anchors that found no neighbours (pts_cnt == 0).
        mask = tf.reshape(tf.stop_gradient(tf.cast(pts_cnt > 0, float)), [batch_size, npoint * 4, 1, 1])
        grouped_xyz = group_point(xyz, idx)
        grouped_xyz -= tf.tile(tf.expand_dims(new_anchor_xyz, 2),[1,1,8,1])
        if points is not None:
            grouped_points = group_point(points, idx)
            grouped_time = group_point(time, idx)
            temp_time = tf.abs(grouped_time - time_b)
            if use_xyz:
                new_points = tf.concat([grouped_xyz, grouped_points, temp_time],
                                       axis=-1)
            else:
                new_points = grouped_points
        else:
            new_points = grouped_xyz
        new_points = new_points * mask
        # My query_ball------>
        # <-----My Point Feature Embedding
        for i, num_out_channel in enumerate(mlp):
            new_points = tf_util.conv2d(new_points, num_out_channel, [1, 1],
                                        padding='VALID', stride=[1, 1],
                                        bn=bn, is_training=is_training,
                                        scope='conv%d' % (i), bn_decay=bn_decay,
                                        data_format=data_format)
        # After mlp : 2 layers
        # Attention over the 8 neighbours of each anchor: a learned softmax
        # weight replaces plain max pooling.
        local_feature = tf.concat([new_points, grouped_points, grouped_xyz, temp_time], axis=-1)
        local_weight = tf_util.conv2d(local_feature, new_points.get_shape()[-1], kernel_size=[1, 1],
                                      padding='VALID', stride=[1, 1],
                                      bn=bn, is_training=is_training, activation_fn=None,#
                                      scope='convw', bn_decay=bn_decay, data_format=data_format)
        local_weight = tf.nn.softmax(local_weight, dim=2)
        new_points = tf.multiply(new_points, local_weight)
        new_points = tf.reduce_sum(new_points, axis=2)
        # After pooling : 1 layer
        # Aggregate the 4 anchors of each centroid with a [1,4] convolution.
        new_points = tf.reshape(new_points, [batch_size, npoint, 4, -1])
        for i, num_out_channel in enumerate(mlp2):
            i+=len(mlp)
            new_points = tf_util.conv2d(new_points, num_out_channel, [1, 4],
                                        padding='VALID', stride=[1, 1],
                                        bn=bn, is_training=is_training,
                                        scope='conva%d' % (i), bn_decay=bn_decay,
                                        data_format=data_format)
        # After mlp : 1 layers
        new_points = tf.squeeze(new_points, [2])
        # After pooling : 2 layers
        return new_xyz_core, new_time_core, new_points, idx
# My Point Feature Embedding------>
def set_upconv_module(xyz1, xyz2, feat1, feat2, nsample, mlp, mlp2, is_training, scope, bn_decay=None, bn=True, pooling='max', radius=None, knn=True):
"""
Feature propagation from xyz2 (less points) to xyz1 (more points)
Inputs:
xyz1: (batch_size, npoint1, 3)
xyz2: (batch_size, npoint2, 3)
feat1: (batch_size, npoint1, channel1) features for xyz1 points (earlier layers)
feat2: (batch_size, npoint2, channel2) features for xyz2 points
Output:
feat1_new: (batch_size, npoint2, mlp[-1] or mlp2[-1] or channel1+3)
TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.
"""
with tf.variable_scope(scope) as sc:
if knn:
l2_dist, idx = knn_point(nsample, xyz2, xyz1)
else:
idx, pts_cnt = query_ball_point(radius, nsample, xyz2, xyz1)
xyz2_grouped = group_point(xyz2, idx)
xyz1_expanded = tf.expand_dims(xyz1, 2)
xyz_diff = xyz2_grouped - xyz1_expanded
feat2_grouped = group_point(feat2, idx)
net = tf.concat([feat2_grouped, xyz_diff], axis=3)
if mlp is None: mlp=[]
for i, num_out_channel in enumerate(mlp):
net = tf_util.conv2d(net, num_out_channel, [1,1],
padding='VALID', stride=[1,1],
bn=True, | |
"""
# Signal to the thread that we are reading to stop processing events.
self.end_capture.set()
# Call CloseTrace to cause ProcessTrace to return (unblock)
et.CloseTrace(self.trace_handle)
# If ProcessThread is actively parsing an event, we want to give it a chance to finish
# before pulling the rug out from underneath it.
self.process_thread.join()
@staticmethod
def check_callback_flag(flag):
"""
Checks callback flags.
:return: Returns flags on success, on failure raises exception
"""
flags = [RETURN_RAW_DATA_ONLY,
RETURN_RAW_DATA_ON_ERROR,
RETURN_ONLY_RAW_DATA_ON_ERROR,
RETURN_RAW_UNFORMATTED_DATA]
if flag not in flags:
raise Exception('Callback flag value {:d} passed into EventConsumer is invalid'.format(flag))
return flag
@staticmethod
def _run(trace_handle, end_capture):
"""
Because ProcessTrace() blocks, this function is used to spin off new threads.
:param trace_handle: The handle for the trace consumer that we want to begin processing.
:param end_capture: A callback function which determines what should be done with the results.
:return: Does not return a value.
"""
while True:
if tdh.ERROR_SUCCESS != et.ProcessTrace(ct.byref(trace_handle), 1, None, None):
end_capture.set()
if end_capture.isSet():
break
    @staticmethod
    def _getEventInformation(record):
        """
        Initially we are handed an EVENT_RECORD structure. While this structure technically contains
        all of the information necessary, TdhGetEventInformation parses the structure and simplifies it
        so we can more effectively parse and handle the various fields.
        :param record: The EventRecord structure for the event we are parsing
        :return: Returns a pointer to a TRACE_EVENT_INFO structure; raises WinError on failure.
        """
        info = ct.POINTER(tdh.TRACE_EVENT_INFO)()
        buffer_size = wt.DWORD()
        # Call TdhGetEventInformation once to get the required buffer size and again to actually populate the structure.
        status = tdh.TdhGetEventInformation(record, 0, None, None, ct.byref(buffer_size))
        if tdh.ERROR_INSUFFICIENT_BUFFER == status:
            # Allocate a buffer of the requested size and reinterpret it
            # as a TRACE_EVENT_INFO pointer for the second call.
            info = ct.cast((ct.c_byte * buffer_size.value)(), ct.POINTER(tdh.TRACE_EVENT_INFO))
            status = tdh.TdhGetEventInformation(record, 0, None, info, ct.byref(buffer_size))
        if tdh.ERROR_SUCCESS != status:
            raise ct.WinError(status)
        return info
@staticmethod
def _getArraySize(record, info, event_property):
"""
Some of the properties encountered when parsing represent an array of values. This function
will retrieve the size of the array.
:param record: The EventRecord structure for the event we are parsing
:param info: The TraceEventInfo structure for the event we are parsing
:param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing
:return: Returns a DWORD representing the size of the array or None on error.
"""
event_property_array = ct.cast(info.contents.EventPropertyInfoArray, ct.POINTER(tdh.EVENT_PROPERTY_INFO))
flags = event_property.Flags
if flags & tdh.PropertyParamCount:
data_descriptor = tdh.PROPERTY_DATA_DESCRIPTOR()
j = event_property.epi_u2.countPropertyIndex
property_size = wt.DWORD()
count = wt.DWORD()
data_descriptor.PropertyName = info + event_property_array[j].NameOffset
data_descriptor.ArrayIndex = MAX_UINT
status = tdh.TdhGetPropertySize(record, 0, None, 1, ct.byref(data_descriptor), ct.byref(property_size))
if tdh.ERROR_SUCCESS != status:
raise ct.WinError(status)
status = tdh.TdhGetProperty(record, 0, None, 1, ct.byref(data_descriptor), property_size, ct.byref(count))
if tdh.ERROR_SUCCESS != status:
raise ct.WinError(status)
return count
if flags & tdh.PropertyParamFixedCount:
raise ETWException('PropertyParamFixedCount not supported')
return event_property.epi_u2.count
    @staticmethod
    def _getPropertyLength(record, info, event_property):
        """
        Each property encountered when parsing the top level property has an associated length. If the
        length is available, retrieve it here. In some cases, the length is 0. This can signify that
        we are dealing with a variable length field such as a structure, an IPV6 data, or a string.
        :param record: The EventRecord structure for the event we are parsing
        :param info: The TraceEventInfo structure for the event we are parsing
        :param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing
        :return: Returns the length of the property; raises WinError on failure
        """
        flags = event_property.Flags
        if flags & tdh.PropertyParamLength:
            # The length is carried by another property; look it up by name.
            data_descriptor = tdh.PROPERTY_DATA_DESCRIPTOR()
            event_property_array = ct.cast(info.contents.EventPropertyInfoArray, ct.POINTER(tdh.EVENT_PROPERTY_INFO))
            j = wt.DWORD(event_property.epi_u3.length)
            property_size = ct.c_ulong()
            length = wt.DWORD()
            # Setup the PROPERTY_DATA_DESCRIPTOR structure
            # PropertyName is an absolute address: buffer base + name offset.
            data_descriptor.PropertyName = (ct.cast(info, ct.c_voidp).value + event_property_array[j.value].NameOffset)
            data_descriptor.ArrayIndex = MAX_UINT
            status = tdh.TdhGetPropertySize(record, 0, None, 1, ct.byref(data_descriptor), ct.byref(property_size))
            if tdh.ERROR_SUCCESS != status:
                raise ct.WinError(status)
            status = tdh.TdhGetProperty(record,
                                        0,
                                        None,
                                        1,
                                        ct.byref(data_descriptor),
                                        property_size,
                                        ct.cast(ct.byref(length), ct.POINTER(ct.c_byte)))
            if tdh.ERROR_SUCCESS != status:
                raise ct.WinError(status)
            return length.value
        in_type = event_property.epi_u1.nonStructType.InType
        out_type = event_property.epi_u1.nonStructType.OutType
        # This is a special case in which the input and output types dictate the size
        if (in_type == tdh.TDH_INTYPE_BINARY) and (out_type == tdh.TDH_OUTTYPE_IPV6):
            return ct.sizeof(ia.IN6_ADDR)
        return event_property.epi_u3.length
@staticmethod
def _getMapInfo(record, info, event_property):
"""
When parsing a field in the event property structure, there may be a mapping between a given
name and the structure it represents. If it exists, we retrieve that mapping here.
Because this may legitimately return a NULL value we return a tuple containing the success or
failure status as well as either None (NULL) or an EVENT_MAP_INFO pointer.
:param record: The EventRecord structure for the event we are parsing
:param info: The TraceEventInfo structure for the event we are parsing
:param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing
:return: A tuple of the map_info structure and boolean indicating whether we succeeded or not
"""
map_name = rel_ptr_to_str(info, event_property.epi_u1.nonStructType.MapNameOffset)
map_size = wt.DWORD()
map_info = ct.POINTER(tdh.EVENT_MAP_INFO)()
status = tdh.TdhGetEventMapInformation(record, map_name, None, ct.byref(map_size))
if tdh.ERROR_INSUFFICIENT_BUFFER == status:
map_info = ct.cast((ct.c_char * map_size.value)(), ct.POINTER(tdh.EVENT_MAP_INFO))
status = tdh.TdhGetEventMapInformation(record, map_name, map_info, ct.byref(map_size))
if tdh.ERROR_SUCCESS == status:
return map_info, True
# ERROR_NOT_FOUND is actually a perfectly acceptable status
if tdh.ERROR_NOT_FOUND == status:
return None, True
# We actually failed.
raise ct.WinError()
    def _unpackSimpleType(self, record, info, event_property):
        """
        This method handles dumping all simple types of data (i.e., non-struct types).
        :param record: The EventRecord structure for the event we are parsing
        :param info: The TraceEventInfo structure for the event we are parsing
        :param event_property: The EVENT_PROPERTY_INFO structure for the TopLevelProperty of the event we are parsing
        :return: Returns a key-value pair as a dictionary. If we fail, the dictionary is {}
        """
        # Get the EVENT_MAP_INFO, if it is present.
        map_info, success = self._getMapInfo(record, info, event_property)
        if not success:
            return {}
        # Get the length of the value of the property we are dealing with.
        property_length = self._getPropertyLength(record, info, event_property)
        if property_length is None:
            return {}
        # The version of the Python interpreter may be different than the system architecture.
        # Pointer width comes from the event header, not from this process.
        if record.contents.EventHeader.Flags & ec.EVENT_HEADER_FLAG_32_BIT_HEADER:
            ptr_size = 4
        else:
            ptr_size = 8
        name_field = rel_ptr_to_str(info, event_property.NameOffset)
        # Variable-length field: fall back to the length announced by a
        # preceding "...Length" field (tracked in self.vfield_length).
        if property_length == 0 and self.vfield_length is not None:
            if self.vfield_length == 0:
                self.vfield_length = None
                return {name_field: None}
            # If vfield_length isn't 0, we should be able to parse the property.
            property_length = self.vfield_length
        # After calling the TdhFormatProperty function, use the UserDataConsumed parameter value to set the new values
        # of the UserData and UserDataLength parameters (Subtract UserDataConsumed from UserDataLength and use
        # UserDataLength to increment the UserData pointer).
        # All of the variables needed to actually use TdhFormatProperty retrieve the value
        # self.index tracks how far into the event's UserData we have parsed.
        user_data = record.contents.UserData + self.index
        user_data_remaining = record.contents.UserDataLength - self.index
        # if there is no data remaining then return
        if user_data_remaining <= 0:
            logger.warning('No more user data left, returning none for field {:s}'.format(name_field))
            return {name_field: None}
        in_type = event_property.epi_u1.nonStructType.InType
        out_type = event_property.epi_u1.nonStructType.OutType
        formatted_data_size = wt.DWORD()
        formatted_data = wt.LPWSTR()
        user_data_consumed = ct.c_ushort()
        # First call: probe the required output buffer size.
        status = tdh.TdhFormatProperty(info,
                                       map_info,
                                       ptr_size,
                                       in_type,
                                       out_type,
                                       ct.c_ushort(property_length),
                                       user_data_remaining,
                                       ct.cast(user_data, ct.POINTER(ct.c_byte)),
                                       ct.byref(formatted_data_size),
                                       None,
                                       ct.byref(user_data_consumed))
        if status == tdh.ERROR_INSUFFICIENT_BUFFER:
            # Second call: allocate the reported size and format for real.
            formatted_data = ct.cast((ct.c_char * formatted_data_size.value)(), wt.LPWSTR)
            status = tdh.TdhFormatProperty(info,
                                           map_info,
                                           ptr_size,
                                           in_type,
                                           out_type,
                                           ct.c_ushort(property_length),
                                           user_data_remaining,
                                           ct.cast(user_data, ct.POINTER(ct.c_byte)),
                                           ct.byref(formatted_data_size),
                                           formatted_data,
                                           ct.byref(user_data_consumed))
        if status != tdh.ERROR_SUCCESS:
            # We can handle this error and still capture the data.
            logger.warning('Failed to get data field data for {:s}, incrementing by reported size'.format(name_field))
            self.index += property_length
            return {name_field: None}
        # Increment where we are in the user data segment that we are parsing.
        self.index += user_data_consumed.value
        # Fields named "...Length" announce the size of a following
        # variable-length field; remember the value for the next property.
        if name_field.lower().endswith('length'):
            try:
                self.vfield_length = int(formatted_data.value, 10)
            except ValueError:
                logger.warning('Setting vfield_length to None')
                self.vfield_length = None
        data = formatted_data.value
        # Convert the formatted data if necessary
        if out_type in tdh.TDH_CONVERTER_LOOKUP and type(data) == tdh.TDH_CONVERTER_LOOKUP[out_type]:
            data = tdh.TDH_CONVERTER_LOOKUP[out_type](data)
        return {name_field: data}
def _parseExtendedData(self, record):
"""
This method handles dumping all extended data from the record
:param record: The EventRecord structure for the event we are parsing
:return: Returns a key-value pair as a dictionary.
"""
result = {}
for i in range(record.contents.ExtendedDataCount):
ext_type = record.contents.ExtendedData[i].ExtType
data_ptr = record.contents.ExtendedData[i].DataPtr
data_size = record.contents.ExtendedData[i].DataSize
try:
if ext_type == ec.EVENT_HEADER_EXT_TYPE_RELATED_ACTIVITYID:
d = ct.cast(data_ptr, ct.POINTER(ec.EVENT_EXTENDED_ITEM_RELATED_ACTIVITYID))
result['RelatedActivityID'] = str(d.contents.RelatedActivityId)
| |
#!/usr/bin/env python
"""
oleobj.py
oleobj is a Python script and module to parse OLE objects and files stored
into various file formats such as RTF or MS Office documents (e.g. Word, Excel).
Author: <NAME> - http://www.decalage.info
License: BSD, see source code or documentation
oleobj is part of the python-oletools package:
http://www.decalage.info/python/oletools
"""
# BUGFIX: the module docstring must come BEFORE the __future__ import.
# A __future__ import may follow the docstring, but when it precedes the
# string literal (as in the original), the string is an ordinary expression
# statement and module.__doc__ is never set.
from __future__ import print_function
# === LICENSE ==================================================================
# oleobj is copyright (c) 2015-2016 <NAME> (http://www.decalage.info)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
#  * Redistributions of source code must retain the above copyright notice, this
#    list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
# CHANGELOG:
# 2015-12-05 v0.01 PL: - first version
# 2016-06    PL: - added main and process_file (not working yet)
# 2016-07-18 v0.48 SL: - added Python 3.5 support
# 2016-07-19 PL: - fixed Python 2.6-7 support
__version__ = '0.48'
#------------------------------------------------------------------------------
# TODO:
# + setup logging (common with other oletools)
#------------------------------------------------------------------------------
# REFERENCES:
# Reference for the storage of embedded OLE objects/files:
# [MS-OLEDS]: Object Linking and Embedding (OLE) Data Structures
# https://msdn.microsoft.com/en-us/library/dd942265.aspx
# - officeparser: https://github.com/unixfreak0037/officeparser
# TODO: oledump
#--- IMPORTS ------------------------------------------------------------------
# stdlib imports, one per line (PEP 8)
import logging
import struct
import optparse
import os
import re
import sys
# bundled third-party dependencies shipped with oletools
from thirdparty.olefile import olefile
from thirdparty.xglob import xglob
# === LOGGING =================================================================
class NullHandler(logging.Handler):
    """
    Logging handler that silently discards every record, so that this
    library emits nothing unless the host application configures logging
    itself.
    Python 2.7+ ships logging.NullHandler, but this backport is required
    to keep Python 2.6 working:
    see https://docs.python.org/2.6/library/logging.html#configuring-logging-for-a-library
    """
    def emit(self, record):
        # deliberately do nothing with the record
        pass
def get_logger(name, level=logging.CRITICAL+1):
    """
    Return a logger suitable for this module without touching the root
    logger, so other modules' logs do not end up on screen.

    An already-registered logger with the same name is reused; creating a
    second one would attach duplicate handlers and double every message.
    The default level CRITICAL+1 disables all output.
    """
    # Check the registry BEFORE logging.getLogger creates the entry.
    # NOTE: a less intrusive but more "hackish" alternative would be to
    # call getLogger and test whether its effective level is the default.
    already_registered = name in logging.Logger.manager.loggerDict
    logger = logging.getLogger(name)
    if not already_registered:
        # brand-new logger: attach only a NullHandler; configuring real
        # output is the application's job, not the library's
        logger.addHandler(NullHandler())
    # in both cases, make sure the requested level is applied
    logger.setLevel(level)
    return logger
# a global logger object used for debugging:
log = get_logger('oleobj')
# === CONSTANTS ==============================================================
# some str methods on Python 2.x return characters,
# while the equivalent bytes methods return integers on Python 3.x:
if sys.version_info[0] <= 2:
    # Python 2.x: indexing a str yields a 1-char str, so compare to '\x00'
    NULL_CHAR = '\x00'
else:
    # Python 3.x: indexing a bytes object yields an int, so compare to 0
    NULL_CHAR = 0
# === GLOBAL VARIABLES =======================================================
# struct to parse an unsigned integer of 32 bits:
struct_uint32 = struct.Struct('<L')
assert struct_uint32.size == 4  # make sure it matches 4 bytes
# struct to parse an unsigned integer of 16 bits:
struct_uint16 = struct.Struct('<H')
assert struct_uint16.size == 2  # make sure it matches 2 bytes
# === FUNCTIONS ==============================================================
def read_uint32(data):
    """
    Decode the first 4 bytes of data as a little-endian unsigned 32-bit
    integer.
    :param data: bytes string containing the data to be extracted.
    :return: tuple (value, new_data) containing the read value (int),
        and the new data without the bytes read.
    """
    (value,) = struct.unpack('<L', data[:4])
    return (value, data[4:])
def read_uint16(data):
    """
    Decode the first 2 bytes of data as a little-endian unsigned 16-bit
    integer.
    :param data: bytes string containing the data to be extracted.
    :return: tuple (value, new_data) containing the read value (int),
        and the new data without the bytes read.
    """
    (value,) = struct.unpack('<H', data[:2])
    return (value, data[2:])
def read_LengthPrefixedAnsiString(data):
    """
    Read a length-prefixed ANSI string from data.
    (see MS-OLEDS 2.1.4 LengthPrefixedAnsiString)
    The 32-bit length prefix counts the bytes of the string INCLUDING its
    terminating null character.
    :param data: bytes string containing the data to be extracted.
    :return: tuple (value, new_data) containing the read value
        (bytes string, without the terminating null character),
        and the new data without the bytes read.
    """
    length, data = read_uint32(data)
    # if length = 0, return a null string (no null character)
    if length == 0:
        return ('', data)
    # extract the string without the last null character
    ansi_string = data[:length-1]
    # TODO: only in strict mode:
    # check the presence of the null char: it is the LAST byte counted by
    # the length prefix, i.e. at index length-1.
    # BUGFIX: the original checked data[length], one byte PAST the string,
    # which belongs to the next field of the structure.
    assert data[length-1] == NULL_CHAR
    new_data = data[length:]
    return (ansi_string, new_data)
# === CLASSES ================================================================
class OleNativeStream (object):
    """
    OLE object contained into an OLENativeStream structure.
    (see MS-OLEDS 2.3.6 OLENativeStream)
    """
    # constants for the type attribute:
    # see MS-OLEDS 2.2.4 ObjectHeader
    TYPE_LINKED = 0x01
    TYPE_EMBEDDED = 0x02

    def __init__(self, bindata=None):
        """
        Constructor for OleNativeStream.
        If bindata is provided, it is parsed immediately with parse().
        :param bindata: bytes, OLENativeStream structure containing an OLE object
        """
        # attributes filled in by parse(); None until then
        self.filename = None
        self.src_path = None
        self.unknown_short = None
        self.unknown_long_1 = None
        self.unknown_long_2 = None
        self.temp_path = None
        self.actual_size = None
        self.data = None
        if bindata is not None:
            self.parse(data=bindata)

    def parse(self, data):
        """
        Parse binary data containing an OLENativeStream structure,
        to extract the OLE object it contains.
        (see MS-OLEDS 2.3.6 OLENativeStream)
        :param data: bytes, OLENativeStream structure containing an OLE object
        :return: None (attributes are filled in place)
        """
        # TODO: strict mode to raise exceptions when values are incorrect
        # (permissive mode by default)
        # 16-bit field of unknown purpose (possibly an OLE type specifier?)
        self.unknown_short, = struct.unpack('<H', data[:2])
        data = data[2:]
        # embedded file name, null-terminated
        self.filename, data = data.split(b'\x00', 1)
        # source path, null-terminated
        self.src_path, data = data.split(b'\x00', 1)
        # TODO: these next 8 bytes may be a timestamp (FILETIME from olefile)
        self.unknown_long_1, = struct.unpack('<L', data[:4])
        self.unknown_long_2, = struct.unpack('<L', data[4:8])
        data = data[8:]
        # temporary path? (null-terminated)
        self.temp_path, data = data.split(b'\x00', 1)
        # size of the rest of the data
        self.actual_size, = struct.unpack('<L', data[:4])
        data = data[4:]
        self.data = data[:self.actual_size]
        # TODO: exception when size > remaining data
        # TODO: SLACK DATA
class OleObject (object):
"""
OLE 1.0 Object
see MS-OLEDS 2.2 OLE1.0 Format Structures
"""
# constants for the format_id attribute:
# see MS-OLEDS 2.2.4 ObjectHeader
TYPE_LINKED = 0x01
TYPE_EMBEDDED = 0x02
def __init__(self, bindata=None):
"""
Constructor for OleObject.
If bindata is provided, it will be parsed using the parse() method.
:param bindata: bytes, OLE 1.0 Object structure containing an OLE object
"""
self.ole_version = None
self.format_id = None
self.class_name = None
self.topic_name = None
self.item_name = None
self.data = None
self.data_size = None
def parse(self, data):
"""
Parse binary data containing an OLE 1.0 Object structure,
to extract the OLE object it contains.
(see MS-OLEDS 2.2 OLE1.0 Format Structures)
:param data: bytes, OLE 1.0 Object structure containing an OLE object
:return:
"""
# Header: see MS-OLEDS 2.2.4 ObjectHeader
self.ole_version, data = read_uint32(data)
self.format_id, data = read_uint32(data)
log.debug('OLE version=%08X - Format ID=%08X' % (self.ole_version, self.format_id))
assert self.format_id in (self.TYPE_EMBEDDED, self.TYPE_LINKED)
self.class_name, data = read_LengthPrefixedAnsiString(data)
self.topic_name, data = read_LengthPrefixedAnsiString(data)
self.item_name, data = read_LengthPrefixedAnsiString(data)
log.debug('Class name=%r - Topic name=%r - Item name=%r'
% (self.class_name, self.topic_name, self.item_name))
if self.format_id == self.TYPE_EMBEDDED:
# Embedded object: see MS-OLEDS 2.2.5 EmbeddedObject
#assert self.topic_name != '' and self.item_name != ''
self.data_size, data = read_uint32(data)
log.debug('Declared data size=%d - remaining size=%d' % (self.data_size, len(data)))
# TODO: handle incorrect size to avoid exception
self.data = data[:self.data_size]
assert len(self.data) == self.data_size
self.extra_data = | |
import torch
from torch import nn
from torch.nn import Module
import copy
import pdb
from models.stylegan2.model import EqualLinear, PixelNorm
# anomaly detection disabled globally (it slows autograd down considerably)
torch.autograd.set_detect_anomaly(False)
# Channel width of each StyleGAN2 style-space ("s-space") layer, from the
# 512-wide early layers down to the 32-wide final ones (26 entries total).
STYLESPACE_DIMENSIONS = [512 for _ in range(15)] + [256, 256, 256] + [128, 128, 128] + [64, 64, 64] + [32, 32]
class Mapper(Module):
    """MLP mapper: a PixelNorm followed by `num_layers` EqualLinear layers
    with fused leaky-ReLU activations, all mapping to `latent_dim`.

    When input_dim is not given it defaults to latent_dim, or to
    latent_dim + 68*2 when opts.landmarks is set (68 two-dimensional
    facial landmarks concatenated to the latent code).
    """

    def __init__(self, opts, latent_dim=512, num_layers=4, input_dim=None):
        super(Mapper, self).__init__()
        self.latent_dim = latent_dim
        self.opts = opts
        if input_dim is None:
            extra = 68 * 2 if self.opts.landmarks else 0
            input_dim = latent_dim + extra
        blocks = [PixelNorm()]
        # first layer maps input_dim -> latent_dim, the rest keep latent_dim
        dims = [input_dim] + [latent_dim] * (num_layers - 1)
        for in_dim in dims:
            blocks.append(
                EqualLinear(in_dim, latent_dim,
                            lr_mul=opts.lr_mul, activation='fused_lrelu'))
        self.input_dim = input_dim
        self.mapping = nn.Sequential(*blocks)

    def forward(self, x):
        """Map latent code x through the MLP."""
        return self.mapping(x)
class SingleMapper(Module):
    """Thin wrapper applying one shared Mapper to every input."""

    def __init__(self, opts):
        super(SingleMapper, self).__init__()
        self.opts = opts
        self.mapping = Mapper(opts)

    def forward(self, x):
        """Apply the wrapped Mapper to x."""
        return self.mapping(x)
class LevelsMapper(Module):
    """One Mapper per style vector, grouped into coarse (styles 0-3),
    medium (styles 4-7) and fine (styles 8..n_styles-1) levels.
    Disabled groups contribute zero offsets.
    """

    def __init__(self, opts):
        super(LevelsMapper, self).__init__()
        self.opts = opts
        self.mapper_list = nn.ModuleList()
        # mappers are appended in group order; only enabled groups are added
        if not opts.no_coarse_mapper:
            for i in range(0, 4):
                self.mapper_list.append(Mapper(opts))
        if not opts.no_medium_mapper:
            for i in range(4, 8):
                self.mapper_list.append(Mapper(opts))
        if not opts.no_fine_mapper:
            for i in range(8, self.opts.n_styles):
                self.mapper_list.append(Mapper(opts))

    def forward(self, x):
        """Return per-style offsets stacked on dim 1 (one entry per style;
        zeros for disabled groups)."""
        # BUGFIX: the original indexed mapper_list with absolute positions
        # (0-3 coarse, 4-7 medium, 8+ fine), but __init__ only appends
        # mappers for ENABLED groups, so the fixed indices pick the wrong
        # (or missing) mappers whenever an earlier group is disabled while
        # a later one is enabled. Consume the list in registration order
        # instead.
        mappers = iter(self.mapper_list)
        out_list = []
        for enabled, count in (
                (not self.opts.no_coarse_mapper, 4),
                (not self.opts.no_medium_mapper, 4),
                (not self.opts.no_fine_mapper, self.opts.n_styles - 8)):
            for _ in range(count):
                if enabled:
                    out_list.append(next(mappers)(x))
                else:
                    out_list.append(torch.zeros_like(x))
        return torch.stack(out_list, dim=1)
class SharedLevelsMapper(Module):
    """A 3-layer Mapper trunk shared by all styles, followed by one
    EqualLinear head per style vector, grouped into coarse (0-3),
    medium (4-7) and fine (8..n_styles-1) levels.
    Disabled groups contribute zero offsets.
    """

    def __init__(self, opts):
        super(SharedLevelsMapper, self).__init__()
        self.opts = opts
        self.shared = Mapper(opts, num_layers=3)
        self.mapper_list = nn.ModuleList()
        # heads are appended in group order; only enabled groups are added
        if not opts.no_coarse_mapper:
            for i in range(0, 4):
                self.mapper_list.append(EqualLinear(
                    512, 512, lr_mul=opts.lr_mul, activation='fused_lrelu'))
        if not opts.no_medium_mapper:
            for i in range(4, 8):
                self.mapper_list.append(EqualLinear(
                    512, 512, lr_mul=opts.lr_mul, activation='fused_lrelu'))
        if not opts.no_fine_mapper:
            for i in range(8, self.opts.n_styles):
                self.mapper_list.append(EqualLinear(
                    512, 512, lr_mul=opts.lr_mul, activation='fused_lrelu'))

    def forward(self, x):
        """Run the shared trunk, then return per-style head outputs stacked
        on dim 1 (zeros for disabled groups)."""
        x = self.shared(x)
        # BUGFIX: the original indexed mapper_list with absolute positions
        # (0-3 coarse, 4-7 medium, 8+ fine), but __init__ only appends
        # heads for ENABLED groups, so the fixed indices are wrong whenever
        # an earlier group is disabled while a later one is enabled.
        heads = iter(self.mapper_list)
        out_list = []
        for enabled, count in (
                (not self.opts.no_coarse_mapper, 4),
                (not self.opts.no_medium_mapper, 4),
                (not self.opts.no_fine_mapper, self.opts.n_styles - 8)):
            for _ in range(count):
                if enabled:
                    out_list.append(next(heads)(x))
                else:
                    out_list.append(torch.zeros_like(x))
        return torch.stack(out_list, dim=1)
class ThreeLevelsMapper(Module):
    """One Mapper per LEVEL (coarse/medium/fine); each level's single
    mapper output is repeated for every style in its group (coarse:
    styles 0-3, medium: 4-7, fine: 8..n_styles-1).
    Disabled groups contribute zero offsets.
    """

    def __init__(self, opts):
        super(ThreeLevelsMapper, self).__init__()
        self.opts = opts
        self.mapper_list = nn.ModuleList()
        # at most three mappers; only enabled levels are appended
        if not opts.no_coarse_mapper:
            self.mapper_list.append(Mapper(opts))
        if not opts.no_medium_mapper:
            self.mapper_list.append(Mapper(opts))
        if not opts.no_fine_mapper:
            self.mapper_list.append(Mapper(opts))

    def forward(self, x):
        """Return per-style offsets stacked on dim 1 (the same mapper output
        repeated within each level; zeros for disabled levels)."""
        # BUGFIX: the original always read mapper_list[0]/[1]/[2] for
        # coarse/medium/fine, but __init__ appends only the ENABLED
        # mappers, so the fixed indices are wrong whenever an earlier
        # level is disabled while a later one is enabled. Consume the
        # list in registration order instead.
        mappers = iter(self.mapper_list)
        out_list = []
        for enabled, count in (
                (not self.opts.no_coarse_mapper, 4),
                (not self.opts.no_medium_mapper, 4),
                (not self.opts.no_fine_mapper, self.opts.n_styles - 8)):
            if enabled:
                mapper = next(mappers)
                for _ in range(count):
                    out_list.append(mapper(x))
            else:
                for _ in range(count):
                    out_list.append(torch.zeros_like(x))
        return torch.stack(out_list, dim=1)
# Author: <NAME>
# URL: https://shenxiaohai.me/2018/10/20/pytorch-tutorial-advanced-02/#%E5%88%9B%E5%BB%BAVAE%E6%A8%A1%E5%9E%8B%EF%BC%88%E5%8F%98%E5%88%86%E8%87%AA%E7%BC%96%E7%A0%81%E5%99%A8%EF%BC%88Variational-Auto-Encoder%EF%BC%89%EF%BC%89
class VAEMapper(Module):
    """Variational mapper: a Mapper trunk followed by two linear heads
    producing the mean and log-variance of a Gaussian, sampled with the
    reparameterization trick."""

    def __init__(self, opts, output_dim=512):
        super(VAEMapper, self).__init__()
        self.opts = opts
        self.output_dim = output_dim
        self.mapper = Mapper(opts)
        self.latent_dim = self.mapper.latent_dim
        self.to_mu = nn.Linear(self.latent_dim, self.output_dim)
        self.to_var = nn.Linear(self.latent_dim, self.output_dim)

    def encode(self, x):
        """Return (mu, log_var) of the posterior for input x."""
        h = self.mapper(x)
        return self.to_mu(h), self.to_var(h)

    def reparameterize(self, mu, log_var, zero_var=False):
        """Sample z ~ N(mu, exp(log_var)); return mu unchanged when
        zero_var. Noise is drawn in both cases so RNG consumption is
        identical whichever branch is taken."""
        std = torch.exp(log_var * 0.5)
        noise = torch.randn_like(std)
        if zero_var == True:
            return mu
        return mu + noise * std

    def forward(self, x, return_dstr=False, zero_var=False):
        """Encode x and sample z; optionally also return (mu, log_var)."""
        mu, log_var = self.encode(x)
        z = self.reparameterize(mu, log_var, zero_var=zero_var)
        return (z, mu, log_var) if return_dstr else z
class TwoChannelMapper(Module):
    """Two-branch mapper: a CLIP-embedding branch (CLIPMapper) and an
    image branch (conv encoder -> Gaussian bottleneck -> ImageMapper)
    whose outputs are summed in forward()."""
    def __init__(self, opts, output_dim=512, input_hw=256):
        super(TwoChannelMapper, self).__init__()
        self.opts = opts
        self.output_dim = output_dim
        # width of the variational bottleneck between encoder and ImageMapper
        self.bottleNeck_dim = self.opts.bottleNeck_dim
        self.input_hw = input_hw
        self.CLIPMapper = Mapper(opts)
        self.latent_dim = self.CLIPMapper.latent_dim
        modules = []
        in_channels = 3
        hidden_dims = [32, 64, 128, 256, 512] #feature dim = [512, 256, 128, 64, 32]
        # stride-2 conv stack: each block halves the spatial resolution
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        self.imageEncoder = nn.Sequential(*modules)
        # flattened feature size after len(hidden_dims) halvings of input_hw
        self.imageEncoder_out_dim = hidden_dims[-1] * int(input_hw/(2**len(hidden_dims)))**2
        self.to_mu = nn.Linear(self.imageEncoder_out_dim, self.bottleNeck_dim)
        self.to_var = nn.Linear(self.imageEncoder_out_dim, self.bottleNeck_dim)
        self.ImageMapper = Mapper(opts,input_dim=self.bottleNeck_dim)
        # for n, p in self.imageEncoder.named_parameters():
        #     pdb.set_trace()
        #     print(p.dtype)
    def encodeImage(self, x):
        # Encode an image batch into the (mu, log_var) of the bottleneck.
        # pdb.set_trace()
        # print(torch.all(torch.isfinite(x)))
        # with torch.backends.cudnn.flags(enabled=False):
        x = self.imageEncoder(x)
        # print(torch.all(torch.isfinite(x)))
        # pdb.set_trace()
        x = torch.flatten(x, start_dim=1)
        # pdb.set_trace()
        return self.to_mu(x), self.to_var(x)
    def reparameterize(self, mu, log_var, zero_var=False):
        # Reparameterization trick: z = mu + eps*std; returns mu when
        # zero_var. Noise is drawn even when zero_var (keeps RNG stream
        # consumption identical in both branches).
        std = torch.exp(log_var*0.5)
        eps = torch.randn_like(std)
        if zero_var == True:
            return mu
        else:
            return mu + eps * std
    def forward(self, e, x, return_dstr=False, zero_var=False):
        # e: CLIP embedding batch; x: image batch, or the string 'random'
        # to sample the bottleneck from the standard-normal prior.
        # NOTE(review): `x != 'random'` compares a tensor to a str when x
        # is a tensor - relies on the installed torch version returning a
        # plain boolean for that comparison; confirm.
        if x != 'random':
            mu, log_var = self.encodeImage(x)
        else:
            mu = torch.zeros(e.shape[0],self.bottleNeck_dim, device=e.device)
            log_var = torch.zeros(e.shape[0],self.bottleNeck_dim, device=e.device)
        z = self.reparameterize(mu, log_var, zero_var=zero_var)
        # sum of the image-branch and CLIP-branch mappings
        w = self.ImageMapper(z) + self.CLIPMapper(e)
        if return_dstr:
            return w, mu, log_var, None
        else:
            return w
class cVAEMapper(Module):
    """Conditional VAE mapper: encodes an image (conditioned on a CLIP
    embedding e) into a Gaussian bottleneck, then decodes the sampled
    bottleneck concatenated with e into a single w (opts.learn_in_w) or
    per-style offsets grouped into coarse/medium/fine levels."""

    def __init__(self, opts, output_dim=512, input_hw=256):
        super(cVAEMapper, self).__init__()
        self.opts = opts
        self.output_dim = output_dim
        self.bottleNeck_dim = self.opts.bottleNeck_dim
        self.input_hw = input_hw
        self.CLIPMapper = Mapper(opts)
        self.latent_dim = self.CLIPMapper.latent_dim
        if self.opts.conditional_mu:
            # condition-dependent prior mean for the bottleneck
            self.priorMapper = Mapper(opts, latent_dim=self.bottleNeck_dim, input_dim=512)
        modules = []
        in_channels = 3
        hidden_dims = [32, 64, 128, 256, 512]  # feature dim = [512, 256, 128, 64, 32]
        # stride-2 conv stack: each block halves the spatial resolution
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size=3, stride=2, padding=1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        # flattened feature size after len(hidden_dims) halvings of input_hw
        self.imageEncoder_out_dim = hidden_dims[-1] * int(input_hw / (2 ** len(hidden_dims))) ** 2
        self.imageEncoder = nn.Sequential(*modules)
        self.fc = nn.Linear(self.imageEncoder_out_dim, 512)
        self.MLP1 = Mapper(opts, input_dim=1024)
        self.to_mu = nn.Linear(512, self.bottleNeck_dim)
        self.to_var = nn.Linear(512, self.bottleNeck_dim)
        if self.opts.learn_in_w:
            self.MLP2 = Mapper(opts, input_dim=512 + self.bottleNeck_dim)
        else:
            # one decoder per enabled level, appended in group order
            self.MLP2 = nn.ModuleList()
            if not opts.no_coarse_mapper:
                self.MLP2.append(Mapper(opts, input_dim=512 + self.bottleNeck_dim))
            if not opts.no_medium_mapper:
                self.MLP2.append(Mapper(opts, input_dim=512 + self.bottleNeck_dim))
            if not opts.no_fine_mapper:
                self.MLP2.append(Mapper(opts, input_dim=512 + self.bottleNeck_dim))

    def encodeImage(self, e, x):
        """Encode image x conditioned on embedding e into (mu, log_var)."""
        x = self.imageEncoder(x)
        x = torch.flatten(x, start_dim=1)
        x = self.fc(x)
        x = torch.cat([x, e], dim=-1)
        x = self.MLP1(x)
        return self.to_mu(x), self.to_var(x)

    def reparameterize(self, mu, log_var, zero_var=False, pre_defined_repara=None):
        """Sample z = mu + eps*std; eps may be supplied via
        pre_defined_repara, and mu is returned unchanged when zero_var."""
        std = torch.exp(log_var * 0.5)
        if pre_defined_repara is None:
            eps = torch.randn_like(std)
        else:
            eps = pre_defined_repara
        if zero_var == True:
            return mu
        else:
            return mu + eps * std

    def forward(self, e, x, return_dstr=False, zero_var=False, pre_defined_repara=None):
        """Map (e, x) to w. x may be an image batch, the string 'random'
        (standard-normal bottleneck), or 'random<scale>' for a scaled
        prior std. Returns (w, mu, log_var, prior_mu) when return_dstr."""
        import math  # local stdlib import, keeps module imports untouched
        if self.opts.conditional_mu:
            prior_mu = self.priorMapper(e)
        else:
            prior_mu = None
        # NOTE(review): `x == 'random'` compares a tensor to a str when x
        # is a tensor - relies on torch returning a plain boolean here.
        if x == 'random':
            if prior_mu is None:
                mu = torch.zeros(e.shape[0], self.bottleNeck_dim, device=e.device)
            else:
                mu = prior_mu
            log_var = torch.zeros(e.shape[0], self.bottleNeck_dim, device=e.device)
        elif type(x) is not torch.Tensor:
            # string of the form 'random<scale>': sample with std=scale
            assert 'random' == x[:6]
            std_scale = float(x[6:])
            if prior_mu is None:
                mu = torch.zeros(e.shape[0], self.bottleNeck_dim, device=e.device)
            else:
                mu = prior_mu
            # BUGFIX: the original called np.log, but numpy is never
            # imported in this module (NameError at runtime); math.log is
            # equivalent for a scalar.
            log_var = math.log(std_scale ** 2) + torch.zeros(e.shape[0], self.bottleNeck_dim, device=e.device)
        else:
            mu, log_var = self.encodeImage(e, x)
        z = self.reparameterize(mu, log_var, zero_var=zero_var, pre_defined_repara=pre_defined_repara)
        z = torch.cat([z, e], dim=-1)
        if self.opts.learn_in_w:
            w = self.MLP2(z)
        else:
            # BUGFIX: the original indexed MLP2[0]/[1]/[2] for coarse/
            # medium/fine, but only ENABLED decoders are appended in
            # __init__, so the fixed indices are wrong when an earlier
            # group is disabled while a later one is enabled.
            mlps = iter(self.MLP2)
            w = []
            for enabled, count in (
                    (not self.opts.no_coarse_mapper, 4),
                    (not self.opts.no_medium_mapper, 4),
                    (not self.opts.no_fine_mapper, self.opts.n_styles - 8)):
                if enabled:
                    mlp = next(mlps)
                    for _ in range(count):
                        w.append(mlp(z))
                else:
                    for _ in range(count):
                        w.append(torch.zeros_like(z))
            w = torch.stack(w, dim=1)
        if return_dstr:
            return w, mu, log_var, prior_mu
        else:
            return w
class cVAEdoubleWMapper(Module):
    def __init__(self, opts, output_dim=512, input_hw=256):
        # Conditional VAE with a second w-space branch: image encoder ->
        # bottleneck, plus decoders MLP2/MLP3 and a frozen pretrained
        # StyleGAN2 mapping network f (loaded by create_f()).
        super(cVAEdoubleWMapper, self).__init__()
        self.opts = opts
        self.output_dim = output_dim
        self.bottleNeck_dim = self.opts.bottleNeck_dim
        # the architecture below hard-wires 512-wide linears, so the
        # bottleneck must be 512
        assert self.bottleNeck_dim == 512
        self.input_hw = input_hw
        self.CLIPMapper = Mapper(opts)
        self.latent_dim = self.CLIPMapper.latent_dim
        if self.opts.conditional_mu:
            # condition-dependent prior mean for the bottleneck
            self.priorMapper = Mapper(opts,latent_dim = self.bottleNeck_dim, input_dim = 512)
        modules = []
        in_channels = 3
        hidden_dims = [32, 64, 128, 256, 512] #feature dim = [512, 256, 128, 64, 32]
        # stride-2 conv stack: each block halves the spatial resolution
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        # flattened feature size after len(hidden_dims) halvings of input_hw
        self.imageEncoder_out_dim = hidden_dims[-1] * int(input_hw/(2**len(hidden_dims)))**2
        self.imageEncoder = nn.Sequential(*modules)
        self.fc = nn.Linear(self.imageEncoder_out_dim, 512)
        self.MLP1 = Mapper(opts,input_dim=1024)
        self.to_mu = nn.Linear(512, self.bottleNeck_dim)
        self.to_var = nn.Linear(512, self.bottleNeck_dim)
        self.MLP2 = Mapper(opts)
        self.MLP3 = Mapper(opts, input_dim=1024)
        # load the frozen pretrained StyleGAN2 mapping network
        self.create_f()
    def create_f(self):
        # Build the pretrained StyleGAN2 mapping network f as a frozen
        # 8-layer Mapper. Weights come from the 'g_ema' entry of the
        # checkpoint at opts.stylegan_weights: keys 'style.<i>.*' are
        # renamed to 'mapping.<i-1>.*' (PixelNorm at index 0 shifts the
        # linear layers by one relative to the StyleGAN 'style' module).
        self.SGf = Mapper(self.opts,num_layers=8)
        f_ckpt = torch.load(self.opts.stylegan_weights)['g_ema']
        d_filt = {'mapping.'+k[len('style') + 1:]: v for k, v in f_ckpt.items() if k[:len('style')] == 'style'}
        self.SGf.load_state_dict(d_filt)
        # freeze f: it is used as a fixed prior network, never trained
        for p in self.SGf.parameters():
            p.requires_grad = False
def encodeImage(self, e, x):
x = self.imageEncoder(x)
x = torch.flatten(x, start_dim=1)
x = self.fc(x)
x = torch.cat([x,e],dim=-1)
x = self.MLP1(x)
return self.to_mu(x), self.to_var(x)
def reparameterize(self, mu, log_var, zero_var=False):
std = torch.exp(log_var*0.5)
eps = torch.randn_like(std)
if zero_var == True:
return mu
else:
return mu + eps * std
def forward(self, e, x, return_dstr=False, | |
IDs.
(string) --
:type EnvironmentNames: list
:param EnvironmentNames: If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those that have the specified names.
(string) --
:type IncludeDeleted: boolean
:param IncludeDeleted: Indicates whether to include deleted environments:
true : Environments that have been deleted after IncludedDeletedBackTo are displayed.false : Do not include deleted environments.
:type IncludedDeletedBackTo: datetime
:param IncludedDeletedBackTo: If specified when IncludeDeleted is set to true , then environments deleted after this date are displayed.
:rtype: dict
:return: {
'Environments': [
{
'EnvironmentName': 'string',
'EnvironmentId': 'string',
'ApplicationName': 'string',
'VersionLabel': 'string',
'SolutionStackName': 'string',
'PlatformArn': 'string',
'TemplateName': 'string',
'Description': 'string',
'EndpointURL': 'string',
'CNAME': 'string',
'DateCreated': datetime(2015, 1, 1),
'DateUpdated': datetime(2015, 1, 1),
'Status': 'Launching'|'Updating'|'Ready'|'Terminating'|'Terminated',
'AbortableOperationInProgress': True|False,
'Health': 'Green'|'Yellow'|'Red'|'Grey',
'HealthStatus': 'NoData'|'Unknown'|'Pending'|'Ok'|'Info'|'Warning'|'Degraded'|'Severe',
'Resources': {
'LoadBalancer': {
'LoadBalancerName': 'string',
'Domain': 'string',
'Listeners': [
{
'Protocol': 'string',
'Port': 123
},
]
}
},
'Tier': {
'Name': 'string',
'Type': 'string',
'Version': 'string'
},
'EnvironmentLinks': [
{
'LinkName': 'string',
'EnvironmentName': 'string'
},
]
},
]
}
:returns:
Launching : Environment is in the process of initial deployment.
Updating : Environment is in the process of updating its configuration settings or application version.
Ready : Environment is available to have an action performed on it, such as update or terminate.
Terminating : Environment is in the shut-down process.
Terminated : Environment is not running.
"""
pass
def describe_events(ApplicationName=None, VersionLabel=None, TemplateName=None, EnvironmentId=None, EnvironmentName=None, PlatformArn=None, RequestId=None, Severity=None, StartTime=None, EndTime=None, MaxRecords=None, NextToken=None):
    """
    Returns a list of event descriptions matching criteria up to the last 6 weeks.
    See also: AWS API Documentation
    Examples
    The following operation retrieves events for an environment named my-env:
    Expected Output:
    :example: response = client.describe_events(
        ApplicationName='string',
        VersionLabel='string',
        TemplateName='string',
        EnvironmentId='string',
        EnvironmentName='string',
        PlatformArn='string',
        RequestId='string',
        Severity='TRACE'|'DEBUG'|'INFO'|'WARN'|'ERROR'|'FATAL',
        StartTime=datetime(2015, 1, 1),
        EndTime=datetime(2015, 1, 1),
        MaxRecords=123,
        NextToken='string'
    )
    :type ApplicationName: string
    :param ApplicationName: If specified, AWS Elastic Beanstalk restricts the returned descriptions to include only those associated with this application.
    :type VersionLabel: string
    :param VersionLabel: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this application version.
    :type TemplateName: string
    :param TemplateName: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that are associated with this environment configuration.
    :type EnvironmentId: string
    :param EnvironmentId: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this environment.
    :type EnvironmentName: string
    :param EnvironmentName: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those associated with this environment.
    :type PlatformArn: string
    :param PlatformArn: The ARN of the version of the custom platform.
    :type RequestId: string
    :param RequestId: If specified, AWS Elastic Beanstalk restricts the described events to include only those associated with this request ID.
    :type Severity: string
    :param Severity: If specified, limits the events returned from this call to include only those with the specified severity or higher.
    :type StartTime: datetime
    :param StartTime: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that occur on or after this time.
    :type EndTime: datetime
    :param EndTime: If specified, AWS Elastic Beanstalk restricts the returned descriptions to those that occur up to, but not including, the EndTime .
    :type MaxRecords: integer
    :param MaxRecords: Specifies the maximum number of events that can be returned, beginning with the most recent event.
    :type NextToken: string
    :param NextToken: Pagination token. If specified, the events return the next batch of results.
    :rtype: dict
    :return: {
        'Events': [
            {
                'EventDate': datetime(2015, 1, 1),
                'Message': 'string',
                'ApplicationName': 'string',
                'VersionLabel': 'string',
                'TemplateName': 'string',
                'EnvironmentName': 'string',
                'PlatformArn': 'string',
                'RequestId': 'string',
                'Severity': 'TRACE'|'DEBUG'|'INFO'|'WARN'|'ERROR'|'FATAL'
            },
        ],
        'NextToken': 'string'
    }
    """
    # NOTE(review): body is an empty stub; this looks like generated API
    # documentation (the real call is dispatched by botocore) - confirm.
    pass
def describe_instances_health(EnvironmentName=None, EnvironmentId=None, AttributeNames=None, NextToken=None):
    """
    Retrieves detailed information about the health of instances in your AWS Elastic Beanstalk. This operation requires enhanced health reporting .
    See also: AWS API Documentation
    Examples
    The following operation retrieves health information for instances in an environment named my-env:
    Expected Output:
    :example: response = client.describe_instances_health(
        EnvironmentName='string',
        EnvironmentId='string',
        AttributeNames=[
            'HealthStatus'|'Color'|'Causes'|'ApplicationMetrics'|'RefreshedAt'|'LaunchedAt'|'System'|'Deployment'|'AvailabilityZone'|'InstanceType'|'All',
        ],
        NextToken='string'
    )
    :type EnvironmentName: string
    :param EnvironmentName: Specify the AWS Elastic Beanstalk environment by name.
    :type EnvironmentId: string
    :param EnvironmentId: Specify the AWS Elastic Beanstalk environment by ID.
    :type AttributeNames: list
    :param AttributeNames: Specifies the response elements you wish to receive. To retrieve all attributes, set to All . If no attribute names are specified, returns a list of instances.
        (string) --
    :type NextToken: string
    :param NextToken: Specify the pagination token returned by a previous call.
    :rtype: dict
    :return: {
        'InstanceHealthList': [
            {
                'InstanceId': 'string',
                'HealthStatus': 'string',
                'Color': 'string',
                'Causes': [
                    'string',
                ],
                'LaunchedAt': datetime(2015, 1, 1),
                'ApplicationMetrics': {
                    'Duration': 123,
                    'RequestCount': 123,
                    'StatusCodes': {
                        'Status2xx': 123,
                        'Status3xx': 123,
                        'Status4xx': 123,
                        'Status5xx': 123
                    },
                    'Latency': {
                        'P999': 123.0,
                        'P99': 123.0,
                        'P95': 123.0,
                        'P90': 123.0,
                        'P85': 123.0,
                        'P75': 123.0,
                        'P50': 123.0,
                        'P10': 123.0
                    }
                },
                'System': {
                    'CPUUtilization': {
                        'User': 123.0,
                        'Nice': 123.0,
                        'System': 123.0,
                        'Idle': 123.0,
                        'IOWait': 123.0,
                        'IRQ': 123.0,
                        'SoftIRQ': 123.0
                    },
                    'LoadAverage': [
                        123.0,
                    ]
                },
                'Deployment': {
                    'VersionLabel': 'string',
                    'DeploymentId': 123,
                    'Status': 'string',
                    'DeploymentTime': datetime(2015, 1, 1)
                },
                'AvailabilityZone': 'string',
                'InstanceType': 'string'
            },
        ],
        'RefreshedAt': datetime(2015, 1, 1),
        'NextToken': 'string'
    }
    :returns:
    (string) --
    """
    # NOTE(review): body is an empty stub; this looks like generated API
    # documentation (the real call is dispatched by botocore) - confirm.
    pass
def describe_platform_version(PlatformArn=None):
    """
    Describes the version of the platform.

    NOTE: this is a documentation stub -- the body is empty (returns None);
    the real call is dispatched by the boto3 client at runtime.

    See also: AWS API Documentation

    :example: response = client.describe_platform_version(
        PlatformArn='string'
    )
    :type PlatformArn: string
    :param PlatformArn: The ARN of the version of the platform.
    :rtype: dict
    :return: {
        'PlatformDescription': {
            'PlatformArn': 'string',
            'PlatformOwner': 'string',
            'PlatformName': 'string',
            'PlatformVersion': 'string',
            'SolutionStackName': 'string',
            'PlatformStatus': 'Creating'|'Failed'|'Ready'|'Deleting'|'Deleted',
            'DateCreated': datetime(2015, 1, 1),
            'DateUpdated': datetime(2015, 1, 1),
            'PlatformCategory': 'string',
            'Description': 'string',
            'Maintainer': 'string',
            'OperatingSystemName': 'string',
            'OperatingSystemVersion': 'string',
            'ProgrammingLanguages': [
                {
                    'Name': 'string',
                    'Version': 'string'
                },
            ],
            'Frameworks': [
                {
                    'Name': 'string',
                    'Version': 'string'
                },
            ],
            'CustomAmiList': [
                {
                    'VirtualizationType': 'string',
                    'ImageId': 'string'
                },
            ],
            'SupportedTierList': [
                'string',
            ],
            'SupportedAddonList': [
                'string',
            ]
        }
    }
    :returns:
        (string) --
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned url given a client, its method, and arguments.

    NOTE: documentation stub -- the body is empty (returns None); the real
    implementation is provided by the boto3 client at runtime.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned url is valid
        for. By default it expires in an hour (3600 seconds)
    :type HttpMethod: string
    :param HttpMethod: The http method to use on the generated url. By
        default, the http method is whatever is used in the method's model.
    """
    pass
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.

    NOTE: documentation stub -- the body is empty (returns None); the real
    implementation is provided by the boto3 client at runtime.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name
        as the method name on the client. For example, if the
        method name is create_foo, and you'd normally invoke the
        operation as client.create_foo(**kwargs), if the
        create_foo operation can be paginated, you can use the
        call client.get_paginator('create_foo').
    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_waiter():
    """
    Documentation stub with an empty upstream docstring; the body is empty
    (returns None). NOTE(review): on a real boto3 client this method takes a
    waiter name and returns a waiter object -- confirm against the botocore
    client documentation before relying on it.
    """
    pass
def list_available_solution_stacks():
    """
    Returns a list of the available solution stack names, with the public
    version first and then in reverse chronological order.

    NOTE: documentation stub -- the body is empty (returns None); the real
    call is dispatched by the boto3 client at runtime.

    See also: AWS API Documentation

    Examples
    --------
    The following operation lists solution stacks for all currently available
    platform configurations and any that you have used in the past:

    :example: response = client.list_available_solution_stacks()
    :rtype: dict
    :return: {
        'SolutionStacks': [
            'string',
        ],
        'SolutionStackDetails': [
            {
                'SolutionStackName': 'string',
                'PermittedFileTypes': [
                    'string',
                ]
            },
        ]
    }
    :returns:
        (string) --
    """
    pass
def list_platform_versions(Filters=None, MaxRecords=None, NextToken=None):
"""
Lists the available platforms.
See also: AWS API Documentation
:example: response = client.list_platform_versions(
Filters=[
{
'Type': 'string',
'Operator': 'string',
'Values': [
'string',
]
},
],
MaxRecords=123,
NextToken='string'
)
:type Filters: list
:param Filters: List only the platforms where the platform member value relates to one of the supplied values.
(dict) --Specify criteria to restrict the results when listing custom platforms.
The filter is evaluated as the expression:
Type | |
#
# python code for building a parser from a grammar
# Copyright <NAME>, 1994
#
# BUGS:
# A bad grammar that has no derivations for
# the root nonterminal may cause a name error
# on the variable "GoodStartingPlace"
# this needs to be modified so the RULEGRAM is loaded from a
# compiled representation if available.
import string
import kjSet
import kjParser
import regex
# import some constants
from kjParser import \
TERMFLAG, NOMATCHFLAG, MOVETOFLAG, REDUCEFLAG, TRANSFLAG, KEYFLAG, \
NONTERMFLAG, TERMFLAG, EOFFLAG, ENDOFFILETOKEN
# Name of the module providing the core parser machinery.
PMODULE = kjParser.THISMODULE
# Errors raised here. NOTE: these are Python 2 string exceptions, raised
# below with the old `raise Name, "message"` syntax.
TokenError = "TokenError" # may happen on autogen with bad grammar
NotSLRError = "NotSLRError" # may happen for nonSLR grammar
# set this flag (nonzero) to run regression tests at each module load
RUNTESTS = 0
# set this flag (nonzero) to abort automatic generation on errors
ABORTONERROR = 0
# sentinel token used to mark null (epsilon) productions
NULLTOKEN = (None,None)
# a derived FSM class, with closure computation methods defined
# (compilable FSMachine)
#
class CFSMachine(kjParser.FSMachine):
    """A derived finite-state machine with epsilon-closure computation.

    Python 2 code. Adds to kjParser.FSMachine the ability to convert an
    NFA with epsilon transitions into an equivalent machine whose states
    are sets of the original states (the classic subset construction).
    """

    def __init__(self, nonterm):
        # Delegate all state initialisation to the base FSM.
        kjParser.FSMachine.__init__(self, nonterm)

    def Eclosure(self, Epsilon, DoNullMaps=0):
        """Return the epsilon closure of this FSM as a new CFSMachine.

        Epsilon: the token treated as the epsilon (empty) transition.
        DoNullMaps: if set, also record transitions into empty state sets
        (usually creating a really big fsm).
        """
        Closure = CFSMachine( self.root_nonTerminal )
        # compute the Epsilon Graph between states: State -> State arcs
        # for every direct epsilon transition (plus reflexive arcs).
        EGraph = kjSet.NewDG([])
        for State in range(0,self.maxState+1):
            # every state is E-connected to self
            kjSet.AddArc( EGraph, State, State )
            # add possible transition on epsilon (ONLY ONE SUPPORTED!)
            key = (State, Epsilon)
            if self.StateTokenMap.has_key(key):
                keymap = self.StateTokenMap[key]
                if keymap[0][0] != MOVETOFLAG:
                    raise TypeError, "unexpected map type in StateTokenMap"
                for (Flag,ToState) in keymap:
                    kjSet.AddArc( EGraph, State, ToState )
        # transitively close EGraph so arcs represent epsilon *paths*
        kjSet.TransClose( EGraph )
        # Translate EGraph into a dictionary: state -> list of
        # epsilon-reachable states.
        EMap = {}
        for State in range(0,self.maxState+1):
            EMap[State] = kjSet.Neighbors( EGraph, State )
        # make each e-closure of each self.state a state of the closure FSM.
        # here closure states assumed transient -- reset elsewhere.
        # first do the initial state
        Closure.States[ Closure.initial_state ] = \
            [TRANSFLAG, kjSet.NewSet(EMap[self.initial_state]) ]
        # do all other states (save initial and successful final states)
        #for State in range(0,self.maxState+1):
        #    if State != self.initial_state \
        #       and State != self.successful_final_state:
        #       Closure.NewSetState(TRANSFLAG, kjSet.NewSet(EMap[State]) )
        ##endfor
        # compute set of all known tokens EXCEPT EPSILON
        Tokens = kjSet.NewSet( [] )
        for (State, Token) in self.StateTokenMap.keys():
            if Token != Epsilon:
                kjSet.addMember(Token, Tokens)
        # transform it into a list
        Tokens = kjSet.get_elts(Tokens)
        # for each state of the closure FSM (past final) add transitions
        # and add new states as needed until all states are processed
        # (uses convention that states are allocated sequentially)
        ThisClosureState = 1
        while ThisClosureState <= Closure.maxState:
            MemberStates = kjSet.get_elts(Closure.States[ThisClosureState][1])
            # for each possible Token, compute the union UTrans of all
            # e-closures for all transitions for all member states,
            # on the Token, make UTrans a new state (if needed),
            # and transition ThisClosureState to UTrans on Token
            for Token in Tokens:
                UTrans = kjSet.NewSet( [] )
                for MState in MemberStates:
                    # if MState has a transition on Token, include
                    # EMap for the destination state
                    key = (MState, Token)
                    if self.StateTokenMap.has_key(key):
                        DStateTup = self.StateTokenMap[key]
                        if DStateTup[0][0] != MOVETOFLAG:
                            raise TypeError, "unknown map type"
                        for (DFlag, DState) in DStateTup:
                            for EDState in EMap[DState]:
                                kjSet.addMember(EDState, UTrans)
                # register UTrans as a new state if needed
                UTState = Closure.NewSetState(TRANSFLAG, UTrans)
                # record transition from ThisClosureState to UTState on
                # Token; unless DoNullMaps is set, empty targets are skipped
                if DoNullMaps:
                    Closure.SetMap( ThisClosureState, Token, UTState)
                else:
                    if not kjSet.Empty(UTrans):
                        Closure.SetMap( ThisClosureState, Token, UTState)
            ThisClosureState = ThisClosureState +1
        return Closure

    def NewSetState(self, kind, InSet):
        """Return the state marked by set InSet, allocating it if absent.

        Uses self.States[s][1] as the set marking state s. Only used by
        Eclosure above.
        """
        # return existing state if one is present that matches the set
        LastState= self.maxState
        # skip state 0 (successful final state)???
        for State in range(1,LastState+1):
            MarkSet = self.States[State][1]
            if kjSet.Same(InSet,MarkSet):
                return State # nonlocal
        # if not exited then allocate a new state
        LastState = LastState + 1
        self.States[LastState] = [ kind , InSet ]
        self.maxState = LastState
        return LastState
# Ruleset class, used to compute NFA and then DFA for
# parsing based on a list of rules.
#
class ruleset:
    def __init__(self, StartNonterm, Rulelist):
        """Initialize the ruleset.

        StartNonterm: root nonterminal of the grammar.
        Rulelist: sequence of rule objects (each carrying .Nonterm and
        .Body, as used by CompFirst below).
        """
        self.StartNonterm = StartNonterm
        self.Rules = Rulelist
# method to compute prefixes and First sets for nonterminals
    def CompFirst(self):
        """Compute FIRST sets for all nonterminals into self.First.

        Python 2 code. Iterates to a fixed point, adding an arc
        (nonterminal -> token) whenever the token can begin a derivation
        of the nonterminal; NULLTOKEN marks an epsilon derivation.
        Snarfed directly from Aho+Ullman (terminals glossed).
        """
        First = kjSet.NewDG( [] )
        # repeat the while loop until no change is made to First
        done = 0
        while not done:
            done = 1 # assume we're done until a change is made to First
            # iterate through all rules looking for a new arc to add
            # indicating Nonterminal --> possible first token derivation
            for R in self.Rules:
                GoalNonterm = R.Nonterm
                Bodylength = len(R.Body)
                # look through the body of the rule up to the first token
                # with no epsilon production (yet seen)
                Bodyindex = 0
                Processindex = 1
                while Processindex:
                    # unless otherwise indicated below, don't go to next token
                    Processindex = 0
                    # if index is past end of body then record
                    # an epsilon production for this nonterminal
                    if Bodyindex >= Bodylength:
                        if not kjSet.HasArc(First, GoalNonterm, NULLTOKEN ):
                            kjSet.AddArc( First, GoalNonterm, NULLTOKEN )
                            done = 0 # change made to First
                    else:
                        # otherwise try to add firsts of this token
                        # to firsts of the Head of the rule.
                        Token = R.Body[Bodyindex]
                        (type, name) = Token
                        if type in (KEYFLAG,TERMFLAG):
                            # try to add this terminal to First for GoalNonterm
                            if not kjSet.HasArc(First, GoalNonterm, Token):
                                kjSet.AddArc( First, GoalNonterm, Token)
                                done = 0
                        elif type == NONTERMFLAG:
                            # try to add each First entry for nonterminal
                            # to First entry for GoalNonterm
                            for FToken in kjSet.Neighbors( First, Token ):
                                if not kjSet.HasArc(First, GoalNonterm, FToken):
                                    kjSet.AddArc( First, GoalNonterm, FToken)
                                    done = 0
                            # does this nonterminal have a known e production?
                            if kjSet.HasArc( First, Token, NULLTOKEN ):
                                # if so, process next token in rule
                                Processindex = 1
                        else:
                            raise TokenError, "unknown token type in rule body"
                    Bodyindex = Bodyindex + 1
        self.First = First
# computing the Follow set for the ruleset
# the good news: I think it's correct.
# the bad news: It's slower than it needs to be for epsilon cases.
def CompFollow(self):
Follow = kjSet.NewDG( [] )
# put end marker on follow of start nonterminal
kjSet.AddArc(Follow, self.StartNonterm, kjParser.ENDOFFILETOKEN)
# now compute other follows using the rules;
# repeat the loop until no change to Follow.
done = 0
while not done:
done = 1 # assume done unless Follow changes
for R in self.Rules:
#print R
# work backwards in the rule body to
# avoid retesting for epsilon nonterminals
Bodylength = len(R.Body)
EpsilonTail = 1 # the tail of rule may expand to null
BodyIndex = Bodylength - 1
Last = 1 # loop starts at the last
from types import TupleType
while BodyIndex >= 0:
Token = R.Body[BodyIndex]
(Ttype,Tname) = Token
if Ttype in (KEYFLAG,TERMFLAG):
# keywords etc cancel epsilon tail, otherwise ignore
EpsilonTail = 0
elif Ttype == NONTERMFLAG:
# if the tail expands to epsilon, map
# follow for the goal nonterminal to this token
# and also follow for the tail nonterms
if EpsilonTail:
# add follow for goal
for FToken in kjSet.Neighbors(Follow,R.Nonterm):
if not kjSet.HasArc(Follow,Token,FToken):
kjSet.AddArc(Follow,Token,FToken)
#if type(FToken[0])==TupleType:
# raise ValueError, "bad FToken"+`FToken`
#print "new", Token, FToken
done = 0 # follow changed, loop again
# add follow for tail members
#for Index2 in range(BodyIndex+1, Bodylength):
# TailToken = R.Body[Index2]
# for FToken in kjSet.Neighbors(Follow,TailToken):
# if not kjSet.HasArc(Follow,Token,FToken):
# kjSet.AddArc(Follow,Token,FToken)
# done = 0
#endif EpsilonTail
# if we are not at the end | |
import os
from datetime import datetime, timedelta
import pytz
import numpy as np
import scipy.io as io
import utm
import yaml
from munch import Munch, munchify
from scipy.ndimage import median_filter
import scipy.signal as sig
def loadmat(filename, check_arrays=False, **kwargs):
    """Load a MATLAB .mat file as nested python dictionaries.

    Thin wrapper around scipy.io.loadmat (big thanks to mergen on
    stackexchange: http://stackoverflow.com/a/8832212). It forces
    struct_as_record=False and squeeze_me=True so MATLAB structs arrive as
    mat-objects, then cures them into plain nested dicts via _check_keys.
    Call this instead of scipy.io.loadmat directly.
    """
    kwargs.update(struct_as_record=False, squeeze_me=True)
    raw = io.loadmat(filename, **kwargs)
    return _check_keys(raw, check_arrays)
def _check_keys(dict, check_arrays):
"""
Checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries.
"""
for key in dict:
if isinstance(dict[key], io.matlab.mio5_params.mat_struct):
dict[key] = _todict(dict[key])
if isinstance(dict[key], np.ndarray) and check_arrays:
shape = dict[key].shape
array = dict[key].flatten()
for i, item in enumerate(array):
if isinstance(item, io.matlab.mio5_params.mat_struct):
array[i] = _todict(item)
dict[key] = array.reshape(shape)
return dict
def _todict(matobj):
"""
A recursive function which constructs from matobjects nested dictionaries.
"""
dict = {}
for strg in matobj._fieldnames:
elem = matobj.__dict__[strg]
if isinstance(elem, io.matlab.mio5_params.mat_struct):
dict[strg] = _todict(elem)
else:
dict[strg] = elem
return dict
def datenum_to_datetime(datenum):
    """
    Convert MATLAB datenums into python datetimes.

    Parameters
    ----------
    datenum : array_like
        MATLAB datenumber which is the number of days since 0000-01-00.

    Returns
    -------
    dt : ndarray or datetime
        Python datetime. See datetime module. Unconvertible values
        (e.g. NaN) come back as np.nan.
    """
    def _single(dn):
        try:
            # MATLAB's epoch is 366 days ahead of Python's ordinal origin.
            whole_days = datetime.fromordinal(int(dn))
            day_fraction = timedelta(days=dn % 1)
            return whole_days + day_fraction - timedelta(days=366)
        except ValueError:
            return np.nan

    if not np.iterable(datenum):
        return _single(datenum)
    arr = np.asarray(datenum)
    converted = np.array([_single(v) for v in arr.flat])
    return converted.reshape(arr.shape)
def POSIX_to_datetime(POSIX, tz=None):
    """
    Convert POSIX timestamps into python datetimes.

    Parameters
    ----------
    POSIX : array_like
        A POSIX timestamp or array of timestamps.
    tz : tzinfo class, optional
        Time zone information; the default (None) means UTC. Any tzinfo
        instance (e.g. pytz.utc) may still be passed explicitly.

    Returns
    -------
    dt : ndarray or datetime
        Python datetime. See datetime module. Unconvertible values
        (e.g. NaN) come back as np.nan.
    """
    # Default to the stdlib UTC object instead of pytz.utc: both denote
    # UTC and compare equal on aware datetimes, but this removes the hard
    # pytz requirement at call time.
    from datetime import timezone
    if tz is None:
        tz = timezone.utc

    def convert(ts):
        try:
            return datetime.fromtimestamp(ts, tz)
        except ValueError:
            return np.nan

    if np.iterable(POSIX):
        arr = np.asarray(POSIX)
        dt = np.array([convert(el) for el in arr.flat]).reshape(arr.shape)
    else:
        dt = convert(POSIX)
    return dt
def mid(x, axis=0):
    """Return mid point values (averages of consecutive samples) along the
    given axis; the result is one element shorter along that axis. Raises
    ValueError for scalar input."""
    ndim = np.ndim(x)
    if ndim == 0:
        raise ValueError
    if ndim == 1:
        return 0.5 * (x[1:] + x[:-1])
    # Move the target axis to the front, average neighbours, move it back.
    moved = np.swapaxes(x, axis, 0)
    averaged = 0.5 * (moved[1:, ...] + moved[:-1, ...])
    return np.swapaxes(averaged, 0, axis)
def nan_interp(x, xp, fp, left=None, right=None, axis=0, squeeze_me=True):
    """See numpy.interp documentation. This does the same thing but ignores NaN
    values in the data. It can accept 2D arrays.

    Parameters
    ----------
    x : float or 1D array
        The x-coordinates of the interpolated values. No NaNs please!
    xp : 1D or 2D array of floats
        The x-coordinates of the data points, must be increasing along the
        dimension along which the interpolation is being performed.
    fp : 1D or 2D array of floats or complex
        The y-coordinates of the data points, same shape as `xp`.
    left : optional float or complex corresponding to fp
        Value to return for `x < xp[0]`, default is `fp[0]`.
    right : optional float or complex corresponding to fp
        Value to return for `x > xp[-1]`, default is `fp[-1]`.
    axis : [-1, 0, 1] int
        Default is 0. The axis along which to perform the interpolation.
    squeeze_me : boolean
        Default is True. Squeeze output to remove singleton dimensions.

    Returns
    -------
    y : ndarray
        The interpolated values.

    Raises
    ------
    ValueError
        If axis is not -1, 0 or 1, if xp and fp differ in shape, or if the
        arrays are not 1 or 2 dimensional.
    """
    if axis not in [-1, 0, 1]:
        raise ValueError("The axis may be only -1, 0 or 1.")
    if xp.shape != fp.shape:
        raise ValueError("xp and fp have different shapes.")
    ndim = np.ndim(xp)
    if ndim not in (1, 2):
        raise ValueError("Only 1 or 2 dimensional arrays are supported.")
    # A sample is unusable if either its coordinate or its value is NaN.
    nans = np.isnan(xp) | np.isnan(fp)
    if ndim == 1:
        y = np.interp(x, xp[~nans], fp[~nans], left, right)
    else:
        nr, nc = xp.shape
        if axis == 0:
            # Interpolate each column independently.
            y = np.full((len(x) if np.iterable(x) else 1, nc), np.nan)
            for i in range(nc):
                valid = ~nans[:, i]
                y[:, i] = np.interp(x, xp[valid, i], fp[valid, i], left, right)
        else:
            # axis is 1 or -1: interpolate each row independently.
            # BUG FIX: the output array was previously allocated only inside
            # an unreachable `if axis == 0` test within this branch, so any
            # axis=1 call raised NameError on `y`.
            y = np.full((nr, len(x) if np.iterable(x) else 1), np.nan)
            for i in range(nr):
                valid = ~nans[i, :]
                y[i, :] = np.interp(x, xp[i, valid], fp[i, valid], left, right)
    if squeeze_me:
        return np.squeeze(y)
    else:
        return y
def interp_fill_valid_2D(x, xp, fp):
    """
    Interpolate each profile (column) of fp onto the locations x.

    Assumes input values fp is 2D with size N*M, where M denotes profiles
    and N depths.

    Parameters
    ----------
    x : numpy array
        Locations to interpolate to, 1D.
    xp : numpy array
        Data locations, 1D or 2D, shape (N) or (N, M).
    xp may be shared by all profiles (1D) or given per profile (2D).
    fp : numpy array
        Data values, 2D, shape (N, M).
    """
    n_profiles = fp.shape[1]
    xp_ndim = np.ndim(xp)
    if xp_ndim not in (1, 2):
        raise ValueError("xp dimensions are wrong.")
    out = np.full((x.size, n_profiles), np.nan)
    for i in range(n_profiles):
        column_x = xp if xp_ndim == 1 else xp[:, i]
        out[:, i] = interp_fill_valid(x, column_x, fp[:, i])
    return out
def interp_fill_valid(x, xp, fp):
    """Interpolate fp onto x, filling invalid regions with NaN.

    The region filled with NaN is everything outside the range
    [min(xp), max(xp)] taken over the finite samples of fp. If no sample
    of fp is finite, an all-NaN array the same shape as x is returned.
    """
    valid = np.isfinite(fp)
    if np.any(valid):
        xmax = np.max(xp[valid])
        xmin = np.min(xp[valid])
        f = np.interp(x, xp[valid], fp[valid])
        f[(x > xmax) | (x < xmin)] = np.nan
    else:
        # BUG FIX: previously returned fp unchanged, whose length can differ
        # from x; that breaks callers such as interp_fill_valid_2D, which
        # assigns the result into a column of length x.size.
        f = np.full(np.shape(x), np.nan)
    return f
def check_files(files):
    """
    Assumes that files is a dict or Munch object containing full file paths.
    Prints a confirmation for each path that exists; raises ValueError on
    the first missing file. Non-string entries are skipped.
    """
    for key in files:
        path = files[key]
        # Skip check for non-string objects.
        if type(path) is not str:
            continue
        if os.path.isfile(path):
            print("Found {} at '{}'.".format(key, path))
        else:
            raise ValueError("{} file not found: {}".format(key, path))
def find_files(args, dataset, paths_file="file_paths.yml"):
    """
    args: command line args
    dataset: yaml file path parameter key e.g. "sep2018"
    returns files as Munch

    Reads the dataset -> path mapping from paths_file, resolves each entry
    against the root directory named by the command line args, and verifies
    every resulting path exists (via check_files).
    """
    with open(paths_file, "r") as f:
        try:
            all_files = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            print(exc)
    # Each entry names a root (looked up in args) and a relative path;
    # join them to get the absolute location.
    entries = munchify(all_files[dataset])
    found = Munch()
    for key in entries:
        entry = entries[key]
        found[key] = os.path.join(args[entry.root], entry.path)
    check_files(found)
    return found
def load_parameters(parameter_file="processing_parameters.yml"):
    """Load processing parameters into Munch."""
    with open(parameter_file, "r") as f:
        try:
            raw = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            print(exc)
    return munchify(raw)
def closest_index(x, a):
    """
    x: value
    a: array
    Return the index in *a* of the element nearest to *x*.
    """
    distances = np.abs(a - x)
    return np.argmin(distances)
def regrid_profiles(time, timep, fp, time_win=60.0):
    """Regrid profile data fp (sampled at times timep) onto the grid *time*.

    Each grid time is matched to the nearest profile time provided it lies
    within time_win/(2*86400) days (i.e. half of time_win, in seconds, with
    times expressed in days); each profile is used at most once. Unmatched
    grid points are NaN.

    Parameters
    ----------
    time : 1D array
        Target time grid (days).
    timep : 1D array
        Profile times (days).
    fp : 1D or 2D array
        Profile data; if 2D, profiles are columns (second axis aligned
        with timep).
    time_win : float, optional
        Full matching window in seconds (default 60).

    Returns
    -------
    f : ndarray
        fp regridded onto *time*, NaN where no profile matched.
    """
    dt = time_win / (2 * 86400)
    nc = time.size
    idxs = []
    idxps = []
    for i in range(nc):
        time_diff = np.abs(timep - time[i])
        # Skip if no profile falls within the time window.
        if np.min(time_diff) > dt:
            continue
        idx = np.argmin(time_diff)
        # BUG FIX: skip profiles that were already matched. The original
        # tested `idx in idxs` (the list of *grid* indices) instead of the
        # list of profile indices, so one profile could be assigned to
        # several grid points.
        if idx in idxps:
            continue
        idxs.append(i)
        idxps.append(idx)
    # dtype=int keeps fancy indexing valid even when no match was found
    # (np.asarray([]) would default to float and raise IndexError).
    idxs = np.asarray(idxs, dtype=int)
    idxps = np.asarray(idxps, dtype=int)
    ndim = np.ndim(fp)
    if ndim == 1:
        f = np.full_like(time, np.nan)
        f[idxs] = fp[idxps]
    elif ndim == 2:
        nr = fp.shape[0]
        f = np.full((nr, nc), np.nan)
        f[:, idxs] = fp[:, idxps]
    else:
        raise ValueError("fp must be 1D or 2D.")
    return f
def apply_utm(m):
    """m is a Munch object; attach UTM coordinates (x, y, zone_number,
    zone_letter) computed from m.lat and m.lon, and return m."""
    easting, northing, zone_num, zone_letter = utm.from_latlon(m.lat, m.lon)
    m.x = easting
    m.y = northing
    m.zone_number = zone_num
    m.zone_letter = zone_letter
    return m
def rolling_window(a, size):
    """Return a strided view of overlapping windows of length *size* along
    the last axis of *a*. The array is reflect-padded with size-1 samples
    at the start of the last axis (and 1 sample at the start of every other
    axis), so the window count along the last axis equals its original
    length. No data is copied beyond the padding step."""
    ndim = len(a.shape)
    front_pad = np.ones(ndim, dtype=np.int32)
    front_pad[-1] = size - 1
    pad_spec = list(zip(front_pad, np.zeros(ndim, dtype=np.int32)))
    padded = np.pad(a, pad_spec, mode="reflect")
    win_shape = padded.shape[:-1] + (padded.shape[-1] - size + 1, size)
    win_strides = padded.strides + (padded.strides[-1],)
    return np.lib.stride_tricks.as_strided(padded, shape=win_shape, strides=win_strides)
def despike(x, n1=3, n2=6, size=101, xmin=-1.0, xmax=1.0, fill=True):
"""
Despike data using a median filter and 2 pass standard deviation threshold.
Parameters
----------
x : numpy array
Data, evenly spaced.
n1 : float
Pass 1 significance threshold, n standard deviations from reference data.
n2 : float
Pass 2 significance threshold, n standard deviations from reference data.
size : float, optional
Number of data points in the filter window.
xmin : float, optional
Minimum value of x, data below | |
"""
Tools for converting Cran packages to conda recipes.
"""
from __future__ import absolute_import, division, print_function
import requests
import yaml
import re
import sys
from os import makedirs, listdir
from os.path import join, exists, isfile, basename, isdir
from itertools import chain
import subprocess
from conda.install import rm_rf
from conda_build import source, metadata
# meta.yaml template for a CRAN package recipe. BUG FIX: the source `fn:`
# line read `{fn_key} (unknown)` -- a corrupted placeholder; `main` builds
# d['filename'] for exactly this slot, so it is restored as {filename}.
CRAN_META = """\
package:
  name: {packagename}
  # Note that conda versions cannot contain -, so any -'s in the version have
  # been replaced with _'s.
  version: "{conda_version}"

source:
  {fn_key} {filename}
  {url_key} {cranurl}
  {git_url_key} {git_url}
  {git_tag_key} {git_tag}
  # You can add a hash for the file here, like md5 or sha1
  # md5: 49448ba4863157652311cc5ea4fea3ea
  # sha1: 3bcfbee008276084cbb37a2b453963c61176a322
  # patches:
   # List any patch files here
   # - fix.patch

build:
  # If this is a new build for the same version, increment the build
  # number. If you do not include this key, it defaults to 0.
  # number: 1

  # This is required to make R link correctly on Linux.
  rpaths:
    - lib/R/lib/
    - lib/

{suggests}
requirements:
  build:{build_depends}

  run:{run_depends}

test:
  commands:
    # You can put additional test commands to be run here.
    - $R -e "library('{cran_packagename}')" # [not win]
    - "\\"%R%\\" -e \\"library('{cran_packagename}')\\"" # [win]

  # You can also put a file called run_test.py, run_test.sh, or run_test.bat
  # in the recipe that will be run at test time.

  # requires:
    # Put any additional test requirements here.

about:
  {home_comment}home:{homeurl}
  license: {license}
  {summary_comment}summary:{summary}

# The original CRAN metadata for this package was:
{cran_metadata}

# See
# http://docs.continuum.io/conda/build.html for
# more information about meta.yaml
"""
# build.sh template written into each generated recipe.
CRAN_BUILD_SH = """\
#!/bin/bash
# R refuses to build packages that mark themselves as Priority: Recommended
mv DESCRIPTION DESCRIPTION.old
grep -v '^Priority: ' DESCRIPTION.old > DESCRIPTION
$R CMD INSTALL --build .
# Add more build steps here, if they are necessary.
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build process.
"""

# bld.bat template written into each generated recipe (Windows build).
CRAN_BLD_BAT = """\
"%R%" CMD INSTALL --build .
if errorlevel 1 exit 1
@rem Add more build steps here, if they are necessary.
@rem See
@rem http://docs.continuum.io/conda/build.html
@rem for a list of environment variables that are set during the build process.
"""

# Separator used when splicing multi-entry values (e.g. URL lists) into the
# recipe template.
INDENT = '\n - '

# Keys recognised in CRAN PACKAGES/DESCRIPTION metadata.
CRAN_KEYS = [
    'Site',
    'Archs',
    'Depends',
    'Enhances',
    'Imports',
    'License',
    'License_is_FOSS',
    'License_restricts_use',
    'LinkingTo',
    'MD5sum',
    'NeedsCompilation',
    'OS_type',
    'Package',
    'Path',
    'Priority',
    'Suggests',
    'Version',

    'Title',
    'Author',
    'Maintainer',
]

# The following base/recommended package names are derived from R's source
# tree (R-3.0.2/share/make/vars.mk). Hopefully they don't change too much
# between versions.
R_BASE_PACKAGE_NAMES = (
    'base',
    'tools',
    'utils',
    'grDevices',
    'graphics',
    'stats',
    'datasets',
    'methods',
    'grid',
    'splines',
    'stats4',
    'tcltk',
    'compiler',
    'parallel',
)

R_RECOMMENDED_PACKAGE_NAMES = (
    'MASS',
    'lattice',
    'Matrix',
    'nlme',
    'survival',
    'boot',
    'cluster',
    'codetools',
    'foreign',
    'KernSmooth',
    'rpart',
    'class',
    'nnet',
    'spatial',
    'mgcv',
)

# Parses a dependency like "R (>= 2.15.0) [linux]" into name / relational
# operator / version / architectures groups.
# Stolen then tweaked from debian.deb822.PkgRelation.__dep_RE.
VERSION_DEPENDENCY_REGEX = re.compile(
    r'^\s*(?P<name>[a-zA-Z0-9.+\-]{1,})'
    r'(\s*\(\s*(?P<relop>[>=<]+)\s*'
    r'(?P<version>[0-9a-zA-Z:\-+~.]+)\s*\))'
    r'?(\s*\[(?P<archs>[\s!\w\-]+)\])?\s*$'
)
def dict_from_cran_lines(lines):
    """Parse 'Key: value' CRAN metadata lines into a dict.

    Empty lines are skipped; the raw input list is preserved under the
    'orig_lines' key. Exits the process if a non-empty line cannot be
    split on ': '.
    """
    parsed = {}
    for line in lines:
        if not line:
            continue
        try:
            key, value = line.split(': ', 1)
        except ValueError:
            sys.exit("Error: Could not parse metadata (%s)" % line)
        parsed[key] = value
        # if key not in CRAN_KEYS:
        #     print("Warning: Unknown key %s" % key)
    parsed['orig_lines'] = lines
    return parsed
def remove_package_line_continuations(chunk):
    """Fold RFC-822-style continuation lines of a CRAN metadata chunk.

    Lines beginning with a space or tab are continuations of the previous
    line; each continuation is appended (with a single leading space) to
    the line it continues. Example: the three 'Imports:' lines

        'Imports: MASS, R.methodsS3 (>= 1.5.2), R.oo (>= 1.15.8), R.utils (>=',
        ' 1.27.1), matrixStats (>= 0.8.12), R.filesets (>= 2.3.0), ',
        ' sampleSelection, scatterplot3d, strucchange, systemfit',

    collapse into one 'Imports: ...' line. The returned list always ends
    with a single empty string. NOTE: the input list is mutated (an empty
    sentinel is appended, and when continuations are found the folded
    entries are replaced); when any continuation was folded a new list is
    returned, otherwise the same (mutated) list.
    """
    continuation = (' ', '\t')
    continued_ix = None
    continued_line = None
    had_continuation = False
    accumulating_continuations = False

    # Sentinel: guarantees the final accumulated line gets flushed.
    chunk.append('')
    for (i, line) in enumerate(chunk):
        if line.startswith(continuation):
            # Normalise the leading whitespace to a single space.
            line = ' ' + line.lstrip()
            if accumulating_continuations:
                assert had_continuation
                continued_line += line
                chunk[i] = None
            else:
                # Start folding into the previous (non-continuation) line.
                accumulating_continuations = True
                continued_ix = i-1
                continued_line = chunk[continued_ix] + line
                had_continuation = True
                chunk[i] = None
        else:
            if accumulating_continuations:
                assert had_continuation
                # Flush the accumulated line back into place.
                chunk[continued_ix] = continued_line
                accumulating_continuations = False
                continued_line = None
                continued_ix = None

    if had_continuation:
        # Remove the None(s) (this also drops the '' sentinel, which is
        # falsy, hence the re-append below).
        chunk = [ c for c in chunk if c ]
        chunk.append('')

    return chunk
def yaml_quote_string(string):
    """
    Quote a string for use in YAML.

    We can't just use yaml.dump because it adds ellipses to the end of the
    string, and it in general doesn't handle being placed inside an existing
    document very well.

    Note that this function is NOT general.
    """
    dumped = yaml.dump(string)
    without_ellipsis = dumped.replace('\n...\n', '')
    return without_ellipsis.replace('\n', '\n ')
def clear_trailing_whitespace(string):
    """Strip trailing whitespace from every line of *string*."""
    return '\n'.join(line.rstrip() for line in string.splitlines())
def get_package_metadata(cran_url, package, session):
    """Fetch and parse the DESCRIPTION metadata for *package* from CRAN.

    Exits the process on a 404; re-raises any other HTTP error. The raw
    DESCRIPTION text is preserved under 'orig_description'.
    """
    url = cran_url + 'web/packages/' + package + '/DESCRIPTION'
    response = session.get(url)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        if e.response.status_code == 404:
            sys.exit("ERROR: %s (404 Not Found)" % url)
        raise
    description_text = response.text
    lines = remove_package_line_continuations(description_text.splitlines())
    parsed = dict_from_cran_lines(lines)
    parsed['orig_description'] = description_text
    return parsed
def get_latest_git_tag():
    """Return the last tag listed by `git tag` in the source work dir.

    Exits the process if git fails or no tags exist.
    """
    proc = subprocess.Popen(['git', 'tag'], stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, cwd=source.WORK_DIR)
    out, err = proc.communicate()
    out = out.decode('utf-8')
    err = err.decode('utf-8')
    if err or proc.returncode:
        sys.exit("Error: git tag failed (%s)" % err)
    tags = out.strip().splitlines()
    if not tags:
        sys.exit("Error: no tags found")
    print("Using tag %s" % tags[-1])
    return tags[-1]
def main(args, parser):
if len(args.packages) > 1 and args.version_compare:
parser.error("--version-compare only works with one package at a time")
if not args.update_outdated and not args.packages:
parser.error("At least one package must be supplied")
package_dicts = {}
[output_dir] = args.output_dir
session = requests.Session()
try:
import cachecontrol
import cachecontrol.caches
except ImportError:
print("Tip: install CacheControl to cache the CRAN metadata")
else:
session = cachecontrol.CacheControl(session,
cache=cachecontrol.caches.FileCache(join(output_dir,
'.web_cache')))
print("Fetching metadata from %s" % args.cran_url)
r = session.get(args.cran_url + "src/contrib/PACKAGES")
r.raise_for_status()
PACKAGES = r.text
package_list = [remove_package_line_continuations(i.splitlines()) for i in PACKAGES.split('\n\n')]
cran_metadata = {d['Package'].lower(): d for d in map(dict_from_cran_lines,
package_list)}
if args.update_outdated:
args.packages = get_outdated(args.output_dir, cran_metadata, args.packages)
for pkg in args.packages:
rm_rf(join(args.output_dir, 'r-' + pkg))
while args.packages:
package = args.packages.pop()
is_github_url = 'github.com' in package
url = package
if is_github_url:
rm_rf(source.WORK_DIR)
source.git_source({'git_url': package}, '.')
git_tag = args.git_tag[0] if args.git_tag else get_latest_git_tag()
p = subprocess.Popen(['git', 'checkout', git_tag], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=source.WORK_DIR)
stdout, stderr = p.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
if p.returncode:
sys.exit("Error: 'git checkout %s' failed (%s).\nInvalid tag?" % (git_tag, stderr.strip()))
if stdout:
print(stdout, file=sys.stdout)
if stderr:
print(stderr, file=sys.stderr)
DESCRIPTION = join(source.WORK_DIR, "DESCRIPTION")
if not isfile(DESCRIPTION):
sys.exit("%s does not appear to be a valid R package (no DESCRIPTION file)" % package)
with open(DESCRIPTION) as f:
description_text = f.read()
d = dict_from_cran_lines(remove_package_line_continuations(description_text.splitlines()))
d['orig_description'] = description_text
package = d['Package'].lower()
cran_metadata[package] = d
if package.startswith('r-'):
package = package[2:]
if package.lower() not in cran_metadata:
sys.exit("Package %s not found" % package)
# Make sure package is always uses the CRAN capitalization
package = cran_metadata[package.lower()]['Package']
if not is_github_url:
cran_metadata[package.lower()].update(get_package_metadata(args.cran_url,
package, session))
dir_path = join(output_dir, 'r-' + package.lower())
if exists(dir_path) and not args.version_compare:
raise RuntimeError("directory already exists: %s" % dir_path)
cran_package = cran_metadata[package.lower()]
d = package_dicts.setdefault(package,
{
'cran_packagename': package,
'packagename': 'r-' + package.lower(),
'build_depends': '',
'run_depends': '',
# CRAN doesn't seem to have this metadata :(
'home_comment': '#',
'homeurl': '',
'summary_comment': '#',
'summary': '',
})
if is_github_url:
d['url_key'] = ''
d['fn_key'] = ''
d['git_url_key'] = 'git_url:'
d['git_tag_key'] = 'git_tag:'
d['filename'] = ''
d['cranurl'] = ''
d['git_url'] = url
d['git_tag'] = git_tag
else:
d['url_key'] = 'url:'
d['fn_key'] = 'fn:'
d['git_url_key'] = ''
d['git_tag_key'] = ''
d['git_url'] = ''
d['git_tag'] = ''
if args.version:
raise NotImplementedError("Package versions from CRAN are not yet implemented")
[version] = args.version
d['version'] = version
d['cran_version'] = cran_package['Version']
# Conda versions cannot have -. Conda (verlib) will treat _ as a .
d['conda_version'] = d['cran_version'].replace('-', '_')
if args.version_compare:
sys.exit(not version_compare(dir_path, d['conda_version']))
if not is_github_url:
d['filename'] = "{cran_packagename}_{cran_version}.tar.gz".format(**d)
if args.archive:
d['cranurl'] = (INDENT + args.cran_url + 'src/contrib/' +
d['filename'] + INDENT + args.cran_url + 'src/contrib/' +
'Archive/' + d['cran_packagename'] + '/' + d['filename'])
else:
d['cranurl'] = ' ' + args.cran_url + 'src/contrib/' + d['filename']
d['cran_metadata'] = '\n'.join(['# %s' % l for l in
cran_package['orig_lines'] if l])
# XXX: We should maybe normalize these
d['license'] = cran_package.get("License", "None")
if 'License_is_FOSS' in cran_package:
d['license'] += ' (FOSS)'
if cran_package.get('License_restricts_use', None) == 'yes':
d['license'] += ' (Restricts use)'
if "URL" in cran_package:
d['home_comment'] = ''
d['homeurl'] = ' ' + yaml_quote_string(cran_package['URL'])
if 'Description' in cran_package:
d['summary_comment'] | |
None and not update:
message = ("Nodules have already been extracted. " +
"Put update argument as True for refreshing")
logger.warning(message)
return self
if nodules_records is not None:
# load from record-array
self.nodules = nodules_records
else:
# assume that nodules is supplied and load from it
required_columns = np.array(['seriesuid', 'diameter_mm',
'coordZ', 'coordY', 'coordX'])
if not (isinstance(nodules, pd.DataFrame) and np.all(np.in1d(required_columns, nodules.columns))):
raise ValueError(("Argument 'nodules' must be pandas DataFrame"
+ " with {} columns. Make sure that data provided"
+ " in correct format.").format(required_columns.tolist()))
nodules_df = nodules.set_index('seriesuid')
unique_indices = nodules_df.index.unique()
inter_index = np.intersect1d(unique_indices, self.indices)
nodules_df = nodules_df.loc[inter_index,
["coordZ", "coordY",
"coordX", "diameter_mm"]]
num_nodules = nodules_df.shape[0]
self.nodules = np.rec.array(np.zeros(num_nodules,
dtype=self.nodules_dtype))
counter = 0
for pat_id, coordz, coordy, coordx, diam in nodules_df.itertuples():
pat_pos = self.index.get_pos(pat_id)
self.nodules.patient_pos[counter] = pat_pos
self.nodules.nodule_center[counter, :] = np.array([coordz,
coordy,
coordx])
self.nodules.nodule_size[counter, :] = np.array([diam, diam, diam])
counter += 1
self._refresh_nodules_info(images_loaded)
return self
@action
def fetch_nodules_from_mask(self, images_loaded=True, src='masks'):
    """ Fetch nodules info (centers and sizes) from masks.

    Runs skimage.measure.label for fetching nodules regions
    from masks, extracts nodules info from segmented regions
    and puts this information into the self.nodules np.recarray.

    Parameters
    ----------
    images_loaded : bool
        if True, i.e. `images` component is loaded, image_size is used
        to compute the correct nodules location inside `skyscraper`.
        If False, the location inside `skyscraper` is not updated.
    src : str
        name of the component with masks.

    Returns
    -------
    batch

    Notes
    -----
    Sizes along (z, y, x) will be the same (the region's equivalent diameter).
    """
    nodules_list = []
    for pos in range(len(self)):
        mask = self.unpack(src)[pos]
        # label connected voxel regions of the mask; each region is one nodule
        mask_labels = measure.label(mask, background=0)
        for props in measure.regionprops(np.int16(mask_labels)):
            # centroid is in voxel coordinates -> convert to world coordinates
            # NOTE: np.float was a deprecated alias of the builtin float and
            # was removed in NumPy 1.24; use the builtin type instead.
            center = np.asarray((props.centroid[0],
                                 props.centroid[1],
                                 props.centroid[2]), dtype=float)
            center = center * self.spacing[pos] + self.origin[pos]
            diameter = np.asarray(
                [props.equivalent_diameter] * 3, dtype=float)
            diameter = diameter * self.spacing[pos]
            nodules_list.append({'patient_pos': pos,
                                 'nodule_center': center,
                                 'nodule_size': diameter})
    # allocate the record array once, then fill it field by field
    num_nodules = len(nodules_list)
    self.nodules = np.rec.array(
        np.zeros(num_nodules, dtype=self.nodules_dtype))
    for i, nodule in enumerate(nodules_list):
        self.nodules.patient_pos[i] = nodule['patient_pos']
        self.nodules.nodule_center[i, :] = nodule['nodule_center']
        self.nodules.nodule_size[i, :] = nodule['nodule_size']
    self._refresh_nodules_info(images_loaded)
    return self
# TODO: another name of method
def _fit_into_bounds(self, size, variance=None):
    """ Fetch start voxel coordinates of all nodules.

    Get start voxel coordinates of all nodules in batch.
    Note that all nodules are considered to have the same fixed size
    defined by the `size` argument: if a nodule box sticks out of the
    patient's 3d image bounds, its start is shifted back inside.

    Parameters
    ----------
    size : list or tuple of ndarrays
        ndarray(3, ) with diameters of nodules in (z, y, x).
    variance : ndarray(3, ), optional
        diagonal elements of a multivariate normal distribution,
        for sampling random shifts along (z, y, x) correspondingly.

    Returns
    -------
    ndarray
        start coordinates (z, y, x) of all nodules in batch.
    """
    # NOTE: np.int was a deprecated alias of the builtin int and was
    # removed in NumPy 1.24; use the builtin type instead.
    size = np.array(size, dtype=int)
    center_pix = np.abs(self.nodules.nodule_center -
                        self.nodules.origin) / self.nodules.spacing
    start_pix = (np.rint(center_pix) - np.rint(size / 2))
    if variance is not None:
        # random normally-distributed jitter of every start position
        start_pix += np.random.multivariate_normal(np.zeros(3),
                                                   np.diag(variance),
                                                   self.nodules.patient_pos.shape[0])
    end_pix = start_pix + size
    # shift boxes that stick out beyond the upper image border back inside
    bias_upper = np.maximum(end_pix - self.nodules.img_size, 0)
    start_pix -= bias_upper
    end_pix -= bias_upper
    # then shift boxes that stick out below zero back inside
    bias_lower = np.maximum(-start_pix, 0)
    start_pix += bias_lower
    end_pix += bias_lower
    # translate into `skyscraper` coordinates via the per-patient offset
    return (start_pix + self.nodules.offset).astype(int)
@action
def create_mask(self, mode='rectangle'):
    """ Create `masks` component from `nodules` component.

    Parameters
    ----------
    mode : 'rectangle' or 'ellipse'
        form of the nodule in mask.

    Returns
    -------
    batch

    Raises
    ------
    ValueError
        if `mode` is neither 'rectangle' nor 'ellipse'.

    Notes
    -----
    `nodules` must be not None before calling this method.
    see :func:`~radio.preprocessing.ct_masked_batch.CTImagesMaskedBatch.fetch_nodules_info`
    for more details.
    """
    if self.nodules is None:
        message = ("Info about nodules location must " +
                   "be loaded before calling this method. " +
                   "Nothing happened.")
        logger.warning(message)
        # FIX: previously execution fell through and crashed with
        # AttributeError on `self.nodules`; honour the "Nothing happened"
        # contract by returning empty masks instead.
        self.masks = np.zeros_like(self.images)
        return self
    self.masks = np.zeros_like(self.images)
    center_pix = np.abs(self.nodules.nodule_center -
                        self.nodules.origin) / self.nodules.spacing
    radius_pix = np.rint(self.nodules.nodule_size / self.nodules.spacing / 2)
    # np.int alias was removed in NumPy 1.24 -> builtin int
    center_pix = np.rint(center_pix).astype(int)
    radius_pix = np.rint(radius_pix).astype(int)
    if mode == 'rectangle':
        start_pix = (center_pix - radius_pix)
        start_pix = np.rint(start_pix).astype(int)
        make_rect_mask_numba(self.masks, self.nodules.offset,
                             self.nodules.img_size + self.nodules.offset, start_pix,
                             np.rint(self.nodules.nodule_size / self.nodules.spacing))
    elif mode == 'ellipse':
        make_ellipse_mask_numba(self.masks, self.nodules.offset.astype(np.int32),
                                self.nodules.img_size + self.nodules.offset,
                                center_pix, radius_pix)
    else:
        # FIX: an unknown mode used to be silently ignored, leaving masks
        # all-zero with no indication of the mistake
        raise ValueError("mode must be 'rectangle' or 'ellipse', got %r" % (mode,))
    return self
@action
def truncate_mask(self, threshold=0.2, min_val=0, max_val=255):
    """ Zero out mask voxels whose underlying image voxels are too dark.

    A mask voxel is kept only where ``masks * images`` exceeds
    ``threshold * (max_val - min_val)``.

    Parameters
    ----------
    threshold : float
        binarizing threshold for the initial image, expressed as a
        fraction of the ``(max_val - min_val)`` intensity range.
    min_val : float
        minimum value of image.
    max_val : float
        maximum value of image.
    """
    cutoff = threshold * (max_val - min_val)
    keep = (self.masks * self.images) > cutoff
    self.masks = np.array(keep, dtype=np.int32)
    return self
def fetch_mask(self, shape):
    """ Create a `masks`-like array of a different size than `images`,
    using the `nodules` component.

    Parameters
    ----------
    shape : tuple, list or ndarray of int.
        (z_dim, y_dim, x_dim), shape of the mask to be created.

    Returns
    -------
    ndarray
        3d array with masks in form of `skyscraper`; all-zero if nodules
        info has not been loaded.
    """
    # TODO: part of this code repeats create_mask; better to unify the two
    mask = np.zeros(shape=(len(self) * shape[0], *shape[1:]))
    if self.nodules is None:
        message = ("Info about nodules location must " +
                   "be loaded before calling this method. " +
                   "Nothing happened.")
        logger.warning(message)
        # FIX: previously execution fell through and crashed with
        # AttributeError on `self.nodules`; return the empty mask instead.
        return mask
    # infer scale factor; assume patients are already resized to equal
    # shapes
    scale_factor = np.asarray(shape) / self.images_shape[0, :]
    # get rescaled nodule-centers, nodule-sizes, offsets, locs of nod
    # starts (np.int alias was removed in NumPy 1.24 -> builtin int)
    center_scaled = (np.abs(self.nodules.nodule_center - self.nodules.origin) /
                     self.nodules.spacing * scale_factor)
    start_scaled = (center_scaled - scale_factor * self.nodules.nodule_size /
                    self.nodules.spacing / 2)
    start_scaled = np.rint(start_scaled).astype(int)
    offset_scaled = np.rint(self.nodules.offset *
                            scale_factor).astype(int)
    img_size_scaled = np.rint(
        self.nodules.img_size * scale_factor).astype(int)
    nod_size_scaled = (np.rint(scale_factor * self.nodules.nodule_size /
                               self.nodules.spacing)).astype(int)
    # put nodules into mask
    make_rect_mask_numba(mask, offset_scaled, img_size_scaled + offset_scaled,
                         start_scaled, nod_size_scaled)
    # return ndarray-mask
    return mask
# TODO rename function to sample_random_nodules_positions
def sample_random_nodules(self, num_nodules, nodule_size, histo=None):
    """ Sample random nodules positions in CTImagesBatchMasked.

    Samples random nodule positions inside patients' data. Each nodule has
    the shape defined by `nodule_size`. Positions are guaranteed to fit
    inside the sampled patient's image (assumes patients' data shapes are
    equal — TODO confirm; nothing here raises if they are not).

    Parameters
    ----------
    num_nodules : int
        number of nodules to sample from dataset.
    nodule_size : ndarray(3, )
        crop shape along (z, y, x).
    histo : tuple, optional
        np.histogram()'s output: 3d-histogram (bins, edges) used as a
        spatial sampling distribution instead of the uniform one.

    Returns
    -------
    (ndarray, ndarray)
        ndarray(num_nodules, 3) of start positions (integers) of nodules
        in the batch `skyscraper`, and the sampled patient indices.
    """
    all_indices = np.arange(len(self))
    sampled_indices = np.random.choice(
        all_indices, num_nodules, replace=True)
    # z-offset of each sampled patient inside the skyscraper
    offset = np.zeros((num_nodules, 3))
    offset[:, 0] = self.lower_bounds[sampled_indices]
    data_shape = self.images_shape[sampled_indices, :]
    # if supplied, use the histogram as the sampler (PEP 8 / E731:
    # plain defs instead of lambda assignments)
    if histo is None:
        def sampler(size):
            # uniform positions in the unit cube
            return np.random.rand(size, 3)
    else:
        def sampler(size):
            # absolute voxel positions drawn from the 3d-histogram
            return sample_histo3d(histo, size)
    samples = sampler(size=num_nodules) * (data_shape - nodule_size)
    if histo is not None:
        # histogram samples are absolute coords, so normalise them;
        # equivalent to samples / data_shape * (data_shape - nodule_size)
        samples /= data_shape
    # np.int alias was removed in NumPy 1.24 -> builtin int
    return np.asarray(samples + offset, dtype=int), sampled_indices
@action
def sample_nodules(self, batch_size, nodule_size=(32, 64, 64), share=0.8, variance=None, # pylint: disable=too-many-locals, too-many-statements
mask_shape=None, histo=None):
""" Sample random crops of `images` and `masks` from batch.
Create random crops, both with and without nodules in it, from input batch.
Parameters
----------
batch_size : int
number of nodules in the output batch. Required,
if share=0.0. If None, resulting batch will include all
cancerous nodules.
nodule_size : tuple, list or ndarray of int
crop shape along (z,y,x).
share : float
share of cancer crops in the batch.
if input CTImagesBatch contains less cancer
nodules than needed random nodules will be taken.
variance : tuple, list or ndarray of float
variances of normally distributed random shifts of
nodules' start positions.
mask_shape : tuple, list or ndarray of int
size of `masks` crop in (z,y,x)-order. If not None,
crops with masks would be of mask_shape.
If None, mask crop shape would be equal to crop_size.
histo : tuple
np.histogram()'s output.
Used for sampling non-cancerous crops.
Returns
-------
Batch
batch with cancerous and non-cancerous crops in a proportion defined by
`share` with total `batch_size` nodules. If `share` == 1.0, `batch_size`
is None, resulting batch consists of all cancerous crops stored in batch.
"""
# make sure that nodules' info is fetched and args are OK
if self.nodules is None:
raise AttributeError("Info about nodules location must " +
"be loaded before calling this method")
if variance is not None:
variance = np.asarray(variance, dtype=np.int)
variance = variance.flatten()
if len(variance) != 3:
message = ('Argument variance be np.array-like' +
'and has shape (3,). ' +
'Would be used no-scale-shift.')
logger.warning(message)
variance = None
if share == 0.0 and batch_size is None:
raise ValueError('Either supply batch_size or set share to positive number')
# pos of batch-items | |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import tempfile
import mox
import six
from six.moves.urllib import error
from six.moves.urllib import request
import testtools
from testtools import matchers
import yaml
from heatclient.common import template_utils
from heatclient.common import utils
from heatclient import exc
class ShellEnvironmentTest(testtools.TestCase):
    """Tests for environment-file processing in heatclient's template_utils.

    Every test stubs ``six.moves.urllib.request.urlopen`` with mox, so no
    real file or network access happens. Mox expectations are recorded in
    order (one ``AndReturn`` per expected fetch) unless marked
    ``InAnyOrder``; ``VerifyAll``/``UnsetStubs`` run as cleanups.

    NOTE(review): referenced template files are expected to be opened
    twice by the resolver — presumably once for validation and once for
    content; confirm against template_utils before changing counts.
    """

    template_a = b'{"heat_template_version": "2013-05-23"}'

    def setUp(self):
        super(ShellEnvironmentTest, self).setUp()
        self.m = mox.Mox()
        # verify every recorded expectation and restore stubs per-test
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def collect_links(self, env, content, url, env_base_url=''):
        """Resolve *env*'s registry; check *url* resolved to *content*.

        When *url* is falsy, only checks that resolution succeeds without
        fetching anything.
        """
        jenv = yaml.safe_load(env)
        files = {}
        if url:
            self.m.StubOutWithMock(request, 'urlopen')
            request.urlopen(url).AndReturn(six.BytesIO(content))
            request.urlopen(url).AndReturn(six.BytesIO(content))
            self.m.ReplayAll()
        template_utils.resolve_environment_urls(
            jenv.get('resource_registry'), files, env_base_url)
        if url:
            self.assertEqual(content.decode('utf-8'), files[url])

    def test_ignore_env_keys(self):
        """hooks/restricted_actions under ``resources`` are kept verbatim."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          resources:
            bar:
              hooks: pre_create
              restricted_actions: replace
        '''
        request.urlopen('file://%s' % env_file).AndReturn(
            six.BytesIO(env))
        self.m.ReplayAll()
        _, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual(
            {u'resource_registry': {u'resources': {
                u'bar': {u'hooks': u'pre_create',
                         u'restricted_actions': u'replace'}}}},
            env_dict)
        self.m.VerifyAll()

    def test_process_environment_file(self):
        """An absolute file URL in the registry is fetched and recorded."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": "file:///home/b/a.yaml"
        '''
        request.urlopen('file://%s' % env_file).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': 'file:///home/b/a.yaml'}},
            env_dict)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])

    def test_process_environment_relative_file(self):
        """A bare relative path resolves against the env file's directory."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": a.yaml
        '''
        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        self.assertEqual(
            env_url,
            utils.normalise_file_path_to_url(env_file))
        self.assertEqual(
            'file:///home/my/dir',
            utils.base_url_for_url(env_url))
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': 'file:///home/my/dir/a.yaml'}},
            env_dict)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/my/dir/a.yaml'])

    def test_process_environment_relative_file_up(self):
        """A ``../`` relative path resolves above the env file's directory."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": ../bar/a.yaml
        '''
        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/my/bar/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/my/bar/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        env_url = 'file://%s' % env_file
        self.assertEqual(
            env_url,
            utils.normalise_file_path_to_url(env_file))
        self.assertEqual(
            'file:///home/my/dir',
            utils.base_url_for_url(env_url))
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual(
            {'resource_registry': {
                'OS::Thingy': 'file:///home/my/bar/a.yaml'}},
            env_dict)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/my/bar/a.yaml'])

    def test_process_environment_url(self):
        """An http env URL works; relative refs resolve against it."""
        env = b'''
        resource_registry:
          "OS::Thingy": "a.yaml"
        '''
        url = 'http://no.where/some/path/to/file.yaml'
        tmpl_url = 'http://no.where/some/path/to/a.yaml'
        self.m.StubOutWithMock(request, 'urlopen')
        request.urlopen(url).AndReturn(six.BytesIO(env))
        request.urlopen(tmpl_url).AndReturn(six.BytesIO(self.template_a))
        request.urlopen(tmpl_url).AndReturn(six.BytesIO(self.template_a))
        self.m.ReplayAll()
        files, env_dict = template_utils.process_environment_and_files(
            url)
        self.assertEqual({'resource_registry': {'OS::Thingy': tmpl_url}},
                         env_dict)
        self.assertEqual(self.template_a.decode('utf-8'), files[tmpl_url])

    def test_process_environment_empty_file(self):
        """An empty env file yields empty env and files dicts."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env = b''
        request.urlopen('file://%s' % env_file).AndReturn(six.BytesIO(env))
        self.m.ReplayAll()
        files, env_dict = template_utils.process_environment_and_files(
            env_file)
        self.assertEqual({}, env_dict)
        self.assertEqual({}, files)

    def test_no_process_environment_and_files(self):
        """No env path at all yields empty env and files dicts."""
        files, env = template_utils.process_environment_and_files()
        self.assertEqual({}, env)
        self.assertEqual({}, files)

    def test_process_multiple_environments_and_files(self):
        """Two env files merge parameters and registries."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file1 = '/home/my/dir/env1.yaml'
        env_file2 = '/home/my/dir/env2.yaml'
        env1 = b'''
        parameters:
          "param1": "value1"
        resource_registry:
          "OS::Thingy1": "file:///home/b/a.yaml"
        '''
        env2 = b'''
        parameters:
          "param2": "value2"
        resource_registry:
          "OS::Thingy2": "file:///home/b/b.yaml"
        '''
        request.urlopen('file://%s' % env_file1).AndReturn(
            six.BytesIO(env1))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file://%s' % env_file2).AndReturn(
            six.BytesIO(env2))
        request.urlopen('file:///home/b/b.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        files, env = template_utils.process_multiple_environments_and_files(
            [env_file1, env_file2])
        self.assertEqual(
            {
                'resource_registry': {
                    'OS::Thingy1': 'file:///home/b/a.yaml',
                    'OS::Thingy2': 'file:///home/b/b.yaml'},
                'parameters': {
                    'param1': 'value1',
                    'param2': 'value2'}
            },
            env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/b.yaml'])

    def test_process_multiple_environments_default_resources(self):
        """Per-resource registry entries from both env files are merged."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file1 = '/home/my/dir/env1.yaml'
        env_file2 = '/home/my/dir/env2.yaml'
        env1 = b'''
        resource_registry:
          resources:
            resource1:
              "OS::Thingy1": "file:///home/b/a.yaml"
            resource2:
              "OS::Thingy2": "file:///home/b/b.yaml"
        '''
        env2 = b'''
        resource_registry:
          resources:
            resource1:
              "OS::Thingy3": "file:///home/b/a.yaml"
            resource2:
              "OS::Thingy4": "file:///home/b/b.yaml"
        '''
        # fetch order within each env is unspecified -> InAnyOrder
        request.urlopen('file://%s' % env_file1).InAnyOrder().AndReturn(
            six.BytesIO(env1))
        request.urlopen('file:///home/b/a.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file://%s' % env_file2).InAnyOrder().AndReturn(
            six.BytesIO(env2))
        request.urlopen('file:///home/b/a.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/b.yaml').InAnyOrder().AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        files, env = template_utils.process_multiple_environments_and_files(
            [env_file1, env_file2])
        self.assertEqual(
            {
                'resource_registry': {
                    'resources': {
                        'resource1': {
                            'OS::Thingy1': 'file:///home/b/a.yaml',
                            'OS::Thingy3': 'file:///home/b/a.yaml'
                        },
                        'resource2': {
                            'OS::Thingy2': 'file:///home/b/b.yaml',
                            'OS::Thingy4': 'file:///home/b/b.yaml'
                        }
                    }
                }
            },
            env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/b.yaml'])

    def test_no_process_multiple_environments_and_files(self):
        """No env paths at all yields empty env and files dicts."""
        files, env = template_utils.process_multiple_environments_and_files()
        self.assertEqual({}, env)
        self.assertEqual({}, files)

    def test_process_multiple_environments_and_files_from_object(self):
        """Envs can be fetched through a caller-supplied object_request."""
        env_object = 'http://no.where/path/to/env.yaml'
        env1 = b'''
        parameters:
          "param1": "value1"
        resource_registry:
          "OS::Thingy1": "b/a.yaml"
        '''
        self.m.ReplayAll()
        self.object_requested = False

        def env_path_is_object(object_url):
            return True

        def object_request(method, object_url):
            self.object_requested = True
            self.assertEqual('GET', method)
            self.assertTrue(object_url.startswith("http://no.where/path/to/"))
            if object_url == env_object:
                return env1
            else:
                return self.template_a

        files, env = template_utils.process_multiple_environments_and_files(
            env_paths=[env_object], env_path_is_object=env_path_is_object,
            object_request=object_request)
        self.assertEqual(
            {
                'resource_registry': {
                    'OS::Thingy1': 'http://no.where/path/to/b/a.yaml'},
                'parameters': {'param1': 'value1'}
            },
            env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['http://no.where/path/to/b/a.yaml'])

    def test_process_multiple_environments_and_files_tracker(self):
        """env_list_tracker records normalised env URLs and their contents."""
        # Setup
        self.m.StubOutWithMock(request, 'urlopen')
        env_file1 = '/home/my/dir/env1.yaml'
        env1 = b'''
        parameters:
          "param1": "value1"
        resource_registry:
          "OS::Thingy1": "file:///home/b/a.yaml"
        '''
        request.urlopen('file://%s' % env_file1).AndReturn(
            six.BytesIO(env1))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/b/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        # Test
        env_file_list = []
        files, env = template_utils.process_multiple_environments_and_files(
            [env_file1], env_list_tracker=env_file_list)
        # Verify
        expected_env = {'parameters': {'param1': 'value1'},
                        'resource_registry':
                            {'OS::Thingy1': 'file:///home/b/a.yaml'}
                        }
        self.assertEqual(expected_env, env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/b/a.yaml'])
        self.assertEqual(['file:///home/my/dir/env1.yaml'], env_file_list)
        self.assertIn('file:///home/my/dir/env1.yaml', files)
        self.assertEqual(expected_env,
                         json.loads(files['file:///home/my/dir/env1.yaml']))

    def test_process_environment_relative_file_tracker(self):
        """Tracker also records envs whose registry uses relative paths."""
        self.m.StubOutWithMock(request, 'urlopen')
        env_file = '/home/my/dir/env.yaml'
        env_url = 'file:///home/my/dir/env.yaml'
        env = b'''
        resource_registry:
          "OS::Thingy": a.yaml
        '''
        request.urlopen(env_url).AndReturn(
            six.BytesIO(env))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        request.urlopen('file:///home/my/dir/a.yaml').AndReturn(
            six.BytesIO(self.template_a))
        self.m.ReplayAll()
        self.assertEqual(
            env_url,
            utils.normalise_file_path_to_url(env_file))
        self.assertEqual(
            'file:///home/my/dir',
            utils.base_url_for_url(env_url))
        env_file_list = []
        files, env = template_utils.process_multiple_environments_and_files(
            [env_file], env_list_tracker=env_file_list)
        # Verify
        expected_env = {'resource_registry':
                            {'OS::Thingy': 'file:///home/my/dir/a.yaml'}}
        self.assertEqual(expected_env, env)
        self.assertEqual(self.template_a.decode('utf-8'),
                         files['file:///home/my/dir/a.yaml'])
        self.assertEqual(['file:///home/my/dir/env.yaml'], env_file_list)
        self.assertEqual(json.dumps(expected_env),
                         files['file:///home/my/dir/env.yaml'])

    def test_global_files(self):
        """A global registry entry with a file URL is collected."""
        url = 'file:///home/b/a.yaml'
        env = '''
        resource_registry:
          "OS::Thingy": "%s"
        ''' % url
        self.collect_links(env, self.template_a, url)

    def test_nested_files(self):
        """A per-resource registry entry with a file URL is collected."""
        url = 'file:///home/b/a.yaml'
        env = '''
        resource_registry:
          resources:
            freddy:
              "OS::Thingy": "%s"
        ''' % url
        self.collect_links(env, self.template_a, url)

    def test_http_url(self):
        """An http template URL is collected."""
        url = 'http://no.where/container/a.yaml'
        env = '''
        resource_registry:
          "OS::Thingy": "%s"
        ''' % url
        self.collect_links(env, self.template_a, url)

    def test_with_base_url(self):
        """Relative entries resolve against the registry's base_url."""
        url = 'ftp://no.where/container/a.yaml'
        env = '''
        resource_registry:
          base_url: "ftp://no.where/container/"
          resources:
            server_for_me:
              "OS::Thingy": a.yaml
        '''
        self.collect_links(env, self.template_a, url)

    def test_with_built_in_provider(self):
        """Built-in provider names (OS::Compute::Server) are not fetched."""
        env = '''
        resource_registry:
          resources:
            server_for_me:
              "OS::Thingy": OS::Compute::Server
        '''
        self.collect_links(env, self.template_a, None)

    def test_with_env_file_base_url_file(self):
        """Relative entries resolve against a file env_base_url."""
        url = 'file:///tmp/foo/a.yaml'
        env = '''
        resource_registry:
          resources:
            server_for_me:
              "OS::Thingy": a.yaml
        '''
        env_base_url = 'file:///tmp/foo'
        self.collect_links(env, self.template_a, url, env_base_url)

    def test_with_env_file_base_url_http(self):
        """Relative entries resolve against an http env_base_url."""
        url = 'http://no.where/path/to/a.yaml'
        env = '''
        resource_registry:
          resources:
            server_for_me:
              "OS::Thingy": to/a.yaml
        '''
        env_base_url = 'http://no.where/path'
        self.collect_links(env, self.template_a, url, env_base_url)

    def test_unsupported_protocol(self):
        """Non file/http(s)/ftp schemes raise CommandError."""
        env = '''
        resource_registry:
          "OS::Thingy": "sftp://no.where/dev/null/a.yaml"
        '''
        jenv = yaml.safe_load(env)
        fields = {'files': {}}
        self.assertRaises(exc.CommandError,
                          template_utils.get_file_contents,
                          jenv['resource_registry'],
                          fields)
class TestGetTemplateContents(testtools.TestCase):
    """Tests for template_utils.get_template_contents.

    Uses mox to stub ``urlopen``; cleanups verify expectations per-test.
    """

    def setUp(self):
        super(TestGetTemplateContents, self).setUp()
        self.m = mox.Mox()
        # verify every recorded expectation and restore stubs per-test
        self.addCleanup(self.m.VerifyAll)
        self.addCleanup(self.m.UnsetStubs)

    def test_get_template_contents_file(self):
        """A template file on disk is read and parsed."""
        with tempfile.NamedTemporaryFile() as tmpl_file:
            tmpl = (b'{"AWSTemplateFormatVersion" : "2010-09-09",'
                    b' "foo": "bar"}')
            tmpl_file.write(tmpl)
            tmpl_file.flush()
            files, tmpl_parsed = template_utils.get_template_contents(
                tmpl_file.name)
            self.assertEqual({"AWSTemplateFormatVersion": "2010-09-09",
                              "foo": "bar"}, tmpl_parsed)
            self.assertEqual({}, files)

    def test_get_template_contents_file_empty(self):
        """An empty template file raises CommandError."""
        with tempfile.NamedTemporaryFile() as tmpl_file:
            ex = self.assertRaises(
                exc.CommandError,
                template_utils.get_template_contents,
                tmpl_file.name)
            self.assertEqual(
                'Could not fetch template from file://%s' % tmpl_file.name,
                str(ex))

    def test_get_template_file_nonextant(self):
        """A missing template file surfaces the underlying URLError."""
        nonextant_file = '/template/dummy/file/path/and/name.yaml'
        ex = self.assertRaises(
            error.URLError,
            template_utils.get_template_contents,
            nonextant_file)
        self.assertEqual(
            "<urlopen error [Errno 2] No such file or directory: '%s'>"
            % nonextant_file,
            str(ex))

    def test_get_template_contents_file_none(self):
        """No template source at all raises CommandError."""
        ex = self.assertRaises(
            exc.CommandError,
            template_utils.get_template_contents)
        self.assertEqual(
            ('Need to specify exactly one of --template-file, '
             '--template-url or --template-object'),
            str(ex))

    def test_get_template_contents_file_none_existing(self):
        """existing=True allows no template source and returns None."""
        files, tmpl_parsed = template_utils.get_template_contents(
            existing=True)
        self.assertIsNone(tmpl_parsed)
        self.assertEqual({}, files)

    def test_get_template_contents_parse_error(self):
        """Malformed template content raises CommandError with the URL."""
        with tempfile.NamedTemporaryFile() as tmpl_file:
            tmpl = b'{"foo": "bar"'
            tmpl_file.write(tmpl)
            tmpl_file.flush()
            ex = self.assertRaises(
                exc.CommandError,
                template_utils.get_template_contents,
                tmpl_file.name)
            self.assertThat(
                str(ex),
                matchers.MatchesRegex(
                    'Error parsing template file://%s ' % tmpl_file.name))

    def test_get_template_contents_url(self):
        """A template fetched over http is parsed."""
        tmpl = b'{"AWSTemplateFormatVersion" : "2010-09-09", "foo": "bar"}'
        url = 'http://no.where/path/to/a.yaml'
        self.m.StubOutWithMock(request, 'urlopen')
        request.urlopen(url).AndReturn(six.BytesIO(tmpl))
        self.m.ReplayAll()
        files, tmpl_parsed = template_utils.get_template_contents(
            template_url=url)
        self.assertEqual({"AWSTemplateFormatVersion": "2010-09-09",
                          "foo": "bar"}, tmpl_parsed)
        self.assertEqual({}, files)

    def test_get_template_contents_object(self):
        """A template fetched through object_request is parsed."""
        tmpl = '{"AWSTemplateFormatVersion" : "2010-09-09", "foo": "bar"}'
        url = 'http://no.where/path/to/a.yaml'
        self.m.ReplayAll()
        self.object_requested = False

        def object_request(method, object_url):
            self.object_requested = True
            self.assertEqual('GET', method)
            self.assertEqual('http://no.where/path/to/a.yaml', object_url)
            return tmpl

        files, tmpl_parsed = template_utils.get_template_contents(
            template_object=url,
            object_request=object_request)
        self.assertEqual({"AWSTemplateFormatVersion": "2010-09-09",
                          "foo": "bar"}, tmpl_parsed)
        self.assertEqual({}, files)
        self.assertTrue(self.object_requested)

    def test_get_nested_stack_template_contents_object(self):
        """Nested templates referenced by type are fetched into files."""
        tmpl = ('{"heat_template_version": "2016-04-08",'
                '"resources": {'
                '"FooBar": {'
                '"type": "foo/bar.yaml"}}}')
        url = 'http://no.where/path/to/a.yaml'
        self.m.ReplayAll()
        self.object_requested = False

        def object_request(method, object_url):
            self.object_requested = True
            self.assertEqual('GET', method)
            self.assertTrue(object_url.startswith("http://no.where/path/to/"))
            if object_url == url:
                return tmpl
            else:
                return '{"heat_template_version": "2016-04-08"}'

        files, tmpl_parsed = template_utils.get_template_contents(
            template_object=url,
            object_request=object_request)
        self.assertEqual(files['http://no.where/path/to/foo/bar.yaml'],
                         '{"heat_template_version": "2016-04-08"}')
        self.assertTrue(self.object_requested)

    def check_non_utf8_content(self, filename, content):
        """Check a non-UTF-8 get_file target survives as base64 *content*."""
        base_url = 'file:///tmp'
        url = '%s/%s' % (base_url, filename)
        template = {'resources':
                    {'one_init':
                     {'type': 'OS::Heat::CloudConfig',
                      'properties':
                      {'cloud_config':
                       {'write_files':
                        [{'path': '/tmp/%s' % filename,
                          'content': {'get_file': url},
                          'encoding': 'b64'}]}}}}}
        self.m.StubOutWithMock(request, 'urlopen')
        # FIX: base64.decodestring was removed in Python 3.9;
        # decodebytes (available since 3.1) is the replacement.
        raw_content = base64.decodebytes(content)
        response = six.BytesIO(raw_content)
        request.urlopen(url).AndReturn(response)
        self.m.ReplayAll()
        files = {}
        template_utils.resolve_template_get_files(
            template, files, base_url)
        self.assertEqual({url: content}, files)

    def test_get_zip_content(self):
        filename = 'heat.zip'
        content = b'''\
UEsDBAoAAAAAAEZZWkRbOAuBBQAAAAUAAAAIABwAaGVhdC50eHRVVAkAAxRbDVNYh\
t9SdXgLAAEE\n6AMAAATpAwAAaGVhdApQSwECHgMKAAAAAABGWVpEWzgLgQUAAAAF\
AAAACAAYAAAAAAABAAAApIEA\nAAAAaGVhdC50eHRVVAUAAxRbDVN1eAsAAQToAwA\
ABOkDAABQSwUGAAAAAAEAAQBOAAAARwAAAAAA\n'''
        # zip has '\0' in stream
        self.assertIn(b'\0', base64.decodebytes(content))
        decoded_content = base64.decodebytes(content)
        if six.PY3:
            self.assertRaises(UnicodeDecodeError, decoded_content.decode)
        else:
            self.assertRaises(
                UnicodeDecodeError,
                json.dumps,
                {'content': decoded_content})
        self.check_non_utf8_content(
            filename=filename, content=content)

    def test_get_utf16_content(self):
        filename = 'heat.utf16'
        content = b'//4tTkhTCgA=\n'
        # utf16 has '\0' in stream
        self.assertIn(b'\0', base64.decodebytes(content))
        decoded_content = base64.decodebytes(content)
        if six.PY3:
            self.assertRaises(UnicodeDecodeError, decoded_content.decode)
        else:
            self.assertRaises(
                UnicodeDecodeError,
                json.dumps,
                {'content': decoded_content})
        self.check_non_utf8_content(filename=filename, content=content)

    def test_get_gb18030_content(self):
        filename = 'heat.gb18030'
        content = b'1tDO5wo=\n'
        # gb18030 has no '\0' in stream
        # FIX: was assertNotIn('\0', <bytes>) which raises TypeError on
        # Python 3 (str needle in bytes haystack); use b'\0' as the
        # sibling tests above do.
        self.assertNotIn(b'\0', base64.decodebytes(content))
        decoded_content = base64.decodebytes(content)
        if six.PY3:
            self.assertRaises(UnicodeDecodeError, decoded_content.decode)
        else:
            self.assertRaises(
                UnicodeDecodeError,
                json.dumps,
                {'content': decoded_content})
| |
if type in self.__tagset:
return self[type]
elif default is None:
return self.__tagset.new('', type=type)
elif isinstance(default, Tag):
if check_type and default.type and default.type != type:
raise ValueError(
f"Could not create new tag object due to type conflicting ({repr(type)} != {repr(default.type)})")
else:
return self.__tagset._append(default.clone(type=type))
elif isinstance(default, Mapping):
_kwargs = dict(default)
if 'value' in _kwargs:
_value = _kwargs.pop("value")
else:
_value = None
if 'type' in _kwargs:
if check_type and _kwargs['type'] != type:
raise ValueError(
f"Could not create new tag object due to type conflicting ({repr(type)} != {repr(_kwargs['type'])})")
_kwargs.pop("type")
return self.__tagset.new(_value, type=type, **_kwargs)
else:
# use defaults as the input value string
return self.__tagset.new(str(default), type=type)
def __init__(self, parent=None, **kwargs):
    """ Create an empty TagSet.

    Parameters
    ----------
    parent : object, optional
        owning object; its `_claim` hook (if any) is called for each
        newly constructed tag (see _construct_obj).
    **kwargs
        proto : class used to construct new tags (defaults to Tag).
        proto_kwargs : dict of default keyword arguments for `proto`.
    """
    self.__parent = parent
    # dict.get is the idiomatic form of "key lookup with a default"
    self.__proto_kwargs = kwargs.get('proto_kwargs', {})
    self.__proto = kwargs.get('proto', Tag)
    # assign through __dict__ with explicitly mangled names — presumably
    # to stay independent of attribute interception (see __getattr__);
    # TODO confirm plain assignment would not suffice here
    self.__dict__["_TagSet__tags"] = []
    self.__dict__["_TagSet__tagmap"] = TagSet.TagMap(self)
    self.__dict__["_TagSet__tagsmap"] = dict()
@property
def gold(self):
    """ View for interacting with the first tag of each type (the "gold"
    tag) directly; backed by the TagMap created in __init__. """
    return self.__tagmap
def __len__(self):
    """ Number of tags in this object """
    # counts the flat list of all tags, across every type
    return len(self.__tags)
def __getitem__(self, type) -> T:
    """ Return the (lazily created) list holding all tags of *type*. """
    try:
        return self.__tagsmap[type]
    except KeyError:
        # first access for this type: create an empty ProtoList whose
        # creation hook also registers any new tag in the flat tag list
        fresh = ProtoList(proto=self.__proto,
                          proto_kwargs={'type': type},
                          taglist_create_hook=lambda x: self.__tags.append(x))
        self.__tagsmap[type] = fresh
        return fresh
def __getattr__(self, type) -> T:
    """ Get the list of tags of the given type via attribute access.

    NOTE(review): this makes *every* otherwise-missing attribute resolve
    to a (possibly empty) tag list instead of raising AttributeError —
    confirm this is intended, as it can confuse copy/pickle and hide
    attribute typos.
    """
    return self[type]
def __contains__(self, type):
    """ Check if there is at least a tag with a type.

    NOTE(review): membership is based on the per-type map, whose entry
    persists (possibly empty) once a type has been looked up or all its
    tags removed — confirm "at least one tag" is really guaranteed.
    """
    return type in self.__tagsmap
def __iter__(self) -> T:
    """ Loop through all tags in this set """
    # iterates the flat list, i.e. in tag-insertion order across types
    return iter(self.__tags)
def items(self):
    """ Return an iterator to loop through all (type, value_list) pairs in this TagSet """
    # value_list entries are the lazily-created per-type ProtoLists
    return self.__tagsmap.items()
def _construct_obj(self, *args, **kwargs) -> T:
    """ Construct a new tag object and notify parent if possible.

    Default constructor kwargs (proto_kwargs) are merged in, but values
    the caller passed explicitly always win.
    """
    if self.__proto_kwargs:
        # prioritise values in kwargs rather than in default constructor kwargs
        for k, v in self.__proto_kwargs.items():
            # FIX: was `if k not in self.kwargs:` — `self.kwargs` went
            # through __getattr__, created a spurious 'kwargs' tag list,
            # and was always empty, so defaults silently clobbered the
            # caller's explicit kwargs (the opposite of the intent).
            if k not in kwargs:
                kwargs[k] = v
    _tag = self.__proto(*args, **kwargs)
    # TODO to review this _claim book design
    if self.__parent is not None and self.__parent._claim:
        self.__parent._claim(_tag)
    return _tag
def new(self, value, type='', *args, **kwargs) -> T:
    """ Create, register and return a new generic tag object """
    if not (value or type):
        raise ValueError("Concept value and type cannot be both empty")
    return self._append(self._construct_obj(value, type, *args, **kwargs))
    def _append(self, tag):
        """ [Internal] Add an existing tag object into the list
        General users should NOT use this method as it is very likely to be removed in the future
        """
        # Register in the per-type bucket first, then in the flat ordered list.
        self.__map_tag(tag)
        self.__tags.append(tag)
        return tag
    def __map_tag(self, tag):
        """ [Internal] Index ``tag`` into its per-type bucket """
        # The bucket is created lazily by __getitem__ when this is the first
        # tag of its type.
        self[tag.type].append(tag)
        return tag
    def _replace_obj(self, old_obj, new_obj):
        """ [Internal] Swap ``old_obj`` for ``new_obj`` in both indexes """
        # NOTE: the flat list loses the original position (remove + append);
        # only the per-type bucket keeps the original slot, and only when the
        # two tags share the same type.
        self.__tags.remove(old_obj)
        self.__tags.append(new_obj)
        if old_obj.type == new_obj.type:
            _taglist = self.__tagsmap[old_obj.type]
            _taglist[_taglist.index(old_obj)] = new_obj
        else:
            self.__tagsmap[old_obj.type].remove(old_obj)
            self.__tagsmap[new_obj.type].append(new_obj)
        return new_obj
    def replace(self, old_obj, value: str, type='', *args, **kwargs) -> T:
        """ Create a new tag to replace an existing tag object
        :param old_obj: Old object to be removed and replaced with a newly created object
        :param value: text value for the new tag object
        :param type: type for the new object, defaulted to an empty string
        """
        # NOTE(review): passing extra positional *args together with the
        # value/type keywords here raises TypeError (duplicate argument) —
        # verify no caller relies on positional extras.
        new_obj = self._construct_obj(value=value, type=type, *args, **kwargs)
        return self._replace_obj(old_obj, new_obj)
def remove(self, tag: T) -> T:
""" Remove a generic tag object and return them """
if tag is None:
raise ValueError("Null tag object cannot be popped")
elif tag.type not in self:
raise ValueError("This tag object does not exist in this TagSet")
else:
self.__tagsmap[tag.type].remove(tag)
self.__tags.remove(tag)
return tag
    def pop(self, idx: int) -> T:
        """ Remove a tag at a given position and return it """
        # Index is into the flat insertion-ordered list; an out-of-range idx
        # raises IndexError before anything is removed.
        return self.remove(self.__tags[idx])
    def index(self, obj):
        """ Position of ``obj`` in the flat tag list (ValueError if absent) """
        return self.__tags.index(obj)
    def values(self, type=None):
        """ Get all values of tags with the specified type or all tags when type is None """
        # Returns a lazy generator of tag.value entries.
        return (t.value for t in (self[type] if type is not None else self))
    def to_dict(self, *args, **kwargs):
        """ Create a list of dicts from all tag objects """
        # Extra args are forwarded to each tag's own to_dict().
        return {"tags": [t.to_dict(*args, **kwargs) for t in self]}
class Token(DataObject):
    """ A sentence token (i.e. a word) """

    def __init__(self, text='', cfrom=-1, cto=-1, pos=None, lemma=None, comment=None, flag=None, **kwargs):
        """ A token (e.g. a word in a sentence)

        :param text: original/surface form of the token
        :param cfrom: start character offset in the sentence (-1 = unknown)
        :param cto: end character offset in the sentence (-1 = unknown)
        :param pos: part-of-speech label
        :param lemma: dictionary (lemmatised) form
        :param comment: free-form annotator comment
        :param flag: free-form marker
        """
        super().__init__(**kwargs)
        self.__tags: TagSet[Tag] = TagSet[Tag](parent=self)
        self.cfrom = cfrom
        self.cto = cto
        self.__text = text  # original/surface form
        self.lemma = lemma  # dictionary form
        self.pos = pos
        self.comment = comment
        self.flag = flag

    def __getitem__(self, name):
        # Value of the gold tag of this type, or None when the token has no
        # tag of that type at all.
        return self.tag[name].value if name in self.__tags else None

    def __setitem__(self, name, value):
        # Sets the gold tag of this type via the TagSet's TagMap.
        self.tag[name] = value

    def __getattr__(self, name):
        # Unknown attributes resolve as gold-tag lookups, so ``token.sense``
        # is equivalent to ``token['sense']``.
        return self[name]

    def __len__(self):
        """ Number of tags attached to this token """
        return len(self.__tags)

    def __iter__(self):
        """ Iterate over all tags of this token """
        return iter(self.__tags)

    def __repr__(self):
        return "`{l}`<{f}:{t}>".format(l=self.text, f=self.cfrom, t=self.cto)

    def __str__(self):
        return self.text

    @property
    def text(self):
        """ Text value of a Token object """
        return self.__text

    @text.setter
    def text(self, value):
        self.__text = value

    @property
    def value(self):
        """ Alias to Token.text """
        return self.text

    @value.setter
    def value(self, value):
        self.text = value

    # BUG FIX: ``tag`` used to be defined twice with identical bodies; the
    # second definition silently shadowed the first. Only one is kept.
    @property
    def tag(self):
        """ Interact with first tag (gold) directly """
        return self.__tags.gold

    @property
    def tags(self):
        """ Tag manager object of this sentence (list access) """
        return self.__tags

    def surface(self):
        """ Get the surface form of this token """
        # Prioritise self.text; fall back to slicing the sentence text with
        # this token's character offsets.
        if self.text:
            return self.text
        elif self.sent and self.sent.text:
            return self.sent.text[self.cfrom:self.cto]
        else:
            return ''

    def tag_map(self):
        """ Build a map from tagtype to list of tags """
        tm = dd(list)
        for tag in self.__tags:
            tm[tag.type].append(tag)
        return tm

    def to_dict(self):
        """ Serialise this token to a plain dict (inverse of from_dict) """
        token_json = {'cfrom': self.cfrom,
                      'cto': self.cto,
                      'text': self.text}
        if self.lemma:
            token_json['lemma'] = self.lemma
        if self.pos:
            token_json['pos'] = self.pos
        if self.comment:
            token_json['comment'] = self.comment
        if self.flag:
            token_json['flag'] = self.flag
        all_tags = [t.to_dict(default_cfrom=self.cfrom, default_cto=self.cto) for t in self.tags]
        if all_tags:
            token_json['tags'] = all_tags
        return token_json

    @staticmethod
    def from_dict(token_dict):
        """ Rebuild a Token from a dict produced by to_dict() """
        tk = Token()
        # BUG FIX: 'flag' is written by to_dict() but was not restored here,
        # so a serialisation round-trip silently dropped it.
        tk.update(token_dict, 'cfrom', 'cto', 'text', 'lemma', 'pos', 'comment', 'flag')
        # rebuild tags
        for tag_json in token_dict.get('tags', []):
            tk.tags.new(**tag_json)
        return tk
class TokenList(list):
    """ A list of Token - Accept both token index and token object """

    def __init__(self, *args, **kwargs):
        # NOTE(review): positional args are deliberately ignored (the list
        # always starts empty); tokens must be added via append()/extend().
        super().__init__()
        self.sent = None  # owning sentence, used to resolve integer indices

    def __eq__(self, other):
        """ Element-wise comparison against any other list """
        if not isinstance(other, list):
            return False
        elif len(other) != len(self):
            return False
        else:
            for i1, i2 in zip(self, other):
                if i1 != i2:
                    return False
            return True

    def __add__(self, other):
        # BUG FIX: this used to ``return self.extend(other)``, which is always
        # None (list.extend returns None) — so ``a + b`` evaluated to None
        # while still mutating a. The mutating behaviour is preserved, but
        # self is now returned so the expression stays usable.
        self.extend(other)
        return self

    def __iadd__(self, other):
        # BUG FIX: same as __add__ — returning None from __iadd__ made
        # ``a += b`` rebind a to None.
        self.extend(other)
        return self

    def __ensure_token(self, token):
        """ Coerce an int index (resolved via self.sent) or Token into a Token """
        if isinstance(token, Token):
            return token
        elif isinstance(token, int):
            if self.sent is None:
                raise ValueError("Using token index in a TokenList without sentence ref")
            return self.sent[token]
        else:
            raise ValueError(f"Invalid token value: {token} (Only token index and Token objects are accepted")

    def append(self, x):
        """ Add tokens to this concept """
        super().append(self.__ensure_token(x))

    def extend(self, iterable):
        """ Add all tokens from an iterable to this TokenList
        :param iterable: An iterable of int (for token indices) or Token list
        :raises: ValueError
        """
        super().extend(self.__ensure_token(t) for t in iterable)

    def insert(self, i, x):
        """ Insert a token at a given position """
        super().insert(i, self.__ensure_token(x))
class Concept(Tag):
""" Represent a concept in an utterance, which may refers to multiple tokens """
FLAG = 'flag'
COMMENT = 'comment'
NOT_MATCHED = 'E'
    def __init__(self, value='', type=None, clemma=None, tokens=None, comment=None, flag=None, sent=None, **kwargs):
        """ A concept annotation spanning zero or more tokens of a sentence

        :param value: concept value
        :param type: concept type
        :param clemma: concept-level lemma
        :param tokens: iterable of Token objects or int indices into ``sent``
        :param comment: free-form annotator comment
        :param flag: free-form marker
        :param sent: owning sentence (required to resolve int token indices)
        """
        super().__init__(value, type, **kwargs)
        self.__tokens = TokenList()
        # sent is assigned before tokens so integer indices can be resolved
        # by the TokenList via the sentence back-reference.
        self.sent = sent
        self.clemma = clemma
        if tokens:
            self.tokens.extend(tokens)
        self.comment = comment
        self.flag = flag
    @property
    def tokens(self):
        """ Tokens covered by this concept (a TokenList) """
        return self.__tokens

    @tokens.setter
    def tokens(self, values):
        # Replace the span in place — keeping the same TokenList object
        # preserves its sentence back-reference.
        self.__tokens.clear()
        self.__tokens.extend(values)
    @property
    def sent(self):
        """ Owning sentence of this concept """
        return self.__sent

    @sent.setter
    def sent(self, value):
        # Keep the TokenList's sentence ref in sync so int indices resolve.
        self.__sent = value
        self.__tokens.sent = value
    def __getitem__(self, idx):
        """ Get the idx-th token of this concept """
        # Delegates to the underlying TokenList (list semantics apply).
        return self.__tokens[idx]
    def __iter__(self):
        """ Iterate through all tokens in this concept """
        return iter(self.__tokens)
    def __len__(self):
        """ Number of tokens belong to this concept """
        return len(self.__tokens)
    def __repr__(self):
        # Developer-facing form: <type:value:"clemma">
        return f'<{self.type}:{self.value}:"{self.clemma}">'
    def __str__(self):
        # NOTE(review): unlike __repr__, the leading field here is self.value
        # (bound to format arg ``t``) rather than self.type — confirm that
        # this asymmetry is intentional.
        return '<{t}:"{l}">({ws})'.format(l=self.clemma, t=self.value, ws=self.__tokens)
    def remove(self, token: Token):
        """ Remove a Token object from this concept """
        # ValueError propagates when the token is not part of this concept.
        self.__tokens.remove(token)
def pop(self, idx=None) -> Token:
""" Remove a token from this concept and return it
:param idx: the index of the token to be removed. If set to None (defaulted) idx of the last token will be used
:type idx: int
"""
if idx is None:
return self.__tokens.pop()
else:
return self.__tokens.pop(idx)
def to_dict(self, *args, **kwargs):
| |
#!/usr/bin/env python
################################################################################
#
# This file is part of ghmm
#
# file: HMMEd.py
# author: <NAME>
#
# Copyright (C) 1998-2002, <NAME>
#
# Contact: <EMAIL>
#
# Information: http://ghmm.org
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
#
# This file is version $Revision: 1.29 $
# from $Date: 2005/02/22 11:12:56 $
# last change by $Author: schliep $.
#
################################################################################
import Tkinter
import math
from Gato import GatoGlobals, ProbEditorBasics, ProbEditorDialogs, ProbEditorWidgets, ProbEditorContinuous
from Gato.ObjectGred import *
import ObjectHMM
import ghmmwrapper
class ContinuousEmissionEditor(ProbEditorContinuous.gauss_editor):
    """Tk dialog (legacy Python 2 / ghmm GUI) for editing a mixture of
    continuous emission densities: uniform, Gaussian, and left/right
    truncated Gaussians.

    NOTE(review): this module targets Python 2 (``xrange``, ``string.atof``);
    ``string`` and the Tk constants (CENTER, E, ...) are presumably brought in
    by the star import from Gato.ObjectGred — confirm before running.
    """
    def __init__(self, master, plotList):
        """
        the entire dialog
        """
        ProbEditorContinuous.gauss_editor.__init__(self, master, plotList, width=300, height=300)
        self.normalize = Tkinter.IntVar()
        self.normalize.set(1)
    def buildMenu(self):
        # Menu bar (File / Edit / Zoom) with Add/Del cascades under Edit.
        bar=Tkinter.Menu(self.root)
        filem=Tkinter.Menu(bar)
        editm=Tkinter.Menu(bar)
        zoomm=Tkinter.Menu(bar)
        addMenu=Tkinter.Menu(editm)
        delMenu=Tkinter.Menu(editm)
        # sumindi flags whether the sum function is currently shown:
        # offer "add sum" only while absent, "delete sum" only while present.
        if self.sumindi==0:
            addMenu.add_radiobutton(label="Sum-Fkt", command=self.add_sum)
        addMenu.add_radiobutton(label="Uniform", command=self.boxadd)
        #addMenu.add_radiobutton(label="Exp-Fkt", command=self.expadd)
        #addMenu.add_radiobutton(label="NegExp-Fkt", command=self.oexpadd)
        addMenu.add_radiobutton(label="Gaussian", command=self.gaussadd)
        addMenu.add_radiobutton(label="Gaussian left tail", command=self.gaussladd)
        addMenu.add_radiobutton(label="Gaussian right tail", command=self.gaussradd)
        # one delete entry per plotted density, colour-coded to the plot
        for i in xrange(len(self.plot_list)):
            delMenu.add_radiobutton(label=str(i+1), background = self.plot_list[i].color, command = self.make_del_function(i))
        if self.sumindi==1:
            delMenu.add_radiobutton(label="sum", background='red', command=self.del_sum)
        filem.add_command(label="Save", command=self.save)
        filem.add_command(label="Cancel", command=self.cancel)
        editm.add_cascade(label="Add", menu=addMenu)
        editm.add_cascade(label="Del", menu=delMenu)
        zoomm.add_command(label="Zoom-in", command=self.zoom_in)
        zoomm.add_command(label="Zoom-out", command=self.zoom_out)
        zoomm.add_command(label="Normalise", command=self.norm)
        bar.add_cascade(label="File", menu=filem)
        bar.add_cascade(label="Edit", menu=editm)
        bar.add_cascade(label="Zoom", menu=zoomm)
        self.root.config(menu=bar)
    def boxadd(self):
        """Open the parameter dialog for a new uniform density."""
        self.top=Tkinter.Toplevel(self.root)
        label=Tkinter.Frame(self.top)
        Tkinter.Label(label, justify=CENTER, text="Uniform density:\nf(x) = 1/|start-end| for start<x<end\nelse f(x) = 0").grid(row=0, columnspan=2)
        Tkinter.Label(label, text="start = ").grid(row=1, sticky=E)
        Tkinter.Label(label, text="end = ").grid(row=2, sticky=E)
        self.e1=Tkinter.Entry(label)
        self.e2=Tkinter.Entry(label)
        self.e1.insert(0, -0.5)
        self.e2.insert(0, 0.5)
        self.e1.grid(row=1, column=1)
        self.e2.grid(row=2, column=1)
        button1=Tkinter.Button(label, text="OK", command=self.boxc).grid(row=3)
        button2=Tkinter.Button(label, text="Cancel", command=self.top.destroy).grid(row=3, column=1)
        label.pack()
    def boxc(self):
        """OK handler for boxadd: read start/end and create the density."""
        # NOTE(review): no guard against start == end (division by zero below).
        s1 = string.atof(self.e1.get())
        s2 = string.atof(self.e2.get())
        self.create_new_fkt(
            ObjectHMM.UniformDensity(start=s1,stop=s2,a=1/math.fabs(s2-s1),
                                     color=self.nextColor()))
        self.top.destroy()
    def gaussadd(self):
        """Open the parameter dialog for a new Gaussian density."""
        self.top=Tkinter.Toplevel(self.root)
        label=Tkinter.Frame(self.top)
        Tkinter.Label(label, justify=CENTER, text="Normal density:\n f(x) = 1/(sigma*sqrt(2*pi))*exp(-(x-mu)**2/2*(sigma)**2)").grid(row=0, columnspan=2)
        Tkinter.Label(label, text="mu = ").grid(row=1, sticky=E)
        Tkinter.Label(label, text="sigma = ").grid(row=2, sticky=E)
        Tkinter.Label(label, text="").grid(row=3, sticky=E)
        self.e1=Tkinter.Entry(label)
        self.e2=Tkinter.Entry(label)
        self.e1.insert(0, 0.0)
        self.e2.insert(0, 2.0)
        self.e1.grid(row=1, column=1)
        self.e2.grid(row=2, column=1)
        button1=Tkinter.Button(label, text="OK", command=self.gauss).grid(row=4)
        button2=Tkinter.Button(label, text="Cancel", command=self.top.destroy).grid(row=4, column=1)
        self.label = label
        label.pack()
    def gauss(self):
        """OK handler for gaussadd: validate sigma > 0, then create density."""
        mu = string.atof(self.e1.get())
        sigma = string.atof(self.e2.get())
        if sigma <= 0:
            # show an inline error and keep the dialog open
            Tkinter.Label(self.label, text="Error! sigma has to be greater than 0,",
                          foreground="red").grid(row=3, columnspan=2)
            self.label.pack()
            return
        self.create_new_fkt(
            ObjectHMM.NormalDensity(mu=mu,sigma=sigma,a=1.0,color=self.nextColor()))
        del self.label
        self.top.destroy()
    def gaussladd(self):
        """Open the parameter dialog for a left-truncated Gaussian."""
        self.top=Tkinter.Toplevel(self.root)
        label=Tkinter.Frame(self.top)
        Tkinter.Label(label, justify=CENTER, text="Normal density (left tail):\n f(x) = 1/(sigma*sqrt(2*pi))*exp(-(x-mu)**2/2*(sigma)**2) for x<=tail\n else f(x)=0").grid(row=0, columnspan=2, sticky=E)
        Tkinter.Label(label, text="mu = ").grid(row=1, sticky=E)
        Tkinter.Label(label, text="sigma = ").grid(row=2, sticky=E)
        Tkinter.Label(label, text="tail = ").grid(row=3, sticky=E)
        Tkinter.Label(label, text="").grid(row=4, sticky=E)
        self.e1=Tkinter.Entry(label)
        self.e2=Tkinter.Entry(label)
        self.e3=Tkinter.Entry(label)
        self.e1.insert(0, -1.0)
        self.e2.insert(0, 1.0)
        self.e3.insert(0, 0.5)
        self.e1.grid(row=1, column=1)
        self.e2.grid(row=2, column=1)
        self.e3.grid(row=3, column=1)
        button1=Tkinter.Button(label, text="OK", command=self.gaussl).grid(row=5)
        button2=Tkinter.Button(label,text="Cancel",command=self.top.destroy).grid(row=5, column=1)
        self.label = label
        label.pack()
    def gaussl(self):
        """OK handler for gaussladd: validate sigma > 0, then create density."""
        mu = string.atof(self.e1.get())
        sigma = string.atof(self.e2.get())
        tail = string.atof(self.e3.get())
        if sigma <= 0:
            Tkinter.Label(self.label, text="Error! sigma has to be greater than 0,",
                          foreground="red").grid(row=4, columnspan=2)
            self.label.pack()
            return
        self.create_new_fkt(
            ObjectHMM.NormalDensityTruncLeft(mu=mu,sigma=sigma,tail=tail,a=1.0,
                                             color=self.nextColor()))
        del self.label
        self.top.destroy()
    def gaussradd(self):
        """Open the parameter dialog for a right-truncated Gaussian."""
        self.top=Tkinter.Toplevel(self.root)
        label=Tkinter.Frame(self.top)
        Tkinter.Label(label, justify=CENTER, text="Normal density (right tail):\n f(x) = 1/(sigma*sqrt(2*pi))*exp(-(x-mu)**2/2*(sigma)**2) for x>=tail\n else f(x)=0").grid(row=0, columnspan=2, sticky=E)
        Tkinter.Label(label, text="mu = ").grid(row=1, sticky=E)
        Tkinter.Label(label, text="sigma = ").grid(row=2, sticky=E)
        Tkinter.Label(label, text="tail = ").grid(row=3, sticky=E)
        Tkinter.Label(label, text="").grid(row=4, sticky=E)
        self.e1=Tkinter.Entry(label)
        self.e2=Tkinter.Entry(label)
        self.e3=Tkinter.Entry(label)
        self.e1.insert(0, 1.0)
        self.e2.insert(0, 1.0)
        self.e3.insert(0, -0.5)
        self.e1.grid(row=1, column=1)
        self.e2.grid(row=2, column=1)
        self.e3.grid(row=3, column=1)
        button1=Tkinter.Button(label, text="OK", command=self.gaussr).grid(row=5)
        button2=Tkinter.Button(label,text="Cancel",command=self.top.destroy).grid(row=5, column=1)
        self.label = label
        label.pack()
    def gaussr(self):
        """OK handler for gaussradd: validate sigma > 0, then create density."""
        mu = string.atof(self.e1.get())
        sigma = string.atof(self.e2.get())
        tail = string.atof(self.e3.get())
        if sigma <= 0:
            Tkinter.Label(self.label, text="Error! sigma has to be greater than 0,",
                          foreground="red").grid(row=4, columnspan=2)
            self.label.pack()
            return
        self.create_new_fkt(
            ObjectHMM.NormalDensityTruncRight(mu=mu,sigma=sigma,tail=tail,a=1.0,
                                              color=self.nextColor()))
        del self.label
        self.top.destroy()
    def success(self):
        # True when the dialog was closed via Save (status set by the base class).
        return self.status
class HMMTypeChooser(tkSimpleDialog.Dialog):
    """Startup dialog: open an existing HMM or create a new one with discrete
    or continuous emissions. The choice is left in ``self.etype``
    ('open', 0 = discrete, 1 = continuous).

    NOTE(review): if the user closes the window without picking anything,
    ``etype`` is never set and callers reading ``d.etype`` will get an
    AttributeError — confirm callers handle that.
    """
    def buttonbox(self):
        # Stolen from tkSimpleDialog.py
        # add standard button box. override if you don't want the
        # standard buttons
        box = Frame(self)
        b_open = Button(box, text="Open existing HMM", width=30, command=self.open, default=ACTIVE)
        b_open.pack(side=TOP, padx=5, pady=5)
        b_discrete = Button(box, text="New HMM with discrete emissions", width=30, command=self.discrete)
        b_discrete.pack(side=TOP, padx=5, pady=5)
        b_continuous = Button(box, text="New HMM with continuous emissions", width=30, command=self.continuous)
        b_continuous.pack(side=BOTTOM, padx=5, pady=5)
        box.pack(side=BOTTOM,fill=X)
    def open(self):
        # 'Open existing HMM' chosen
        self.etype = 'open'
        self.ok()
    def discrete(self):
        # new HMM with discrete emissions
        self.etype = 0
        self.ok()
    def continuous(self):
        # new HMM with continuous emissions
        self.etype = 1
        self.ok()
    def body(self, master):
        # no form fields — the dialog is just the three buttons
        self.title("New HMM")
class HMMGraphEditor(SAGraphEditor):
    def __init__(self, master=None):
        """Set up the HMM graph editor with its colour scheme and an empty
        discrete-emission model (modeltype 0)."""
        self.modeltype = 0;
        SAGraphEditor.__init__(self, master)
        # editor colour scheme (alternatives kept as trailing comments)
        GatoGlobals.AnimationParameters.cEdgeDefault = '#888888'
        GatoGlobals.AnimationParameters.cLabelDefault = 'black' #'#FF8000' # 230 215 0
        GatoGlobals.AnimationParameters.cVertexDefault = 'red' #'#007EE0' # 0 125 230
        self.SetTitle("HMMEd _VERSION_")
        self.G = ObjectHMM.ObjectHMM(ObjectHMM.State, ObjectHMM.Transition)
    def makeMenuBar(self):
        """(Re)build the menu bar; the HMM menu entries depend on the current
        modeltype flags, so this is called again after loading a model."""
        self.menubar = Menu(self,tearoff=0)
        # Add file menu
        self.fileMenu = Menu(self.menubar, tearoff=0)
        self.fileMenu.add_command(label='New', command=self.NewGraph)
        self.fileMenu.add_command(label='Open ...', command=self.OpenGraph)
        self.fileMenu.add_command(label='Save', command=self.SaveGraph)
        self.fileMenu.add_command(label='Save as ...', command=self.SaveAsGraph)
        self.fileMenu.add_separator()
        self.fileMenu.add_command(label='Export EPSF...', command=self.ExportEPSF)
        self.fileMenu.add_separator()
        self.fileMenu.add_command(label='Quit', command=self.Quit)
        self.menubar.add_cascade(label="File", menu=self.fileMenu, underline=0)
        # Add graph menu
        self.graphMenu = Menu(self.menubar, tearoff=0)
        self.graphMenu.add_command(label='Edit Prior', command=self.EditPrior)
        # model-type-dependent editors, gated by the ghmm capability bitmask
        if self.modeltype & ghmmwrapper.kDiscreteHMM:
            self.graphMenu.add_command(label='Edit Alphabet', command=self.EditAlphabet)
        if self.modeltype & ghmmwrapper.kLabeledStates:
            self.graphMenu.add_command(label='Edit class labels', command=self.EditClassLabels)
        if self.modeltype & ghmmwrapper.kBackgroundDistributions:
            self.graphMenu.add_command(label='Edit background distributions', command=self.EditBackgroundDistributions)
        if self.modeltype & ghmmwrapper.kTiedEmissions:
            self.graphMenu.add_command(label='Edit tie groups', command=self.EditTieGroups)
        self.graphMenu.add_separator()
        self.graphMenu.add_checkbutton(label='Grid', command=self.ToggleGridding)
        self.menubar.add_cascade(label="HMM", menu=self.graphMenu, underline=0)
        self.master.configure(menu=self.menubar)
    def SetGraphMenuOptions(self):
        # Sync the 'Grid' checkbutton with the editor state: invoking it once
        # toggles gridding on when it is currently off.
        if not self.gridding:
            self.graphMenu.invoke(self.graphMenu.index('Grid'))
    def CreateWidgets(self):
        """Build the left-hand tool palette (radio buttons with icon states)
        and then delegate the canvas construction to GraphEditor."""
        toolbar = Frame(self, cursor='hand2', relief=FLAT)
        toolbar.pack(side=LEFT, fill=Y) # Allows horizontal growth
        extra = Frame(toolbar, cursor='hand2', relief=SUNKEN, borderwidth=2)
        extra.pack(side=TOP) # Allows horizontal growth
        extra.rowconfigure(5,weight=1)
        extra.bind("<Enter>", lambda e, gd=self:gd.DefaultInfo())
        px = 0
        py = 3
        self.toolVar = StringVar()
        self.lastTool = None
        # Load Icons
        # 0 = "inactive", 1 = "mouse over", 2 = "active"
        self.icons = {
            'AddOrMoveVertex':[PhotoImage(data=GatoIcons.vertex_1),
                               PhotoImage(data=GatoIcons.vertex_2),
                               PhotoImage(data=GatoIcons.vertex_3)],
            'AddEdge':[PhotoImage(data=GatoIcons.edge_1),
                       PhotoImage(data=GatoIcons.edge_2),
                       PhotoImage(data=GatoIcons.edge_3)],
            'DeleteEdgeOrVertex':[PhotoImage(data=GatoIcons.delete_1),
                                  PhotoImage(data=GatoIcons.delete_2),
                                  PhotoImage(data=GatoIcons.delete_3)],
            'SwapOrientation':[PhotoImage(data=GatoIcons.swap_1),
                               PhotoImage(data=GatoIcons.swap_2),
                               PhotoImage(data=GatoIcons.swap_3)],
            'EditWeight':[PhotoImage(data=GatoIcons.edit_1),
                          PhotoImage(data=GatoIcons.edit_2),
                          PhotoImage(data=GatoIcons.edit_3)],
            'EditProperties':[PhotoImage(data=GatoIcons.edit_1),
                              PhotoImage(data=GatoIcons.edit_2),
                              PhotoImage(data=GatoIcons.edit_3)] }
        self.buttons = {}
        values = ['AddOrMoveVertex','AddEdge','DeleteEdgeOrVertex',
                  'SwapOrientation','EditWeight', 'EditProperties']
        text = {'AddOrMoveVertex':'Add or move vertex','AddEdge':'Add edge',
                'DeleteEdgeOrVertex':'Delete edge or vertex',
                'SwapOrientation':'Swap orientation','EditWeight':'Edit Weight',
                'EditProperties':'Edit Properties'}
        row = 0
        # one icon radio button per editing tool, wired to ChangeTool
        for val in values:
            b = Radiobutton(extra, width=32, padx=px, pady=py,
                            text=text[val],
                            command=self.ChangeTool,
                            var = self.toolVar, value=val,
                            indicator=0, image=self.icons[val][0],
                            selectcolor="#AFAFAF",)
            b.grid(row=row, column=0, padx=2, pady=2)
            self.buttons[val] = b
            b.bind("<Enter>", lambda e,gd=self:gd.EnterButtonCallback(e))
            b.bind("<Leave>", lambda e,gd=self:gd.LeaveButtonCallback(e))
            row += 1
        self.defaultButton = self.buttons['AddOrMoveVertex']
        # default doesnt work as config option
        GraphEditor.CreateWidgets(self)
    def MouseUp(self,event):
        """Dispatch a mouse-button release to the handler of the active tool."""
        if self.mode == 'AddOrMoveVertex':
            self.AddOrMoveVertexUp(event)
        elif self.mode == 'AddEdge':
            self.AddEdgeUp(event)
        elif self.mode == 'DeleteEdgeOrVertex':
            self.DeleteEdgeOrVertexUp(event)
        elif self.mode == 'SwapOrientation':
            self.SwapOrientationUp(event)
        elif self.mode == 'EditWeight':
            self.EditWeightUp(event)
        elif self.mode == 'EditProperties':
            self.EditPropertiesUp(event)
    def NewGraph(self):
        """File > New: ask for the model kind, then create and show a fresh HMM."""
        d = HMMTypeChooser(self.master)
        # NOTE(review): if the chooser window is closed without a selection,
        # d.etype is unset and this raises AttributeError — confirm acceptable.
        if d.etype == 'open':
            self.OpenGraph()
            return
        else:
            self.G = ObjectHMM.ObjectHMM(ObjectHMM.State, ObjectHMM.Transition, etype=d.etype)
        self.G.edit(self.master)
        self.graphName = "New"
        self.ShowGraph(self.G, self.graphName)
        #self.RegisterGraphInformer(HMMInformer(self.G))
        self.fileName = None
        self.SetTitle("HMMEd _VERSION_")
        # rebuild the menus: entries depend on the new model's type flags
        self.modeltype = self.G.modelType
        self.makeMenuBar()
        self.SetGraphMenuOptions()
def OpenGraph(self):
file = askopenfilename(title="Open HMM",
defaultextension=".xml",
filetypes = (("XML", ".xml"),
)
)
if file is "":
print "cancelled"
else:
self.fileName = file
self.graphName = stripPath(file)
e = extension(file)
if e == 'xml':
self.G.openXML(file)
else:
print "Unknown extension"
return
self.ShowGraph(self.G, self.graphName)
#self.RegisterGraphInformer(HMMInformer(self.HMM))
self.SetTitle("HMMEd _VERSION_ - " + self.graphName)
self.modeltype = self.G.modelType
self.makeMenuBar()
if not self.gridding:
self.graphMenu.invoke(self.graphMenu.index('Grid'))
def SaveGraph(self):
#self.dirty = 0
if self.fileName != None:
self.G.writeXML(self.fileName)
else:
self.SaveAsGraph()
def SaveAsGraph(self):
file = asksaveasfilename(title="Save HMM",
defaultextension=".xml",
filetypes = ( ("XML", ".xml"),
)
)
if file is not "":
self.fileName = file
self.dirty = 0
self.G.writeXML(self.fileName)
self.graphName = stripPath(file)
self.SetTitle("HMMEd _VERSION_ - " + self.graphName)
    def EditAlphabet(self):
        """HMM > Edit Alphabet (discrete models only)."""
        # Editing the alphabet of a higher-order model that already has
        # states is unsupported.
        if self.G.modelType & ghmmwrapper.kHigherOrderEmissions and self.G.Order() > 0:
            print "not implemented, edit the alphabet before adding states"
        else:
            self.G.alphabet.editDialog(self, self.G)
    def EditPrior(self):
        """HMM > Edit Prior: edit the initial state probabilities in a
        normalising probability-editor dialog."""
        # NOTE(review): bails out when G.Order() == 0 — confirm that Order()
        # doubles as a state/emptiness check here.
        if self.G.Order() == 0:
            return
        emission_probabilities = ProbEditorBasics.ProbDict({})
        # collect current initial probabilities keyed by state id
        for state in self.G.vertices.values():
            label = str(state.id)
            weight = state.initial
            emission_probabilities.update({label:weight})
        emission_probabilities.renorm_to(1.0)
        e = ProbEditorBasics.emission_data(emission_probabilities)
        d = ProbEditorDialogs.emission_dialog(self, e, "initial probabilities")
        if d.success():
            # write back normalized probabilities
            emission_probabilities.renorm_to(1.0)
            for key in emission_probabilities.keys():
                state = self.G.vertices[int(key)]
                state.initial = typed_assign(state.initial, emission_probabilities[key])
    def EditClassLabels(self):
        """HMM > Edit class labels (labeled-states models only)."""
        self.G.label_alphabet.editDialog(self, self.G)
    def EditBackgroundDistributions(self):
        """HMM > Edit background distributions (background-dist models only)."""
        self.G.backgroundDistributions.editDialog(self, self.G)
    def EditTieGroups(self):
        """HMM > Edit tie groups (tied-emissions models only)."""
        self.G.tie_groups.editDialog(self, self.G)
def EditWeightUp(self,event):
if event.widget.find_withtag(CURRENT):
widget = event.widget.find_withtag(CURRENT)[0]
tags = self.canvas.gettags(widget)
if "edges" in tags:
(tail,head) = self.edge[widget]
transition_probabilities=ProbEditorBasics.ProbDict({})
for head in self.G.OutNeighbors(tail):
weight=self.G.GetEdgeWeight(0,tail,head)
label = "-> %d" % head
transition_probabilities.update({label:weight})
if transition_probabilities.sum==0:
key_list=transition_probabilities.keys()
for key in key_list:
transition_probabilities[key]=1.0/len(key_list)
e = ProbEditorBasics.emission_data(transition_probabilities)
d = ProbEditorDialogs.emission_dialog(self, e,
"transition probs from state %d" % tail)
if d.success():
# write back normalized probabilities
for key in | |
de la base pour SPMF (indépendent)
print('* Calcul des séquences')
for niv in dbDiv.keys() :
for year in dbDiv[niv].keys() :
sqc = [['8888/', '0j0/', ['<0> 8888']]] # indice d'entrée, DMS nulle
n = 1 # indice de rang de l'item
for j in range(0, len(dbDiv[niv][year])-1) :
t = dbDiv[niv][year][j][3] # duree de séjour
# dernière séquence
if j == len(dbDiv[niv][year])-2 :
tbis = dbDiv[niv][year][j+1][3]
sqc[-1][1] += str(t.days) + 'j' + str(round(t.seconds / 60)) + \
'/' + str(tbis.days) + 'j' + str(round(tbis.seconds / 60)) + '/0j0'
if dbDiv[niv][year][j][1] == '0000':
sqc[-1][2].extend(['<' + str(n) + '> 1000 <' + \
str(n+1) + '> ' + dbDiv[niv][year][j+1][1], '<' + str(n+2) + '> '])
sqc[-1][0] += '1000/'
else :
sqc[-1][2].extend(['<' + str(n) + '> ' + dbDiv[niv][year][j][1],
'<' + str(n+1) + '> ' + dbDiv[niv][year][j+1][1], '<' + str(n+2) + '> 9999'])
sqc[-1][0] += dbDiv[niv][year][j][1] + '/'
if dbDiv[niv][year][j+1][1] == '0000':
sqc[-1][0] += '1000/9999'
else :
sqc[-1][0] += dbDiv[niv][year][j+1][1] + '/9999' # indice de sortie
# corps de la séquence
elif dbDiv[niv][year][j][0] == dbDiv[niv][year][j+1][0] :
# ajout d'un item "durée"
sqc[-1][1] += str(t.days) + 'j' + str(round(t.seconds / 60)) + '/'
# ajout d'un item "code"
if dbDiv[niv][year][j][1] == '0000': # bloc (modification du code pour SPMF qui ne supporte pas 0000)
# format "/"
sqc[-1][0] += '1000/'
# format SPMF
sqc[-1][2].append('<' + str(n) + '> 1000')
else :
sqc[-1][0] += dbDiv[niv][year][j][1] + '/'
sqc[-1][2].append('<' + str(n) + '> ' + dbDiv[niv][year][j][1])
n += 1
# fin de la séquence => nouvelle séquence
else :
sqc[-1][1] += str(t.days) + 'j' + str(round(t.seconds / 60)) + '/0j0'
if dbDiv[niv][year][j][1] == '0000':
sqc[-1][2].extend(['<' + str(n) + '> 1000', '<' + str(n+1) + '> 9999'])
sqc[-1][0] += '1000/9999'
else :
sqc[-1][2].extend(['<' + str(n) + '> ' + dbDiv[niv][year][j][1], '<' + str(n+1) + '> 9999'])
sqc[-1][0] += dbDiv[niv][year][j][1] + '/9999'
sqc.append(['8888/', '0j0/', ['<0> 8888']])
n = 1
print('* exports', niv, year)
with open('sqc-'+niv+year, 'w') as fichier :
# format SPMF, séquence des codes
for s in sqc :
fichier.write(' -1 '.join(s[2])+' -1 -2\n')
with open('sqc-'+niv+year+'duree', 'w') as fichier :
# format "/", séquence des codes et des DMS
for s in sqc :
fichier.write(s[0] + ';' + s[1] + '\n')
# FORMAT :
# sqc = [[suite des codes séparés par '/', suite des DMS séparées par '/',
# suite des codes au format SPMF : séparation des items par -1 <x> et des séquences par -1 -2]]
### MODULE 3 : Graphique en aires empilées
print('')
print("! MODULE 3 !")
# liste des codes
print('* Codes')
lsCodes = {}
for niv in dbDiv.keys() :
lsCodes[niv] = {}
for year in dbDiv[niv].keys() :
lsCodes[niv][year] = {}
for i in range(0, len(dbDiv[niv][year])) :
if dbDiv[niv][year][i][1] not in lsCodes[niv][year].keys() :
lsCodes[niv][year][dbDiv[niv][year][i][1]] = [dbDiv[niv][year][i][0]]
elif dbDiv[niv][year][i][0] not in lsCodes[niv][year][dbDiv[niv][year][i][1]] :
lsCodes[niv][year][dbDiv[niv][year][i][1]].append(dbDiv[niv][year][i][0])
# FORMAT :
# lsCodes = {niveau : {année : {code : [venues]}}}
# ~ # structures des um (usage extérieur)
# ~ struc = {}
# ~ for year in annees :
# ~ for code in lsCodes['um'][year] :
# ~ if code not in struc.keys() :
# ~ struc[code] = {'lib' : '', niveaux[1] : [], niveaux[2] : [], niveaux[3] : []}
# ~ for i in range(0, len(db)) :
# ~ if struc[db[i][2]].get('serv') == [] :
# ~ struc[db[i][2]]['lib'] = db[i][3]
# ~ struc[db[i][2]][niveaux[1]] = [db[i][8], db[i][9]]
# ~ struc[db[i][2]][niveaux[2]] = [db[i][10], db[i][11]]
# ~ struc[db[i][2]][niveaux[3]] = [db[i][6], db[i][7]]
# ~ # FORMAT :
# ~ # struc = {code UM : {libellé, [code service, libellé service],
# ~ # [code pôle, libellé pôle], [code autorisation, libellé autorisation]}}
# ~ with open('structure.csv', 'w') as fichier :
# ~ fichier.write('code_um;lib_um;code_serv;lib_serv;code_pole;lib_pole;code_autoris;lib_autoris\n')
# ~ for um in struc :
# ~ fichier.write(';'.join([um, struc[um]['lib'], struc[um]['serv'][0],
# ~ struc[um]['serv'][1], struc[um]['pole'][0], struc[um]['pole'][1],
# ~ struc[um]['autoris'][0], struc[um]['autoris'][1]]) + '\n')
# agrégation des codes peu fréquents (<x% des venues) dans le code "autre"
print('* Agregation')
for niv in lsCodes.keys() :
seuil = 0
for year in lsCodes[niv].keys() :
for ls in lsCodes[niv][year].values() :
if len(ls) > seuil :
seuil = len(ls)
seuil *= 0.05 # seuil à 5% des venues
i = 1
for code, ls in lsCodes[niv][year].items() :
if len(ls) < seuil :
lsCodes[niv][year][code] = False
elif code != '0000' :
lsCodes[niv][year][code] = i
i += 1
lsCodes[niv][year]['0000'] = 0
lsCodes[niv][year]['autre'] = i
lsCodes[niv][year]['table'] = []
if niv == niveaux[0] :
print("seuil", year, seuil)
# FORMAT :
# lsCodes = {niveau : {année : {code : booléen d'agrégation (F) ou indice de colonne}}}
# création de la base agrégée à partir de la base de codes
for niv in dbDiv.keys() :
for year in dbDiv[niv].keys() :
libelle = [[0, '0000']]
for code in lsCodes[niv][year].keys() :
if lsCodes[niv][year][code] and code != 'table' :
libelle.append([lsCodes[niv][year].get(code), str(code)])
iAutre = libelle[-1][0]
libelle.sort
print(niv, year, iAutre, "libelles")
for i in range(0, len(dbDiv[niv][year])) :
# mesure de la durée de séjour :
if i == 0 or dbDiv[niv][year][i][0] != dbDiv[niv][year][i-1][0]: # première ligne ou nouvelle venue
h = 1 # indice d'heure, démarrage à 1 pour exploitation extérieur dans R
d = 0 # indice de jour, démarrage à 0 pour javascript
dbDiv[niv][year][i].append(h) # indice de l'heure de début
dbDiv[niv][year][i].append(d) # indice du jour de début
h += dbDiv[niv][year][i][3].days * 24 + dbDiv[niv][year][i][3].seconds / 3600 # durée du RUM en heures
if dbDiv[niv][year][i][3].days < 1 : # durée du RUM en jours
d += 1
elif dbDiv[niv][year][i][3].seconds / 3600 > 12 :
d += dbDiv[niv][year][i][3].days + 1
else :
d += dbDiv[niv][year][i][3].days
dbDiv[niv][year][i].append(h) # indice de l'heure de fin
dbDiv[niv][year][i].append(d) # indice du jour de fin
# remplissage de la base de code
if dbDiv[niv][year][i][8]> len(lsCodes[niv][year]['table']) : # ajout de nouvelles lignes vides
while dbDiv[niv][year][i][8] > len(lsCodes[niv][year]['table']) :
lsCodes[niv][year]['table'].append({})
for lib in libelle :
lsCodes[niv][year]['table'][-1][lib[0]] = 0 # { indice : n}
for j in range(dbDiv[niv][year][i][6], dbDiv[niv][year][i][8]) :
indice = lsCodes[niv][year].get(dbDiv[niv][year][i][1])
if lsCodes[niv][year][dbDiv[niv][year][i][1]] or dbDiv[niv][year][i][1] == '0000' : # code non regroupé dans 'autre' ou bloc (indice 0)
lsCodes[niv][year]['table'][j][indice] += 1
else :
lsCodes[niv][year]['table'][j][iAutre] += 1
print('export', niv, year)
with open('area_' + niv + year + '.csv', 'w') as fichier :
j=0
fichier.write('jour,')
for i in range(0, len(libelle)) :
libelle[i] = libelle[i][1]
fichier.write(','.join(libelle) + '\n')
# info sur les 90 premiers jours d'hospit
if len(lsCodes[niv][year]['table']) < 91 :
l = len(lsCodes[niv][year]['table'])
else :
l = 91
for i in range(0, l) :
fichier.write(str(j) + ',')
for lib in range(0, len(libelle) - 1) :
fichier.write(str(lsCodes[niv][year]['table'][i][lib]) + ',')
fichier.write(str(lsCodes[niv][year]['table'][i][len(libelle)-1]) + '\n')
j += 1
# FORMAT :
# dbDiv = {niveau : {année : [venue, code, libellé, DMS, position, H début, j début, H fin, j fin]}}
# ~ # export de la base UM agrégée (utilisation externe)
# ~ # en jours
# ~ with open('db_um.csv', 'w') as fichier :
# ~ fichier.write('venue,code_um,lib_um,debut,fin\n')
# ~ for i in range(0, len(dbDiv['um']['2016'])) :
# ~ if lsCodes['um']['2016'][dbDiv['um']['2016'][i][1]] or dbDiv['um']['2016'][i][1] == '0000' :
# ~ fichier.write(','.join([','.join(dbDiv['um']['2016'][i][0:3]),
# ~ str(dbDiv['um']['2016'][i][6]), str(dbDiv['um']['2016'][i][8])])+ '\n')
# ~ else :
# ~ fichier.write(','.join([dbDiv['um']['2016'][i][0], 'autre', 'autre',
# ~ str(dbDiv['um']['2016'][i][6]), str(dbDiv['um']['2016'][i][8])])+ '\n')
# ~ for i in range(0, len(dbDiv['um']['2017'])) :
# ~ if lsCodes['um']['2017'][dbDiv['um']['2017'][i][1]] or dbDiv['um']['2017'][i][1] == '0000' :
# ~ fichier.write(','.join([','.join(dbDiv['um']['2017'][i][0:3]),
# ~ str(dbDiv['um']['2017'][i][6]), str(dbDiv['um']['2017'][i][8])])+ '\n')
# ~ else :
# ~ fichier.write(','.join([dbDiv['um']['2017'][i][0], 'autre', 'autre',
# ~ str(dbDiv['um']['2017'][i][6]), str(dbDiv['um']['2017'][i][8])])+ '\n')
# ~ # en heures
# ~ with open('db_um2.csv', 'w') as fichier :
# ~ fichier.write('venue,code_um,lib_um,debut,fin\n')
# ~ for i in range(0, len(dbDiv['um']['2016'])) :
# ~ if lsCodes['um']['2016'][dbDiv['um']['2016'][i][1]] or dbDiv['um']['2016'][i][1] == '0000' :
# ~ fichier.write(','.join([','.join(dbDiv['um']['2016'][i][0:3]),
# ~ '%.2f' %(dbDiv['um']['2016'][i][5]), '%.2f' %(dbDiv['um']['2016'][i][7])])+ '\n')
# ~ else :
# ~ fichier.write(','.join([dbDiv['um']['2016'][i][0], 'autre', 'autre',
# ~ '%.2f' %(dbDiv['um']['2016'][i][5]), '%.2f' %(dbDiv['um']['2016'][i][7])])+ '\n')
# ~ for i in range(0, len(dbDiv['um']['2017'])) :
# ~ if lsCodes['um']['2017'][dbDiv['um']['2017'][i][1]] or dbDiv['um']['2017'][i][1] == '0000' :
# ~ fichier.write(','.join([','.join(dbDiv['um']['2017'][i][0:3]),
# ~ '%.2f' %(dbDiv['um']['2017'][i][5]), '%.2f' %(dbDiv['um']['2017'][i][7])])+ '\n')
# ~ else :
# ~ fichier.write(','.join([dbDiv['um']['2017'][i][0], 'autre', 'autre',
# ~ '%.2f' %(dbDiv['um']['2017'][i][5]), '%.2f' %(dbDiv['um']['2017'][i][7])])+ '\n')
# ~ # export base UM sans les | |
<filename>gui/tkgui.py
#! /usr/bin/env python3
import logging
import base64
import os
import shutil
import json
import urllib.request
from .icons import icon_string
from tkinter import *
from tkinter import filedialog, messagebox, ttk
from tkinter import TclError
from slib.helpers import InterfaceHelper
from data.filegroups import typeGroups
from webbrowser import get
from tkinter import font
from . import descriptions
from data.version import SORTER_VERSION
from datetime import datetime
logger = logging.getLogger(__name__)
class TextWithVar(Text):
    """A Text widget that accepts a 'textvariable' option
    Has no scrollbar but is scrollable
    Thanks to https://stackoverflow.com/q/21507178/6735826
    and https://stackoverflow.com/a/21565476/6735826
    """
    def __init__(self, parent, *args, **kwargs):
        # Pop the custom options before the remaining kwargs reach Text.
        self._textvariable = kwargs.pop("textvariable", None)
        self.autoscroll = kwargs.pop('autoscroll', True)
        super().__init__(parent, *args, **kwargs)
        # if the variable has data in it, use it to initialize
        # the widget
        if self._textvariable is not None:
            self.insert("1.0", self._textvariable.get())
        # this defines an internal proxy which generates a
        # virtual event whenever text is inserted or deleted
        self.tk.eval('''
            proc widget_proxy {widget widget_command args} {
                # call the real tk widget command with the real args
                set result [uplevel [linsert $args 0 $widget_command]]
                # if the contents changed, generate an event we can bind to
                if {([lindex $args 0] in {insert replace delete})} {
                    event generate $widget <<Change>> -when tail
                }
                # return the result from the real widget command
                return $result
            }
        ''')
        # this replaces the underlying widget with the proxy
        self.tk.eval('''
            rename {widget} _{widget}
            interp alias {{}} ::{widget} {{}} widget_proxy {widget} _{widget}
        '''.format(widget=str(self)))
        # set up a binding to update the variable whenever
        # the widget changes
        self.bind("<<Change>>", self._on_widget_change)
        # set up a trace to update the text widget when the
        # variable changes. 'wu' = fire on write and unset
        # (legacy trace API; trace_add is the modern spelling).
        if self._textvariable is not None:
            self._textvariable.trace("wu", self._on_var_change)

    def _on_var_change(self, *args):
        """Change the text widget when the associated textvariable changes"""
        # only change the widget if something actually
        # changed, otherwise we'll get into an endless
        # loop
        try:
            text_current = self.get("1.0", "end-1c")
        except TclError:
            # Widget already destroyed; nothing left to synchronise.
            pass
        else:
            var_current = self._textvariable.get()
            if text_current != var_current:
                self.delete("1.0", "end")
                self.insert("1.0", var_current)
                if self.autoscroll:
                    # Keep the latest line visible after external updates.
                    self.see(END)

    def _on_widget_change(self, event=None):
        """Change the variable when the widget changes"""
        if self._textvariable is not None:
            self._textvariable.set(self.get("1.0", "end-1c"))
class TextFrame(Frame):
    """A Frame combining a TextWithVar widget with a vertical scrollbar.

    Accepts the same ``textvariable`` / ``autoscroll`` options as
    TextWithVar; ``textvariable`` must be a tkinter.Variable when given.
    """
    def __init__(self, master, *args, **kwargs):
        self.textvariable = kwargs.pop('textvariable', None)
        self.autoscroll = kwargs.pop('autoscroll', True)
        # Validate early, before any widget gets created.
        if self.textvariable is not None and not isinstance(self.textvariable, Variable):
            raise TypeError(f"tkinter.Variable type expected, {type(self.textvariable)} given.")
        super().__init__(master, *args, **kwargs)
        # Scrollbar and text widget cross-reference each other.
        self.yscrollbar = ttk.Scrollbar(self, orient=VERTICAL)
        self.text_widget = TextWithVar(
            self,
            textvariable=self.textvariable,
            autoscroll=self.autoscroll,
            yscrollcommand=self.yscrollbar.set,
        )
        self.yscrollbar.config(command=self.text_widget.yview)
        self.yscrollbar.pack(side=RIGHT, fill=Y)
        self.text_widget.pack(side=LEFT, fill=BOTH, expand=1)
class TkGui(Tk):
"""Sorter tkinter GUI class"""
def __init__(self, operations, settings):
super().__init__()
self.settings = settings
self.debug = True if logger.getEffectiveLevel() == logging.DEBUG else False
self.title('Sorter')
self.protocol("WM_DELETE_WINDOW", self._show_exit_dialog)
# Configure icon
icondata = base64.b64decode(icon_string) # utf-8 encoded
self.icon = PhotoImage(data=icondata)
self.tk.call('wm', 'iconphoto', self._w, self.icon)
# Configure main window
self.resizable(width=False, height=False)
self.maxsize(550, 380)
self.minsize(550, 200)
self.geometry('{0}x{1}+{2}+{3}'.format(550, 300, 200, 200))
self.operations = operations
self.db_helper = self.operations.db_helper
self._init_ui()
    def _init_ui(self):
        """Build the complete main window: ttk theme, menubar,
        source/destination pickers, option checkboxes, action buttons,
        status bar and progress bar.

        Called once from __init__; widgets needed by other methods
        (entries, buttons, progress bar, ...) become instance attributes.
        """
        # Configure default theme
        style = ttk.Style(self)
        style.theme_use('clam')
        style.map("TEntry", fieldbackground=[
            ("active", "white"), ("disabled", "#DCDCDC")])
        # Reuse the default widget background for all custom styles below.
        self.bg = self.cget('bg')
        style.configure('My.TFrame', background=self.bg)
        style.configure("blue.Horizontal.TProgressbar",
                        background='#778899', troughcolor=self.bg)
        style.configure("green.Horizontal.TProgressbar",
                        background='#2E8B57', troughcolor=self.bg)
        self.option_add('*Dialog.msg.font', 'Helvetica 10')
        # Configure menubar
        menu = Menu(self)
        self.config(menu=menu)
        # File menu item
        file_menu = Menu(menu, tearoff=False)
        menu.add_cascade(label='File', menu=file_menu)
        dir_submenu = Menu(file_menu, tearoff=False)
        dir_submenu.add_command(
            label='Source', command=lambda: self._show_diag('source'))
        dir_submenu.add_command(label='Destination',
                                command=lambda: self._show_diag('destination'))
        file_menu.add_cascade(label='Open', menu=dir_submenu)
        file_menu.add_separator()
        file_menu.add_command(label='Quit', command=self._show_exit_dialog,
                              accelerator="Ctrl+Q")
        # View menu item
        view_menu = Menu(menu, tearoff=False)
        menu.add_cascade(label='View', menu=view_menu)
        view_menu.add_command(label='History', command=self._show_history)
        # Help menu item
        help_menu = Menu(menu, tearoff=False)
        menu.add_cascade(label='Help', menu=help_menu)
        help_menu.add_command(
            label='Help', command=self._show_help, accelerator='F1')
        usage_link = descriptions.HOMEPAGE + '#usage'
        help_menu.add_command(
            label='Tutorial', command=lambda link=usage_link: self._open_link(link))
        help_menu.add_command(label='Refresh', command=self._delete_db)
        help_menu.add_command(
            label='Update', command=lambda: self._check_for_update(user_checked=True))
        help_menu.add_command(label='About', command=self._show_about)
        # Keyboard shortcuts for Help and Quit.
        self.bind_all('<F1>', self._show_help)
        self.bind_all(
            '<Control-q>', lambda event=None: self._show_exit_dialog())
        # Create main frames
        self.top_frame = ttk.Frame(self, style='My.TFrame')
        self.top_frame.pack(side=TOP, expand=YES, fill=X)
        self.mid_frame = ttk.Frame(self, style='My.TFrame')
        self.mid_frame.pack(side=TOP, expand=YES, fill=BOTH)
        self.bottom_frame = ttk.Frame(self, style='My.TFrame')
        self.bottom_frame.pack(side=TOP, expand=YES, fill=X)
        # Configure frame for Label widgets
        label_frame = ttk.Frame(self.top_frame, style='My.TFrame')
        label_frame.pack(side=LEFT, fill=Y, expand=YES)
        source_label = ttk.Label(
            label_frame, text='Source', anchor=W, background=self.bg)
        source_label.pack(ipady=2.5, pady=5, side=TOP, fill=X)
        dst_label = ttk.Label(
            label_frame, text='Destination', anchor=W, background=self.bg)
        dst_label.pack(ipady=2.5, pady=5, side=BOTTOM, fill=X)
        # Configure frame for Entry widgets
        entry_frame = ttk.Frame(self.top_frame, style='My.TFrame')
        entry_frame.pack(side=LEFT, fill=Y, expand=YES)
        self.source_entry = ttk.Entry(entry_frame, width=50)
        self.source_entry.pack(ipady=2.5, pady=5, side=TOP, expand=YES)
        self.dst_entry = ttk.Entry(entry_frame, width=50, state='disabled')
        self.dst_entry.pack(ipady=2.5, pady=5, side=BOTTOM, expand=YES)
        # NOTE(review): the Entry widget itself is bound as 'variable' here;
        # _clear_entry_help/_show_entry_help apparently accept the widget in
        # both roles -- confirm before changing.
        self.dst_entry.bind('<FocusIn>', lambda event,
                            widget=self.dst_entry,
                            variable=self.dst_entry: self._clear_entry_help(widget, variable))
        self.dst_entry.bind('<FocusOut>', lambda event,
                            widget=self.dst_entry,
                            variable=self.dst_entry: self._show_entry_help(widget, variable))
        # Configure frame for dialog buttons
        diag_frame = ttk.Frame(self.top_frame, style='My.TFrame')
        diag_frame.pack(side=LEFT, expand=YES)
        source_button = ttk.Button(diag_frame,
                                   text='Choose',
                                   command=lambda: self._show_diag('source'))
        source_button.pack(side=TOP, pady=5)
        dst_button = ttk.Button(diag_frame,
                                text='Choose',
                                command=lambda: self._show_diag('destination'))
        dst_button.pack(side=BOTTOM, pady=5)
        # Variables
        self.sort_folders = IntVar()
        self.recursive = IntVar()
        # Deliberately local: only the 'Select file types' callback needs it.
        types_value = IntVar()
        self.file_types = ['*']
        self.by_extension = IntVar()
        self.progress_info = StringVar()
        self.show_logs = IntVar()
        # Configure Options frame
        options_frame = LabelFrame(self.mid_frame, text='Options')
        options_frame.pack(fill=BOTH, expand=YES, padx=5, pady=10)
        frame_left = ttk.Frame(options_frame, style="My.TFrame")
        frame_left.pack(side=LEFT, fill=Y, anchor=W, padx=20)
        frame_right = ttk.Frame(options_frame, style="My.TFrame")
        frame_right.pack(side=LEFT, fill=Y, anchor=W, padx=10)
        # For frame_left: search and grouping options
        # (original comment said frame_right; the widgets below live in frame_left)
        group_separator = ttk.Separator(frame_left)
        group_separator.grid(row=0, column=0, pady=1)
        self.search_string = StringVar()
        search_entry = ttk.Entry(
            frame_left, width=15, state='disabled', textvariable=self.search_string)
        search_entry.grid(row=1, column=1, padx=5, pady=2)
        search_entry.bind('<FocusIn>', lambda event,
                          widget=search_entry,
                          variable=self.search_string: self._clear_entry_help(widget, variable))
        search_entry.bind('<FocusOut>', lambda event,
                          widget=search_entry,
                          variable=self.search_string: self._show_entry_help(widget, variable))
        self.search_option_value = IntVar()
        search_option = Checkbutton(
            frame_left, text='Search for:',
            variable=self.search_option_value, anchor=E,
            command=lambda: self._enable_entry_widget(search_entry,
                                                      self.search_option_value))
        search_option.grid(row=1, column=0, pady=3, sticky=W, padx=5)
        self.group_folder_name = StringVar()
        group_folder_entry = ttk.Entry(
            frame_left, width=15, state='disabled', textvariable=self.group_folder_name)
        group_folder_entry.grid(row=2, column=1, padx=5, pady=2, sticky=S)
        group_folder_entry.bind('<FocusIn>', lambda event,
                                widget=group_folder_entry,
                                variable=self.group_folder_name: self._clear_entry_help(widget,
                                                                                        variable))
        group_folder_entry.bind('<FocusOut>', lambda event,
                                widget=group_folder_entry,
                                variable=self.group_folder_name: self._show_entry_help(widget,
                                                                                       variable))
        self.group_folder_value = IntVar()
        group_folder_option = Checkbutton(
            frame_left, text='Group into folder',
            variable=self.group_folder_value,
            command=lambda: self._enable_entry_widget(group_folder_entry, self.group_folder_value))
        group_folder_option.grid(row=2, column=0, pady=3, sticky=W, padx=5)
        extension_button = Checkbutton(
            frame_left, text='Group by file type',
            variable=self.by_extension)
        extension_button.grid(row=3, column=0, pady=3, sticky=W, padx=5)
        # For frame_right: traversal and display options
        # (original comment said frame_left; the widgets below live in frame_right)
        other_separator = ttk.Separator(frame_right)
        other_separator.grid(row=0, column=0)
        recursive_option = Checkbutton(
            frame_right, text='Look into sub-folders', variable=self.recursive)
        recursive_option.grid(row=1, column=0, sticky=W, pady=3)
        self.types_window = None
        self.items_option = Checkbutton(frame_right, text='Select file types',
                                        variable=types_value,
                                        command=lambda: self._show_types_window(types_value))
        self.items_option.grid(row=2, column=0, sticky=W, pady=3)
        self.logs_option = Checkbutton(frame_right, text='Show logs',
                                       variable=self.show_logs,
                                       command=self._show_progress)
        self.logs_option.grid(row=3, column=0, sticky=W, pady=3)
        # Configure action buttons
        self.run_button = ttk.Button(self.bottom_frame,
                                     text='Run',
                                     command=self._run_sorter)
        self.run_button.pack(side=LEFT, padx=5)
        self.quit_button = ttk.Button(self.bottom_frame,
                                      text='Quit',
                                      command=self._show_exit_dialog)
        self.quit_button.pack(side=RIGHT, padx=5)
        # Configure status bar
        self.status_bar = ttk.Label(self, text=' Ready',
                                    relief=SUNKEN, anchor=W)
        self.status_bar.pack(side=BOTTOM, fill=X)
        # Configure progress bar
        self.progress_var = DoubleVar()
        self.progress_bar = ttk.Progressbar(self.status_bar,
                                            style="blue.Horizontal.TProgressbar", variable=self.progress_var,
                                            orient=HORIZONTAL, length=120)
        self.progress_bar.pack(side=RIGHT, pady=3, padx=5)
        # Start visually "full" (idle state).
        self.progress_var.set(100)
        self.interface_helper = InterfaceHelper(
            progress_bar=self.progress_bar, progress_var=self.progress_var,
            status=self.status_bar, messagebox=messagebox, progress_info=self.progress_info)
        logger.info('Finished GUI initialisation. Waiting...')
        self.progress_info.set('{} Ready.\n'.format(datetime.now()))
def _on_mousewheel(self, event, canvas, count):
try:
canvas.yview_scroll(count, "units")
except TclError:
pass
def _evaluate(self, event, entry_widget, window):
count = entry_widget.get()
num = 10
try:
num = int(count)
except ValueError:
pass
else:
num = num or 10
finally:
window.destroy()
self._get_history(num)
def _show_history(self):
history_window = Toplevel(self)
history_window.resizable(height=False, width=False)
history_window.geometry('{0}x{1}+{2}+{3}'.format(200, 90, 300, 150))
history_label = ttk.Label(
history_window, text='Enter number: ', background=self.bg)
history_label.grid(row=0, column=0, padx=5, pady=5)
history_entry = ttk.Entry(history_window, width=10)
history_entry.grid(row=0, column=1, padx=5, pady=5)
history_entry.focus_set()
help_text = ttk.Label(history_window, text='Number of files (in history) to view.\n\nPress Enter when done.',
background=self.bg, foreground="#C0C0C0", anchor=CENTER, justify='center')
help_text.grid(row=1, column=0, columnspan=2, padx=5, pady=5)
history_window.bind('<Return>',
lambda event, entry_widget=history_entry, window=history_window: self._evaluate(event,
entry_widget,
window))
history_window.bind('<KP_Enter>',
lambda event, entry_widget=history_entry, window=history_window: self._evaluate(event,
entry_widget,
window))
self._set_window_attributes(history_window, 'History')
def _get_history(self, count):
files = self.db_helper.get_history(count)
if not files:
error_msg = 'No data found!'
messagebox.showwarning(title='Warning', message=error_msg)
logger.warning('Error accessing history:: %s', error_msg)
else:
history_window = Toplevel(self)
history_window.geometry(
'{0}x{1}+{2}+{3}'.format(500, 400, 300, 150))
canvas = self._create_canvas(history_window)
frame = Frame(canvas, background="#C0C0C0")
frame.pack(side=LEFT)
canvas.create_window(0, 0, anchor=NW, window=frame)
PADX, PADY, IPADX, IPADY = 1, 1, 1, 1
# Add items to canvas
llabel = ttk.Label(frame, text='Filename', anchor=N, relief=SUNKEN,
background=self.bg, borderwidth=0)
llabel.grid(row=0, column=0, sticky="nsew", padx=PADX, pady=3)
llabel = ttk.Label(frame, text='Original location', anchor=N, relief=SUNKEN,
background=self.bg, borderwidth=0)
llabel.grid(row=0, column=1, sticky="nsew", padx=PADX, pady=3)
llabel = ttk.Label(frame, text='Current location', anchor=N, relief=SUNKEN,
background=self.bg, borderwidth=0)
llabel.grid(row=0, column=2, sticky="nsew", padx=PADX, pady=3)
llabel = ttk.Label(frame, anchor=N, relief=SUNKEN,
background=self.bg, borderwidth=0)
llabel.grid(row=0, column=3, sticky="nsew", padx=0, pady=0)
for count, item in enumerate(files, 1):
item_path_object = item.filename_path
original_location = item_path_object.first().source
current_location = item_path_object.last().destination
filename_label = Message(frame, width=400, relief=RAISED, text=item.filename,
anchor=CENTER, background=self.bg, borderwidth=0)
filename_label.grid(row=count, column=0, padx=PADX, pady=PADY,
ipadx=IPADX, ipady=IPADY, sticky="nsew")
o_loc_label = Message(frame, width=400, relief=RAISED,
text=original_location, anchor=W, background=self.bg, borderwidth=0)
o_loc_label.grid(row=count, column=1, padx=PADX, pady=PADY,
ipadx=IPADX, ipady=IPADY, sticky="nsew")
c_loc_label = Message(frame, width=400, relief=SUNKEN,
text=current_location, anchor=W, background=self.bg, borderwidth=0)
c_loc_label.grid(row=count, column=2, padx=PADX, pady=PADY,
ipadx=IPADX, ipady=IPADY, sticky="nsew")
button_label = ttk.Label(
frame, width=400, relief=RAISED, anchor=W, background=self.bg, borderwidth=0)
button_label.grid(row=count, column=3, padx=0, pady=0,
ipadx=IPADX, ipady=IPADY, sticky="nsew")
button = ttk.Button(button_label, text='Open location',
command=lambda location=os.path.dirname(current_location): get().open(location))
button.grid(sticky="ns", padx=10, pady=10)
# Hack: Alter height to refit contents | |
self._request(query_string)
#/api-de-dados/emendas/documentos/{codigo}
def documentos_codigo(self, codigo: str, pagina: int):
"""
Consulta os documentos relacionados à emenda parlamentar pelo código da emenda
Parameters
----------
codigo: str
Código da emenda
pagina: int
Página consultada
"""
query_string = f'/{codigo}'
endpoint = 'emendas/documentos'
return self._request(query_string)
class CEPIM(Portal):
    """Queries on CEPIM records (entities barred from federal agreements)."""

    #/api-de-dados/cepim
    def cepim(self, pagina: int=1, cnpjSancionado: str=None, nomeSancionado: str=None, orgaoEntidade: str=None, ufSancionado: str=None):
        """
        Query CEPIM records by sanctioned CNPJ/CPF or supervising agency.

        Parameters
        ----------
        pagina: int
            Result page to fetch
        cnpjSancionado: str=None
            Sanctioned party's CNPJ
        nomeSancionado: str=None
            Sanctioned party's name, trade name or corporate name
        orgaoEntidade: str=None
            Agency/entity
        ufSancionado: str=None
            Sanctioned party's state (UF abbreviation)
        """
        query_string = self._query_string(locals().copy())
        return self._request(query_string)

    #/api-de-dados/cepim/{id}
    def cepim_id(self, id: int):
        """
        Fetch one CEPIM record by its id.

        Parameters
        ----------
        id: int
            Record ID
        """
        query_string = f'/{id}'
        # Bug fix: 'endpoint' was computed but never passed, leaving
        # _request to its default endpoint resolution. Pass it explicitly so
        # the request targets /cepim/{id}, mirroring
        # LicitacoesPEF.licitacoes_id which does forward its endpoint.
        endpoint = 'cepim'
        return self._request(query_string, endpoint)
class GarantiaSafra(Portal):
    """Queries for the Garantia-Safra benefit."""

    #/api-de-dados/safra-codigo-por-cpf-ou-nis
    def safra_codigo_por_cpf_ou_nis(self, codigo: str, pagina: int):
        """
        Look up Garantia-Safra records by CPF/NIS.

        Parameters
        ----------
        codigo: str
            CPF/NIS
        pagina: int
            Result page to fetch
        """
        # locals() is read before any new local exists, so it carries exactly
        # the request parameters (plus self, discarded downstream).
        return self._request(self._query_string(locals().copy()))

    #/api-de-dados/safra-por-municipio
    def safra_por_municipio(self, codigoIbge: str, mesAno: int, pagina: int):
        """
        Look up Garantia-Safra records by municipality and month.

        Parameters
        ----------
        codigoIbge: str
            IBGE municipality code
        mesAno: int
            Reference month and year (AAAAMM)
        pagina: int
            Result page to fetch
        """
        return self._request(self._query_string(locals().copy()))
class GastosCartaoDePagamento(Portal):
    """Queries on federal payment-card (CPGF/CPCC/CPDC) spending records."""

    #/api-de-dados/cartoes
    def cartoes(self, pagina: int=1, codigoOrgao: str=None, cpfCnpjFavorecido: str=None, cpfPortador: str=None, dataTransacaoFim: str=None, dataTransacaoInicio: str=None, mesExtratoFim: str=None, mesExtratoInicio: str=None, tipoCartao: int=None, valorAte: str=None, valorDe: str=None):
        """
        Query payment-card records.

        Parameters
        ----------
        pagina: int
            Result page to fetch
        codigoOrgao: str=None
            Agency/entity (SIAFI code)
        cpfCnpjFavorecido: str=None
            Payee (CPF/CNPJ)
        cpfPortador: str=None
            Card holder (CPF)
        dataTransacaoFim: str=None
            Transaction end date (DD/MM/AAAA)
        dataTransacaoInicio: str=None
            Transaction start date (DD/MM/AAAA)
        mesExtratoFim: str=None
            Statement end month (MM/AAAA)
        mesExtratoInicio: str=None
            Statement start month (MM/AAAA)
        tipoCartao: int=None
            Card type (CPGF=1, CPCC=2 or CPDC=3)
        valorAte: str=None
            Maximum amount (####,##)
        valorDe: str=None
            Minimum amount (####,##)
        """
        # locals() is read before any new local exists, so it carries exactly
        # the request parameters (plus self, discarded downstream).
        return self._request(self._query_string(locals().copy()))
class ImoveisFuncionais(Portal):
    """Queries on government-owned functional residences and their occupants."""

    #/api-de-dados/imoveis
    def imoveis(self, pagina: int=1, cep: str=None, codigoOrgaoSiafiResponsavelGestao: str=None, endereco: str=None, regiao: str=None, situacao: str=None):
        """
        List functional properties.

        Parameters
        ----------
        pagina: int
            Result page to fetch
        cep: str=None
            Postal code (CEP)
        codigoOrgaoSiafiResponsavelGestao: str=None
            Managing agency code (SIAFI)
        endereco: str=None
            Address
        regiao: str=None
            Region
        situacao: str=None
            Property status
        """
        # locals() is read before any new local exists, so it carries exactly
        # the request parameters (plus self, discarded downstream).
        return self._request(self._query_string(locals().copy()))

    #/api-de-dados/permissionarios
    def permissionarios(self, pagina: int=1, codigoOrgaoSiafiOcupante: str=None, cpfOcupante: str=None, dataFimOcupacao: str=None, dataInicioOcupacao: str=None):
        """
        List occupants of functional properties.

        Parameters
        ----------
        pagina: int
            Result page to fetch
        codigoOrgaoSiafiOcupante: str=None
            Occupant's agency code (SIAFI)
        cpfOcupante: str=None
            Occupant's CPF
        dataFimOcupacao: str=None
            Occupation end date (DD/MM/AAAA)
        dataInicioOcupacao: str=None
            Occupation start date (DD/MM/AAAA)
        """
        return self._request(self._query_string(locals().copy()))

    #/api-de-dados/situacao-imovel
    def situacao_imovel(self):
        """List the possible status values for functional properties."""
        return self._request(self._query_string(locals().copy()))
class LicitacoesPEF(Portal):
    """Queries about procurements (licitações) of the Federal Executive
    Branch. All requests of this class are anchored under 'licitacoes'."""

    def __init__(self, token):
        """Pin base_url to the licitacoes family, keeping an '{endpoint}'
        placeholder for the per-method suffix."""
        super().__init__(token)
        self.base_url = self.base_url.format(endpoint='licitacoes/{endpoint}')

    #/api-de-dados/licitacoes
    def todos(self, codigoOrgao: str, dataFinal: str, dataInicial: str, pagina: int):
        """
        Query all procurements of the Federal Executive Branch.

        Parameters
        ----------
        codigoOrgao: str
            Agency code (SIAFI)
        dataFinal: str
            Final opening date (DD/MM/AAAA)
        dataInicial: str
            Initial opening date (DD/MM/AAAA)
        pagina: int
            Result page to fetch
        """
        query_string = self._query_string(locals().copy())
        endpoint = ''  # empty suffix: target the endpoint root
        return self._request(query_string, endpoint)

    #/api-de-dados/licitacoes/{id}
    def licitacoes_id(self, id: int):
        """
        Fetch one procurement of the Federal Executive Branch by its id.

        Parameters
        ----------
        id: int
            Record ID
        """
        query_string = f'/{id}'
        # Fixed spacing around '=' (was "endpoint =''"); empty suffix as in todos().
        endpoint = ''
        return self._request(query_string, endpoint)

    #/api-de-dados/licitacoes/contratos-relacionados-licitacao
    def contratos_relacionados_licitacao(self, codigoModalidade: str, codigoUG: str, numero: str):
        """
        Query the contracts related to a procurement.

        Parameters
        ----------
        codigoModalidade: str
            Procurement modality code
        codigoUG: str
            Managing unit (UG) code
        numero: str
            Procurement number (NNNNNAAAA)
        """
        # The endpoint suffix is left to _request's default resolution here,
        # as in the other sub-resource methods of this class.
        query_string = self._query_string(locals().copy())
        return self._request(query_string)

    #/api-de-dados/licitacoes/empenhos
    def empenhos(self, codigoModalidade: str, codigoUG: str, numero: str, pagina: int):
        """
        Query the commitments (empenhos) of a procurement.

        Parameters
        ----------
        codigoModalidade: str
            Procurement modality code
        codigoUG: str
            Managing unit (UG) code
        numero: str
            Procurement number (NNNNNAAAA)
        pagina: int
            Result page to fetch
        """
        query_string = self._query_string(locals().copy())
        return self._request(query_string)

    #/api-de-dados/licitacoes/modalidades
    def modalidades(self):
        """List the procurement modalities."""
        # Removed the stray trailing comma from the original signature
        # ("def modalidades(self, )"); the callable interface is unchanged.
        query_string = self._query_string(locals().copy())
        return self._request(query_string)

    #/api-de-dados/licitacoes/participantes
    def participantes(self, codigoModalidade: str, codigoUG: str, numero: str, pagina: int):
        """
        Query the participants of a procurement.

        Parameters
        ----------
        codigoModalidade: str
            Procurement modality code
        codigoUG: str
            Managing unit (UG) code
        numero: str
            Procurement number (NNNNNAAAA)
        pagina: int
            Result page to fetch
        """
        query_string = self._query_string(locals().copy())
        return self._request(query_string)

    #/api-de-dados/licitacoes/por-ug-modalidade-numero
    def por_ug_modalidade_numero(self, codigoModalidade: str, codigoUG: str, numero: str):
        """
        Fetch a procurement by managing unit code, number and modality.

        Parameters
        ----------
        codigoModalidade: str
            Procurement modality code
        codigoUG: str
            Managing unit (UG) code
        numero: str
            Procurement number (NNNNNAAAA)
        """
        query_string = self._query_string(locals().copy())
        return self._request(query_string)

    #/api-de-dados/licitacoes/ugs
    def ugs(self, pagina: int):
        """
        List the managing units (UGs) that held procurements.

        Parameters
        ----------
        pagina: int
            Result page to fetch
        """
        query_string = self._query_string(locals().copy())
        return self._request(query_string)
class Peti(Portal):
    """Queries for the Child Labour Eradication Programme (PETI)."""

    #/api-de-dados/peti-por-cpf-ou-nis
    def peti_por_cpf_ou_nis(self, codigo: str, pagina: int):
        """
        Look up PETI records by CPF/NIS.

        Parameters
        ----------
        codigo: str
            CPF/NIS
        pagina: int
            Result page to fetch
        """
        # locals() is read before any new local exists, so it carries exactly
        # the request parameters (plus self, discarded downstream).
        return self._request(self._query_string(locals().copy()))

    #/api-de-dados/peti-por-municipio
    def peti_por_municipio(self, codigoIbge: str, mesAno: int, pagina: int):
        """
        Look up PETI records by municipality and month.

        Parameters
        ----------
        codigoIbge: str
            IBGE municipality code
        mesAno: int
            Reference month and year (AAAAMM)
        pagina: int
            Result page to fetch
        """
        return self._request(self._query_string(locals().copy()))
class SeguroDefeso(Portal):
    """Queries for the Seguro Defeso (closed-season insurance) benefit."""

    #/api-de-dados/seguro-defeso-codigo
    def seguro_defeso_codigo(self, codigo: str, pagina: int):
        """
        Look up Seguro Defeso records by CPF/NIS.

        Parameters
        ----------
        codigo: str
            CPF/NIS
        pagina: int
            Result page to fetch
        """
        # locals() is read before any new local exists, so it carries exactly
        # the request parameters (plus self, discarded downstream).
        return self._request(self._query_string(locals().copy()))

    #/api-de-dados/seguro-defeso-por-municipio
    def seguro_defeso_por_municipio(self, codigoIbge: str, mesAno: int, pagina: int):
        """
        Look up Seguro Defeso records by municipality and month.

        Parameters
        ----------
        codigoIbge: str
            IBGE municipality code
        mesAno: int
            Reference month and year (AAAAMM)
        pagina: int
            Result page to fetch
        """
        return self._request(self._query_string(locals().copy()))
class ServidoresPEF(Portal):
    """Queries about civil servants of the Federal Executive Branch."""

    def __init__(self, token):
        # Anchor every request of this class under 'servidores', keeping an
        # '{endpoint}' placeholder for the per-method suffix.
        super().__init__(token)
        self.base_url = self.base_url.format(endpoint='servidores/{endpoint}')

    #/api-de-dados/servidores
    def todos(self, pagina: int=1, codigoFuncaoCargo: str=None, cpf: str=None, nome: str=None, orgaoServidorExercicio: str=None, orgaoServidorLotacao: str=None, situacaoServidor: int=None, tipoServidor: int=None):
        """
        Query all servants of the Federal Executive Branch.

        Parameters
        ----------
        pagina: int
            Result page to fetch
        codigoFuncaoCargo: str=None
            Code of the commissioned function or position
        cpf: str=None
            Servant's CPF
        nome: str=None
            Servant's name
        orgaoServidorExercicio: str=None
            SIAPE agency code
        orgaoServidorLotacao: str=None
            SIAPE agency code
        situacaoServidor: int=None
            Servant status (active=1, inactive=2 or pensioner=3)
        tipoServidor: int=None
            Servant type (civilian=1 or military=2)
        """
        qs = self._query_string(locals().copy())
        # Empty suffix: this call targets the endpoint root.
        return self._request(qs, '')

    #/api-de-dados/servidores/{id}
    def servidores_id(self, id: int):
        """
        Fetch one servant of the Federal Executive Branch by id.

        Parameters
        ----------
        id: int
            Record ID
        """
        return self._request(f'/{id}', '')

    #/api-de-dados/servidores/funcoes-e-cargos
    def funcoes_e_cargos(self, pagina: int):
        """
        List codes of commissioned functions and positions.

        Parameters
        ----------
        pagina: int
            Result page to fetch
        """
        return self._request(self._query_string(locals().copy()))

    #/api-de-dados/servidores/por-orgao
    def por_orgao(self, pagina: int=1, licenca: int=None, orgaoExercicio: str=None, orgaoLotacao: str=None, tipoServidor: int=None, tipoVinculo: int=None):
        """
        Query servants aggregated by agency.

        Parameters
        ----------
        pagina: int
            Result page to fetch
        licenca: int=None
            On leave (yes: 1; no: 0)
        orgaoExercicio: str=None
            SIAPE agency code
        orgaoLotacao: str=None
            SIAPE agency code
        tipoServidor: int=None
            Servant type (civilian: 1; military: 2)
        tipoVinculo: int=None
            Employment type (function: 1; position: 2; other: 3; military: 4)
        """
        return self._request(self._query_string(locals().copy()))

    #/api-de-dados/servidores/remuneracao
    def remuneracao(self, cpf: str, mesAno: int, pagina: int):
        """
        Query the remunerations of a servant by CPF and month/year.

        Parameters
        ----------
        cpf: str
            Servant's CPF
        mesAno: int
            Reference month and year (AAAAMM)
        pagina: int
            Result page to fetch
        """
        return self._request(self._query_string(locals().copy()))
class Viagens(Portal):
#/api-de-dados/viagens
def viagens(self, codigoOrgao: str, dataIdaAte: str, dataIdaDe: str, dataRetornoAte: str, dataRetornoDe: str, pagina: int):
"""
Consulta viagens por período
Parameters
----------
codigoOrgao: str
Código do Órgão (SIAFI)
dataIdaAte: str
Data de ida até (DD/MM/AAAA)
dataIdaDe: str
Data de ida a partir de (DD/MM/AAAA)
dataRetornoAte: str
Data de retorno até (DD/MM/AAAA)
dataRetornoDe: str
Data de retorno a partir de (DD/MM/AAAA)
pagina: int
Página consultada
"""
query_string = self._query_string(locals().copy())
return self._request(query_string)
#/api-de-dados/viagens-por-cpf
def viagens_por_cpf(self, cpf: str, pagina: int):
"""
Consulta viagens por CPF
Parameters
----------
cpf: str
CPF
pagina: int
Página consultada
"""
query_string = self._query_string(locals().copy())
return self._request(query_string)
#/api-de-dados/viagens/{id}
def viagens_id(self, id: int):
"""
| |
["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
if target == True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
if window_radius == 16000000:
wpos = coord_clip(mpos, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
)
for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen_round = chrlen - chrlen % 32000
wpos = 128000000
if has_target:
sequence, normmats, targets = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
else:
sequence, normmats = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
targets = None
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
if mstart - mend < 2 * window_radius:
anno_scaled = process_anno(
[
[
np.clip(mstart, wpos - window_radius, wpos + window_radius),
np.clip(mend, wpos - window_radius, wpos + window_radius),
"black",
]
],
base=wpos - window_radius,
window_radius=window_radius,
)
else:
anno_scaled = None
if window_radius == 128000000:
outputs_ref = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mpos,
wpos,
models=models,
annotation=anno_scaled,
padding_chr=padding_chr,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref = genomepredict(
sequence, mchr, mpos, wpos, annotation=anno_scaled, models=models, targets=targets, use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
outputs_ref, show_coordinates=True, file=file + ".256m.pdf",
)
else:
genomeplot(
outputs_ref,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".pdf",
)
return outputs_ref
def process_dup(
mchr,
mstart,
mend,
genome,
file=None,
custom_models=None,
target=True,
show_genes=True,
show_tracks=False,
window_radius=16000000,
padding_chr="chr1",
use_cuda=True,
):
"""
    Generate multiscale genome interaction predictions for
    a duplication variant.
Parameters
----------
mchr : str
The chromosome name of the first segment
mstart : int
The start coordinate of the duplication.
    mend : int
The end coordinate of the duplication.
genome : selene_utils2.MemmapGenome or selene_sdk.sequences.Genome
The reference genome object to extract sequence from
custom_models : list(torch.nn.Module or str) or None, optional
Models to use instead of the default H1-ESC and HFF Orca models.
Default is None.
target : list(selene_utils2.Genomic2DFeatures or str) or bool, optional
If specified as list, use this list of targets to retrieve experimental
data (for plotting only). Default is True and will use micro-C data
for H1-ESC and HFF cells (4DNFI9GMP2J8, 4DNFI643OYP9) that correspond
to the default models.
file : str or None, optional
Default is None. The output file prefix.
show_genes : bool, optional
Default is True. If True, generate gene annotation visualization
file in pdf format that matches the windows of multiscale predictions.
show_tracks : bool, optional
Default is False. If True, generate chromatin tracks visualization
file in pdf format that matches the windows of multiscale predictions.
window_radius : int, optional
Default is 16000000. The acceptable values are 16000000 which selects
the 1-32Mb models or 128000000 which selects the 32-256Mb models.
padding_chr : str, optional
Default is "chr1". If window_radius is 128000000, padding is generally
needed to fill the sequence to 256Mb. The padding sequence will be
extracted from the padding_chr.
use_cuda : bool, optional
Default is True. Use CPU if False.
Returns
-------
outputs_ref_l, outputs_ref_r, outputs_alt : dict, dict, dict
Reference allele predictions zooming into the left boundary of the
duplication,
Reference allele predictions zooming into the right boundary of the
duplication,
Alternative allele predictions zooming into the duplication breakpoint.
    The returned results are in the format of dictionaries
containing the prediction outputs and other
retrieved information. These dictionaries can be directly used as
input to genomeplot or genomeplot_256Mb. See documentation of `genomepredict` or `genomepredict_256Mb` for
details of the dictionary content.
"""
chrlen = [l for c, l in genome.get_chr_lens() if c == mchr].pop()
if custom_models is None:
if window_radius == 16000000:
models = ["h1esc", "hff"]
elif window_radius == 128000000:
models = ["h1esc_256m", "hff_256m"]
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
else:
models = custom_models
if target:
try:
if target == True:
if window_radius == 16000000:
target = ["h1esc", "hff"]
elif window_radius == 128000000:
target = ["h1esc_256m", "hff_256m"]
target = [t if isinstance(t, Genomic2DFeatures) else target_dict_global[t] for t in target]
except KeyError:
target = False
# ref.l
if window_radius == 16000000:
wpos = coord_clip(mstart, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
elif window_radius == 128000000:
chrlen_round = chrlen - chrlen % 32000
wpos = 128000000
if target:
sequence, normmats, targets = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
else:
sequence, normmats = _retrieve_multi(
[[mchr, 0, chrlen_round, "+"], [padding_chr, 0, 256000000 - chrlen_round, "+"]],
genome,
target=target,
)
targets = None
else:
raise ValueError(
"Only window_radius 16000000 (32Mb models) or 128000000 (256Mb models) are supported"
)
if wpos + window_radius > mend:
anno_scaled = process_anno(
[[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
)
else:
anno_scaled = process_anno(
[[mstart, wpos + window_radius, "black"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 128000000:
outputs_ref_l = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mstart,
wpos,
annotation=anno_scaled,
padding_chr=padding_chr,
models=models,
targets=targets,
use_cuda=use_cuda,
)
else:
outputs_ref_l = genomepredict(
sequence,
mchr,
mstart,
wpos,
annotation=anno_scaled,
models=models,
targets=targets,
use_cuda=use_cuda,
)
if file is not None:
if window_radius == 128000000:
genomeplot_256Mb(
outputs_ref_l, show_coordinates=True, file=file + ".ref.l.256m.pdf",
)
else:
genomeplot(
outputs_ref_l,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.l.pdf",
)
# ref.r
if window_radius == 16000000:
wpos = coord_clip(mend, chrlen)
sequence = genome.get_encoding_from_coords(
mchr, wpos - window_radius, wpos + window_radius
)[None, :]
if target:
targets = [
torch.FloatTensor(
t.get_feature_data(
mchr, coord_round(wpos - window_radius), coord_round(wpos + window_radius),
)[None, :]
) for t in target
]
else:
targets = None
if wpos - window_radius < mstart:
anno_scaled = process_anno(
[[mstart, mend, "black"]], base=wpos - window_radius, window_radius=window_radius
)
else:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "black"]],
base=wpos - window_radius,
window_radius=window_radius,
)
if window_radius == 16000000:
outputs_ref_r = genomepredict(
sequence, mchr, mend, wpos, models=models, annotation=anno_scaled, targets=targets, use_cuda=use_cuda,
)
if file is not None:
genomeplot(
outputs_ref_r,
show_genes=show_genes,
show_tracks=show_tracks,
show_coordinates=True,
file=file + ".ref.r.pdf",
)
else:
outputs_ref_r = genomepredict_256Mb(
sequence,
mchr,
normmats,
chrlen_round,
mend,
wpos,
annotation=anno_scaled,
padding_chr=padding_chr,
models=models,
targets=targets,
use_cuda=use_cuda,
)
genomeplot_256Mb(
outputs_ref_r, show_coordinates=True, file=file + ".ref.r.256m.pdf",
)
# alt (r)
s = StructuralChange2(mchr, chrlen)
s.duplicate(mstart, mend)
chrlen_alt = chrlen + mend - mstart
if window_radius == 16000000:
wpos = coord_clip(mend, chrlen_alt)
sequence = []
for chrm, start, end, strand in s[wpos - window_radius : wpos + window_radius]:
seq = genome.get_encoding_from_coords(chrm, start, end)
if strand == "-":
seq = seq[None, ::-1, ::-1]
else:
seq = seq[None, :, :]
sequence.append(seq)
sequence = np.concatenate(sequence, axis=1)
else:
chrlen_alt_round = chrlen_alt - chrlen_alt % 32000
if chrlen_alt_round < 256000000:
wpos = 128000000
(sequence, normmats) = _retrieve_multi(
list(s[0:chrlen_alt_round]) + [[padding_chr, 0, 256000000 - chrlen_alt_round, "+"]],
genome,
target=False,
normmat=True,
normmat_regionlist=[
[mchr, 0, chrlen_alt_round, "+"],
[padding_chr, 0, 256000000 - chrlen_alt_round, "+"],
],
)
else:
wpos = coord_clip(mend, chrlen_alt_round, window_radius=128000000)
(sequence, normmats) = _retrieve_multi(
list(s[wpos - window_radius : wpos + window_radius]),
genome,
target=False,
normmat=True,
normmat_regionlist=[[mchr, wpos - window_radius, wpos + window_radius, "+"]],
)
if wpos - window_radius < mstart and mend + mend - mstart < wpos + window_radius:
anno_scaled = process_anno(
[[mstart, mend, "black"], [mend, mend + mend - mstart, "gray"]],
base=wpos - window_radius,
window_radius=window_radius,
)
elif wpos - window_radius >= mstart and mend + mend - mstart < wpos + window_radius:
anno_scaled = process_anno(
[[wpos - window_radius, mend, "black"], [mend, mend + mend - mstart, "gray"],],
base=wpos - window_radius,
window_radius=window_radius,
)
elif wpos - window_radius < mstart and mend + mend - | |
# Repo: finbourne/lusid-sdk-python-generated-preview
# coding: utf-8
# flake8: noqa
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.4425
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
__version__ = "0.11.4425"
# import apis into sdk package
from lusid.api.aggregation_api import AggregationApi
from lusid.api.allocations_api import AllocationsApi
from lusid.api.application_metadata_api import ApplicationMetadataApi
from lusid.api.blocks_api import BlocksApi
from lusid.api.calendars_api import CalendarsApi
from lusid.api.complex_market_data_api import ComplexMarketDataApi
from lusid.api.compliance_api import ComplianceApi
from lusid.api.configuration_recipe_api import ConfigurationRecipeApi
from lusid.api.conventions_api import ConventionsApi
from lusid.api.corporate_action_sources_api import CorporateActionSourcesApi
from lusid.api.counterparties_api import CounterpartiesApi
from lusid.api.custom_entities_api import CustomEntitiesApi
from lusid.api.custom_entity_definitions_api import CustomEntityDefinitionsApi
from lusid.api.cut_label_definitions_api import CutLabelDefinitionsApi
from lusid.api.data_types_api import DataTypesApi
from lusid.api.derived_transaction_portfolios_api import DerivedTransactionPortfoliosApi
from lusid.api.entities_api import EntitiesApi
from lusid.api.executions_api import ExecutionsApi
from lusid.api.fees_and_commissions_api import FeesAndCommissionsApi
from lusid.api.instruments_api import InstrumentsApi
from lusid.api.legal_entities_api import LegalEntitiesApi
from lusid.api.order_graph_api import OrderGraphApi
from lusid.api.order_instructions_api import OrderInstructionsApi
from lusid.api.orders_api import OrdersApi
from lusid.api.packages_api import PackagesApi
from lusid.api.participations_api import ParticipationsApi
from lusid.api.persons_api import PersonsApi
from lusid.api.placements_api import PlacementsApi
from lusid.api.portfolio_groups_api import PortfolioGroupsApi
from lusid.api.portfolios_api import PortfoliosApi
from lusid.api.property_definitions_api import PropertyDefinitionsApi
from lusid.api.quotes_api import QuotesApi
from lusid.api.reconciliations_api import ReconciliationsApi
from lusid.api.reference_portfolio_api import ReferencePortfolioApi
from lusid.api.relation_definitions_api import RelationDefinitionsApi
from lusid.api.relations_api import RelationsApi
from lusid.api.relationship_definitions_api import RelationshipDefinitionsApi
from lusid.api.relationships_api import RelationshipsApi
from lusid.api.schemas_api import SchemasApi
from lusid.api.scopes_api import ScopesApi
from lusid.api.search_api import SearchApi
from lusid.api.sequences_api import SequencesApi
from lusid.api.structured_result_data_api import StructuredResultDataApi
from lusid.api.system_configuration_api import SystemConfigurationApi
from lusid.api.transaction_configuration_api import TransactionConfigurationApi
from lusid.api.transaction_fees_api import TransactionFeesApi
from lusid.api.transaction_portfolios_api import TransactionPortfoliosApi
from lusid.api.translation_api import TranslationApi
# import ApiClient
from lusid.api_client import ApiClient
from lusid.configuration import Configuration
from lusid.exceptions import OpenApiException
from lusid.exceptions import ApiTypeError
from lusid.exceptions import ApiValueError
from lusid.exceptions import ApiKeyError
from lusid.exceptions import ApiException
# import models into sdk package
from lusid.models.a2_b_breakdown import A2BBreakdown
from lusid.models.a2_b_category import A2BCategory
from lusid.models.a2_b_data_record import A2BDataRecord
from lusid.models.a2_b_movement_record import A2BMovementRecord
from lusid.models.access_controlled_action import AccessControlledAction
from lusid.models.access_controlled_resource import AccessControlledResource
from lusid.models.access_metadata_value import AccessMetadataValue
from lusid.models.accounting_method import AccountingMethod
from lusid.models.action_id import ActionId
from lusid.models.action_result_of_portfolio import ActionResultOfPortfolio
from lusid.models.add_business_days_to_date_request import AddBusinessDaysToDateRequest
from lusid.models.add_business_days_to_date_response import AddBusinessDaysToDateResponse
from lusid.models.adjust_holding import AdjustHolding
from lusid.models.adjust_holding_request import AdjustHoldingRequest
from lusid.models.aggregate_spec import AggregateSpec
from lusid.models.aggregated_return import AggregatedReturn
from lusid.models.aggregated_returns_request import AggregatedReturnsRequest
from lusid.models.aggregated_returns_response import AggregatedReturnsResponse
from lusid.models.aggregation_context import AggregationContext
from lusid.models.aggregation_measure_failure_detail import AggregationMeasureFailureDetail
from lusid.models.aggregation_op import AggregationOp
from lusid.models.aggregation_options import AggregationOptions
from lusid.models.aggregation_query import AggregationQuery
from lusid.models.aggregation_type import AggregationType
from lusid.models.allocation import Allocation
from lusid.models.allocation_request import AllocationRequest
from lusid.models.allocation_set_request import AllocationSetRequest
from lusid.models.annul_quotes_response import AnnulQuotesResponse
from lusid.models.annul_single_structured_data_response import AnnulSingleStructuredDataResponse
from lusid.models.annul_structured_data_response import AnnulStructuredDataResponse
from lusid.models.asset_class import AssetClass
from lusid.models.basket import Basket
from lusid.models.basket_all_of import BasketAllOf
from lusid.models.basket_identifier import BasketIdentifier
from lusid.models.block import Block
from lusid.models.block_request import BlockRequest
from lusid.models.block_set_request import BlockSetRequest
from lusid.models.bond import Bond
from lusid.models.bond_all_of import BondAllOf
from lusid.models.bucketed_cash_flow_request import BucketedCashFlowRequest
from lusid.models.bucketed_cash_flow_response import BucketedCashFlowResponse
from lusid.models.calculation_info import CalculationInfo
from lusid.models.calendar import Calendar
from lusid.models.calendar_date import CalendarDate
from lusid.models.cap_floor import CapFloor
from lusid.models.cap_floor_all_of import CapFloorAllOf
from lusid.models.cash_flow_lineage import CashFlowLineage
from lusid.models.cash_flow_value import CashFlowValue
from lusid.models.cash_flow_value_all_of import CashFlowValueAllOf
from lusid.models.cash_flow_value_set import CashFlowValueSet
from lusid.models.cash_flow_value_set_all_of import CashFlowValueSetAllOf
from lusid.models.cash_ladder_record import CashLadderRecord
from lusid.models.cash_perpetual import CashPerpetual
from lusid.models.cash_perpetual_all_of import CashPerpetualAllOf
from lusid.models.cds_flow_conventions import CdsFlowConventions
from lusid.models.cds_index import CdsIndex
from lusid.models.cds_index_all_of import CdsIndexAllOf
from lusid.models.cds_protection_detail_specification import CdsProtectionDetailSpecification
from lusid.models.change import Change
from lusid.models.complete_portfolio import CompletePortfolio
from lusid.models.complete_relation import CompleteRelation
from lusid.models.complete_relationship import CompleteRelationship
from lusid.models.complex_bond import ComplexBond
from lusid.models.complex_bond_all_of import ComplexBondAllOf
from lusid.models.complex_market_data import ComplexMarketData
from lusid.models.complex_market_data_id import ComplexMarketDataId
from lusid.models.compliance_rule import ComplianceRule
from lusid.models.compliance_rule_result import ComplianceRuleResult
from lusid.models.compliance_rule_upsert_request import ComplianceRuleUpsertRequest
from lusid.models.compliance_rule_upsert_response import ComplianceRuleUpsertResponse
from lusid.models.compliance_run import ComplianceRun
from lusid.models.compounding import Compounding
from lusid.models.configuration_recipe import ConfigurationRecipe
from lusid.models.configuration_recipe_snippet import ConfigurationRecipeSnippet
from lusid.models.constituents_adjustment_header import ConstituentsAdjustmentHeader
from lusid.models.contract_for_difference import ContractForDifference
from lusid.models.contract_for_difference_all_of import ContractForDifferenceAllOf
from lusid.models.corporate_action import CorporateAction
from lusid.models.corporate_action_source import CorporateActionSource
from lusid.models.corporate_action_transition import CorporateActionTransition
from lusid.models.corporate_action_transition_component import CorporateActionTransitionComponent
from lusid.models.corporate_action_transition_component_request import CorporateActionTransitionComponentRequest
from lusid.models.corporate_action_transition_request import CorporateActionTransitionRequest
from lusid.models.counterparty_agreement import CounterpartyAgreement
from lusid.models.counterparty_risk_information import CounterpartyRiskInformation
from lusid.models.counterparty_signatory import CounterpartySignatory
from lusid.models.create_calendar_request import CreateCalendarRequest
from lusid.models.create_corporate_action_source_request import CreateCorporateActionSourceRequest
from lusid.models.create_cut_label_definition_request import CreateCutLabelDefinitionRequest
from lusid.models.create_data_map_request import CreateDataMapRequest
from lusid.models.create_data_type_request import CreateDataTypeRequest
from lusid.models.create_date_request import CreateDateRequest
from lusid.models.create_derived_property_definition_request import CreateDerivedPropertyDefinitionRequest
from lusid.models.create_derived_transaction_portfolio_request import CreateDerivedTransactionPortfolioRequest
from lusid.models.create_portfolio_details import CreatePortfolioDetails
from lusid.models.create_portfolio_group_request import CreatePortfolioGroupRequest
from lusid.models.create_property_definition_request import CreatePropertyDefinitionRequest
from lusid.models.create_recipe_request import CreateRecipeRequest
from lusid.models.create_reference_portfolio_request import CreateReferencePortfolioRequest
from lusid.models.create_relation_definition_request import CreateRelationDefinitionRequest
from lusid.models.create_relation_request import CreateRelationRequest
from lusid.models.create_relationship_definition_request import CreateRelationshipDefinitionRequest
from lusid.models.create_relationship_request import CreateRelationshipRequest
from lusid.models.create_sequence_request import CreateSequenceRequest
from lusid.models.create_transaction_portfolio_request import CreateTransactionPortfolioRequest
from lusid.models.create_unit_definition import CreateUnitDefinition
from lusid.models.credit_default_swap import CreditDefaultSwap
from lusid.models.credit_default_swap_all_of import CreditDefaultSwapAllOf
from lusid.models.credit_rating import CreditRating
from lusid.models.credit_spread_curve_data import CreditSpreadCurveData
from lusid.models.credit_spread_curve_data_all_of import CreditSpreadCurveDataAllOf
from lusid.models.credit_support_annex import CreditSupportAnnex
from lusid.models.currency_and_amount import CurrencyAndAmount
from lusid.models.custom_entity_definition import CustomEntityDefinition
from lusid.models.custom_entity_definition_request import CustomEntityDefinitionRequest
from lusid.models.custom_entity_field import CustomEntityField
from lusid.models.custom_entity_field_definition import CustomEntityFieldDefinition
from lusid.models.custom_entity_id import CustomEntityId
from lusid.models.custom_entity_request import CustomEntityRequest
from lusid.models.custom_entity_response import CustomEntityResponse
from lusid.models.cut_label_definition import CutLabelDefinition
from lusid.models.cut_local_time import CutLocalTime
from lusid.models.data_definition import DataDefinition
from lusid.models.data_map_key import DataMapKey
from lusid.models.data_mapping import DataMapping
from lusid.models.data_type import DataType
from lusid.models.data_type_summary import DataTypeSummary
from lusid.models.data_type_value_range import DataTypeValueRange
from lusid.models.date_attributes import DateAttributes
from lusid.models.date_range import DateRange
from lusid.models.date_time_comparison_type import DateTimeComparisonType
from lusid.models.day_of_week import DayOfWeek
from lusid.models.delete_instrument_properties_response import DeleteInstrumentPropertiesResponse
from lusid.models.delete_instrument_response import DeleteInstrumentResponse
from lusid.models.delete_relation_request import DeleteRelationRequest
from lusid.models.delete_relationship_request import DeleteRelationshipRequest
from lusid.models.deleted_entity_response import DeletedEntityResponse
from lusid.models.dependency_source_filter import DependencySourceFilter
from lusid.models.discount_factor_curve_data import DiscountFactorCurveData
from lusid.models.discount_factor_curve_data_all_of import DiscountFactorCurveDataAllOf
from lusid.models.discounting_method import DiscountingMethod
from lusid.models.empty_model_options import EmptyModelOptions
from lusid.models.empty_model_options_all_of import EmptyModelOptionsAllOf
from lusid.models.entity_identifier import EntityIdentifier
from lusid.models.equity import Equity
from lusid.models.equity_all_of import EquityAllOf
from lusid.models.equity_all_of_identifiers import EquityAllOfIdentifiers
from lusid.models.equity_option import EquityOption
from lusid.models.equity_option_all_of import EquityOptionAllOf
from lusid.models.equity_swap import EquitySwap
from lusid.models.equity_swap_all_of import EquitySwapAllOf
from lusid.models.equity_vol_surface_data import EquityVolSurfaceData
from lusid.models.equity_vol_surface_data_all_of import EquityVolSurfaceDataAllOf
from lusid.models.error_detail import ErrorDetail
from lusid.models.exchange_traded_option import ExchangeTradedOption
from lusid.models.exchange_traded_option_all_of import ExchangeTradedOptionAllOf
from lusid.models.exchange_traded_option_contract_details import ExchangeTradedOptionContractDetails
from lusid.models.execution import Execution
from lusid.models.execution_request import ExecutionRequest
from lusid.models.execution_set_request import ExecutionSetRequest
from lusid.models.exotic_instrument import ExoticInstrument
from lusid.models.exotic_instrument_all_of import ExoticInstrumentAllOf
from lusid.models.expanded_group import ExpandedGroup
from lusid.models.fee_calculation_details import FeeCalculationDetails
from lusid.models.fee_rule import FeeRule
from lusid.models.fee_rule_upsert_request import FeeRuleUpsertRequest
from lusid.models.fee_rule_upsert_response import FeeRuleUpsertResponse
from lusid.models.field_definition import FieldDefinition
from lusid.models.field_schema import FieldSchema
from lusid.models.field_value import FieldValue
from lusid.models.file_response import FileResponse
from lusid.models.fixed_leg import FixedLeg
from lusid.models.fixed_leg_all_of import FixedLegAllOf
from lusid.models.fixed_leg_all_of_overrides import FixedLegAllOfOverrides
from lusid.models.fixed_schedule import FixedSchedule
from lusid.models.fixed_schedule_all_of import FixedScheduleAllOf
from lusid.models.float_schedule import FloatSchedule
from lusid.models.float_schedule_all_of import FloatScheduleAllOf
from lusid.models.floating_leg import FloatingLeg
from lusid.models.floating_leg_all_of import FloatingLegAllOf
from lusid.models.flow_convention_name import FlowConventionName
from lusid.models.flow_conventions import FlowConventions
from lusid.models.forward_rate_agreement import ForwardRateAgreement
from lusid.models.forward_rate_agreement_all_of import ForwardRateAgreementAllOf
from lusid.models.funding_leg import FundingLeg
from lusid.models.funding_leg_all_of import FundingLegAllOf
from lusid.models.funding_leg_options import FundingLegOptions
from lusid.models.funding_leg_options_all_of import FundingLegOptionsAllOf
from lusid.models.future import Future
from lusid.models.future_all_of import FutureAllOf
from lusid.models.futures_contract_details import FuturesContractDetails
from lusid.models.fx_forward import FxForward
from lusid.models.fx_forward_all_of import FxForwardAllOf
from lusid.models.fx_forward_curve_by_quote_reference import FxForwardCurveByQuoteReference
from lusid.models.fx_forward_curve_by_quote_reference_all_of import FxForwardCurveByQuoteReferenceAllOf
from lusid.models.fx_forward_curve_data import FxForwardCurveData
from lusid.models.fx_forward_curve_data_all_of import FxForwardCurveDataAllOf
from lusid.models.fx_forward_model_options import FxForwardModelOptions
from lusid.models.fx_forward_model_options_all_of import FxForwardModelOptionsAllOf
from lusid.models.fx_forward_pips_curve_data import FxForwardPipsCurveData
from lusid.models.fx_forward_pips_curve_data_all_of import FxForwardPipsCurveDataAllOf
from lusid.models.fx_forward_tenor_curve_data import FxForwardTenorCurveData
from lusid.models.fx_forward_tenor_curve_data_all_of import FxForwardTenorCurveDataAllOf
from lusid.models.fx_forward_tenor_pips_curve_data import FxForwardTenorPipsCurveData
from lusid.models.fx_forward_tenor_pips_curve_data_all_of import FxForwardTenorPipsCurveDataAllOf
from lusid.models.fx_option import FxOption
from lusid.models.fx_option_all_of import FxOptionAllOf
from lusid.models.fx_swap import FxSwap
from lusid.models.fx_swap_all_of import FxSwapAllOf
from lusid.models.fx_vol_surface_data import FxVolSurfaceData
from lusid.models.get_cds_flow_conventions_response import GetCdsFlowConventionsResponse
from lusid.models.get_complex_market_data_response import GetComplexMarketDataResponse
from lusid.models.get_counterparty_agreement_response import GetCounterpartyAgreementResponse
from lusid.models.get_credit_support_annex_response import GetCreditSupportAnnexResponse
from lusid.models.get_data_map_response import GetDataMapResponse
from lusid.models.get_flow_conventions_response import GetFlowConventionsResponse
from lusid.models.get_index_convention_response import GetIndexConventionResponse
from lusid.models.get_instruments_response import GetInstrumentsResponse
from lusid.models.get_quotes_response import GetQuotesResponse
from lusid.models.get_recipe_response import GetRecipeResponse
from lusid.models.get_reference_portfolio_constituents_response import GetReferencePortfolioConstituentsResponse
from lusid.models.get_structured_result_data_response import GetStructuredResultDataResponse
from lusid.models.get_virtual_document_response import GetVirtualDocumentResponse
from lusid.models.grouped_result_of_address_key import GroupedResultOfAddressKey
from lusid.models.holding_adjustment import HoldingAdjustment
from lusid.models.holding_context import HoldingContext
from lusid.models.holdings_adjustment import HoldingsAdjustment
from lusid.models.holdings_adjustment_header import HoldingsAdjustmentHeader
from lusid.models.i_unit_definition_dto import IUnitDefinitionDto
from lusid.models.id_selector_definition import IdSelectorDefinition
from lusid.models.identifier_part_schema import IdentifierPartSchema
from lusid.models.index_convention import IndexConvention
from lusid.models.index_model_options import IndexModelOptions
from lusid.models.index_model_options_all_of import IndexModelOptionsAllOf
from lusid.models.industry_classifier import IndustryClassifier
from lusid.models.inline_valuation_request import InlineValuationRequest
from lusid.models.inline_valuations_reconciliation_request import InlineValuationsReconciliationRequest
from lusid.models.instrument import Instrument
from lusid.models.instrument_cash_flow import InstrumentCashFlow
from lusid.models.instrument_definition import InstrumentDefinition
from lusid.models.instrument_definition_format import InstrumentDefinitionFormat
from lusid.models.instrument_id_type_descriptor import InstrumentIdTypeDescriptor
from lusid.models.instrument_id_value import InstrumentIdValue
from lusid.models.instrument_leg import InstrumentLeg
from lusid.models.instrument_leg_all_of import InstrumentLegAllOf
from lusid.models.instrument_match import InstrumentMatch
from lusid.models.instrument_payment_diary import InstrumentPaymentDiary
from lusid.models.instrument_payment_diary_leg import InstrumentPaymentDiaryLeg
from lusid.models.instrument_payment_diary_row import InstrumentPaymentDiaryRow
from lusid.models.instrument_properties import InstrumentProperties
from lusid.models.instrument_search_property import InstrumentSearchProperty
from lusid.models.instrument_type import InstrumentType
from lusid.models.interest_rate_swap import InterestRateSwap
from lusid.models.interest_rate_swap_all_of import InterestRateSwapAllOf
from lusid.models.interest_rate_swaption import InterestRateSwaption
from lusid.models.interest_rate_swaption_all_of import InterestRateSwaptionAllOf
from lusid.models.ir_vol_cube_data import IrVolCubeData
from lusid.models.ir_vol_cube_data_all_of import IrVolCubeDataAllOf
from lusid.models.is_business_day_response import IsBusinessDayResponse
from lusid.models.label_value_set import LabelValueSet
from lusid.models.leg_definition import LegDefinition
from lusid.models.legal_entity import LegalEntity
from lusid.models.level_step import LevelStep
from lusid.models.link import Link
from lusid.models.list_aggregation_reconciliation import ListAggregationReconciliation
from lusid.models.list_aggregation_response import ListAggregationResponse
from lusid.models.list_complex_market_data_with_meta_data_response import ListComplexMarketDataWithMetaDataResponse
from lusid.models.lusid_instrument import LusidInstrument
from lusid.models.lusid_problem_details import LusidProblemDetails
from lusid.models.lusid_unique_id import LusidUniqueId
from lusid.models.lusid_validation_problem_details import LusidValidationProblemDetails
from lusid.models.market_context import MarketContext
from lusid.models.market_context_suppliers import MarketContextSuppliers
from lusid.models.market_data_key_rule import MarketDataKeyRule
from lusid.models.market_data_specific_rule import MarketDataSpecificRule
from lusid.models.market_data_type import MarketDataType
from lusid.models.market_observable_type import MarketObservableType
from lusid.models.market_options import MarketOptions
from lusid.models.market_quote import MarketQuote
from lusid.models.metric_value import MetricValue
from lusid.models.model_options import ModelOptions
from lusid.models.model_options_type import ModelOptionsType
from lusid.models.model_property import ModelProperty
from lusid.models.model_selection import ModelSelection
from lusid.models.movement_type import MovementType
from lusid.models.next_value_in_sequence_response import NextValueInSequenceResponse
from lusid.models.numeric_comparison_type import NumericComparisonType
from lusid.models.opaque_market_data import OpaqueMarketData
from lusid.models.opaque_market_data_all_of import OpaqueMarketDataAllOf
from lusid.models.opaque_model_options import OpaqueModelOptions
from lusid.models.opaque_model_options_all_of import OpaqueModelOptionsAllOf
from lusid.models.operand_type import OperandType
from lusid.models.operation import Operation
from lusid.models.operator import Operator
from lusid.models.order import Order
from lusid.models.order_by_spec import OrderBySpec
from lusid.models.order_graph_block import OrderGraphBlock
from lusid.models.order_graph_block_allocation_detail import OrderGraphBlockAllocationDetail
from lusid.models.order_graph_block_allocation_synopsis import OrderGraphBlockAllocationSynopsis
from lusid.models.order_graph_block_execution_detail import OrderGraphBlockExecutionDetail
from lusid.models.order_graph_block_execution_synopsis import OrderGraphBlockExecutionSynopsis
from lusid.models.order_graph_block_order_detail import OrderGraphBlockOrderDetail
from lusid.models.order_graph_block_order_synopsis import OrderGraphBlockOrderSynopsis
from lusid.models.order_graph_block_placement_detail import OrderGraphBlockPlacementDetail
from lusid.models.order_graph_block_placement_synopsis import OrderGraphBlockPlacementSynopsis
from lusid.models.order_graph_placement import OrderGraphPlacement
from lusid.models.order_graph_placement_allocation_detail import OrderGraphPlacementAllocationDetail
from lusid.models.order_graph_placement_allocation_synopsis import OrderGraphPlacementAllocationSynopsis
from lusid.models.order_graph_placement_execution_detail import OrderGraphPlacementExecutionDetail
from lusid.models.order_graph_placement_execution_synopsis import OrderGraphPlacementExecutionSynopsis
from lusid.models.order_graph_placement_order_detail import OrderGraphPlacementOrderDetail
from lusid.models.order_graph_placement_order_synopsis import OrderGraphPlacementOrderSynopsis
from lusid.models.order_graph_placement_placement_synopsis import OrderGraphPlacementPlacementSynopsis
from lusid.models.order_instruction import OrderInstruction
from lusid.models.order_instruction_request import OrderInstructionRequest
from lusid.models.order_instruction_set_request import OrderInstructionSetRequest
from lusid.models.order_request import OrderRequest
from lusid.models.order_set_request import OrderSetRequest
from lusid.models.otc_confirmation import OtcConfirmation
from lusid.models.output_transaction import OutputTransaction
from lusid.models.package import Package
from lusid.models.package_request import PackageRequest
from lusid.models.package_set_request import PackageSetRequest
from lusid.models.paged_resource_list_of_allocation import PagedResourceListOfAllocation
from lusid.models.paged_resource_list_of_block import PagedResourceListOfBlock
from lusid.models.paged_resource_list_of_calendar import PagedResourceListOfCalendar
from lusid.models.paged_resource_list_of_corporate_action_source import PagedResourceListOfCorporateActionSource
from lusid.models.paged_resource_list_of_custom_entity_definition import PagedResourceListOfCustomEntityDefinition
from lusid.models.paged_resource_list_of_custom_entity_response import PagedResourceListOfCustomEntityResponse
from lusid.models.paged_resource_list_of_cut_label_definition import PagedResourceListOfCutLabelDefinition
from lusid.models.paged_resource_list_of_data_type_summary import PagedResourceListOfDataTypeSummary
from lusid.models.paged_resource_list_of_execution import PagedResourceListOfExecution
from lusid.models.paged_resource_list_of_instrument import PagedResourceListOfInstrument
from lusid.models.paged_resource_list_of_legal_entity import PagedResourceListOfLegalEntity
from lusid.models.paged_resource_list_of_order import PagedResourceListOfOrder
from lusid.models.paged_resource_list_of_order_graph_block import PagedResourceListOfOrderGraphBlock
from lusid.models.paged_resource_list_of_order_graph_placement import PagedResourceListOfOrderGraphPlacement
from lusid.models.paged_resource_list_of_order_instruction import PagedResourceListOfOrderInstruction
from lusid.models.paged_resource_list_of_package import PagedResourceListOfPackage
from lusid.models.paged_resource_list_of_participation import PagedResourceListOfParticipation
from lusid.models.paged_resource_list_of_person import PagedResourceListOfPerson
from lusid.models.paged_resource_list_of_placement import PagedResourceListOfPlacement
from lusid.models.paged_resource_list_of_portfolio_group_search_result import PagedResourceListOfPortfolioGroupSearchResult
from lusid.models.paged_resource_list_of_portfolio_search_result import PagedResourceListOfPortfolioSearchResult
from lusid.models.paged_resource_list_of_property_definition_search_result import PagedResourceListOfPropertyDefinitionSearchResult
from lusid.models.paged_resource_list_of_relationship_definition import PagedResourceListOfRelationshipDefinition
from lusid.models.paged_resource_list_of_sequence_definition import PagedResourceListOfSequenceDefinition
from lusid.models.participation import Participation
from lusid.models.participation_request import ParticipationRequest
from lusid.models.participation_set_request import ParticipationSetRequest
from lusid.models.performance_return import PerformanceReturn
from lusid.models.performance_returns_metric import PerformanceReturnsMetric
from lusid.models.period_type import PeriodType
from lusid.models.perpetual_entity_state import PerpetualEntityState
from lusid.models.perpetual_property import PerpetualProperty
from lusid.models.person import Person
from lusid.models.placement import Placement
from lusid.models.placement_request import PlacementRequest
from | |
<reponame>sergimasot/PycQED_py3
import traceback
import logging
log = logging.getLogger(__name__)
import re
import os
from copy import deepcopy
from pycqed.analysis_v3 import saving as save_mod
from pycqed.analysis_v3 import helper_functions as hlp_mod
from numpy import array # Needed for eval. Do not remove.
search_modules = set()
search_modules.add(hlp_mod)
###################################################################
#### This module creates a processing pipeline for analysis_v3 ####
###################################################################
"""
The pipeline is a list of dictionaries.
Each dictionary contains
- NECESSARILY the key "node_name" with value being a string specifying
the name of a processing function within analysis_v3
- NECESSARILY the key "keys_in" with value a list of strings that
specify the keys that are already in data_dict that correspond to
the data arrays to be processed by the current node.
- NECESSARILY the key "meas_obj_names" which contains a string or a
list of strings specifying the name(s) of the object(s) measured in
the experiment.
These can be for example qubits (['qb1', 'qb2', 'qb3']), or anything
else ('TWPA', 'dummy', ['test1', 'test2'] etc.)
- VERY OFTEN the key "keys_out" with value a list of strings
specifiying the key names under which the data processed by the current
node will be save in the data_dict.
- any other keyword arguments that the current node might require
From here on I will refer to the processing functions in the pipeline as
nodes.
Instructions for use:
Initialization
- from a list of dicts: ProcessingPipeline(dict_list)
- without any input arguments: ProcessingPipeline()
- or with input parameters:
ProcessingPipeline(node_name, **node_params), where node_name is
the name of the node, and **node_params all the parameters
required by the node including the necessary keys described above
! Specify the keyword argument global_keys_out_container to prepend it
to all the keys_out as global_keys_out_container.keyo.
! For ease of use, keys_in can also be specified as
- 'raw': the raw data corresponding to the measured object
- 'previous': the keys_out of the previous node dictionary
for the measured object.
- 'previous node_name': the keys_out of the
dictionary for the measured object which has the node_name.
Use 'previous node_namei' where i is the i'th identical appearance
of node_name in the pipeline for that meas_obj.
! keys_out do not need to be specified by the user as they will be
automatically constructed from the measured object name and the
keys_in
! use keys_out_container in the **node_params to prepend it to the
keys_out of that node
Examples:
ProcessingPipeline('average_data',
keys_in='raw',
shape=(3,2),
meas_obj_names='TWPA')
ProcessingPipeline('ramsey_analysis',
keys_in='previous rotate_iq',
meas_obj_names=['qb1', 'qb2'])
Adding processing node dictionaries:
- to add more node dictionaries to the pipeline, call the "add_node"
method with the same "node_name" and **node_params arguments as
described above under "Initialization."
Example: same as above but replace ProcessingPipeline with
ProcessingPipeline_instance.add_node
Up to now, the pipeline is just a list of dictionaries with the
key-value pairs as provided by the user:
Example of a "raw" pipeline:
[{'keys_in': 'raw',
'shape': (80, 10),
'meas_obj_names': ['qb2'],
'node_name': 'average_data'},
{'keys_in': 'previous qb2.average_data',
'shape': (10, 8),
'averaging_axis': 0,
'meas_obj_names': ['qb2'],
'update_key': False,
'node_name': 'average_data'},
{'meas_obj_names': ['qb2'],
'keys_out': None,
'keys_in': 'previous qb2.average_data1',
'std_keys': 'previous qb2.get_std_deviation1',
'node_name': 'SingleQubitRBAnalysis'}]
Creating the pipeline:
- the analysis framework always expects keys_in to be a list of
keys in the data_dict, and most functions expect keys_out
- to create the pipeline that will be used by the analysis
framework, the user can call:
ProcessingPipeline_instance(meas_obj_value_names_map), where
meas_obj_value_names_map is a dictionary with measured objects as keys
and list of their corresponding readout channels as values.
However, the analysis supports a precompiled pipeline as well, in
which case it will call ProcessingPipeline_instance(
meas_obj_value_names_map).
The final pipeline corresponding to the "raw" pipeline above:
meas_obj_value_names_map = {'qb2': ['UHF1_pg w23 UHF1',
'UHF1_pe w23 UHF1',
'UHF1_pf w23 UHF1']}
Final pipeline:
[{'keys_in': ['<KEY>', 'U<KEY>',
'UHF1_pf w23 UHF1'],
'shape': (80, 10),
'meas_obj_names': ['qb2'],
'node_name': 'average_data',
'keys_out': ['qb2.average_data UHF1_pg w23 UHF1',
'qb2.average_data UHF1_pe w23 UHF1',
'qb2.average_data UHF1_pf w23 UHF1']},
{'keys_in': ['qb2.average_data UHF1_pe w23 UHF1',
'qb2.average_data UHF1_pf w23 UHF1',
'qb2.average_data UHF1_pg w23 UHF1'],
'shape': (10, 8),
'averaging_axis': 0,
'meas_obj_names': ['qb2'],
'update_key': False,
'node_name': 'average_data',
'keys_out': ['qb2.average_data1 UHF1_pe w23 UHF1',
'qb2.average_data1 UHF1_pf w23 UHF1',
'qb2.average_data1 UHF1_pg w23 UHF1']},
{'meas_obj_names': ['qb2'],
'keys_out': None,
'keys_in': ['qb2.average_data1 UHF1_pe w23 UHF1',
'qb2.average_data1 UHF1_pf w23 UHF1',
'qb2.average_data1 UHF1_pg w23 UHF1'],
'std_keys': 'previous qb2.get_std_deviation1',
'node_name': 'SingleQubitRBAnalysis'}]
Final example where some meas_obj_names are lists: multi-file 2QB RB
meas_obj_value_names_map = {
'qb2': ['UHF1_pg w23 UHF1', 'UHF1_pe w23 UHF1', 'UHF1_pf w23 UHF1'],
'qb4': ['UHF1_pg w45 UHF1', 'UHF1_pe w45 UHF1', 'UHF1_pf w45 UHF1'],
'correlation': ['correlation']}
nr_files = 10
nr_cliffs = 8
nr_seeds_per_file = 10
pp = pp_mod.ProcessingPipeline()
# average data for all measured objects
pp.add_node('average_data', keys_in='raw',
shape=(nr_files*nr_cliffs, nr_seeds_per_file),
meas_obj_names=list(movnm))
# average data again for all measured objects
pp.add_node('average_data',
keys_in=[f'previous {mobj}.average_data' for mobj in movnm],
shape=(nr_files, nr_cliffs),
averaging_axis=0,
meas_obj_names=list(movnm))
# RB only for qubit2
mobj = 'qb2'
pp.add_node('SingleQubitRBAnalysis',
keys_in=f'previous {mobj}.average_data1',
std_keys=f'previous {mobj}.get_std_deviation1',
keys_out=None, # no keys out
meas_obj_names=mobj)
"Raw" pipeline:
[{'keys_in': 'raw',
'shape': (80, 10),
'meas_obj_names': ['qb2', 'qb4', 'correlation'],
'node_name': 'average_data'},
{'keys_in': ['previous qb2.average_data',
'previous qb4.average_data',
'previous correlation.average_data'],
'shape': (10, 8),
'averaging_axis': 0,
'meas_obj_names': ['qb2', 'qb4', 'correlation'],
'node_name': 'average_data'},
{'meas_obj_names': 'qb2',
'keys_out': None,
'keys_in': 'previous qb2.average_data1',
'std_keys': 'previous qb2.get_std_deviation1',
'node_name': 'SingleQubitRBAnalysis'}]
Final pipeline:
call pp(movnm):
[{'keys_in': ['<KEY>', 'UHF1_pe w45 UHF1', 'UHF1_pf w23 UHF1',
'UHF1_pf w45 UHF1', 'UHF1_pg w23 UHF1', 'UHF1_pg w45 UHF1',
'correlation'],
'shape': (80, 10),
'meas_obj_names': ['qb2', 'qb4', 'correlation'],
'node_name': 'average_data',
'keys_out': ['qb2.average_data UHF1_pe w23 UHF1',
'qb4.average_data UHF1_pe w45 UHF1',
'qb2.average_data UHF1_pf w23 UHF1',
'qb4.average_data UHF1_pf w45 UHF1',
'qb2.average_data UHF1_pg w23 UHF1',
'qb4.average_data UHF1_pg w45 UHF1',
'correlation.average_data correlation']
},
{'keys_in': ['correlation.average_data correlation',
'qb2.average_data UHF1_pe w23 UHF1',
'qb2.average_data UHF1_pf w23 UHF1',
'qb2.average_data UHF1_pg w23 UHF1',
'qb4.average_data UHF1_pe w45 UHF1',
'qb4.average_data UHF1_pf w45 UHF1',
'qb4.average_data UHF1_pg w45 UHF1'],
'shape': (10, 8),
'averaging_axis': 0,
'meas_obj_names': ['qb2', 'qb4', 'correlation'],
'node_name': 'average_data',
'keys_out': ['correlation.average_data1 correlation',
'qb2.average_data1 UHF1_pe w23 UHF1',
'qb2.average_data1 UHF1_pf w23 UHF1',
'qb2.average_data1 UHF1_pg w23 UHF1',
'qb4.average_data1 UHF1_pe w45 UHF1',
'qb4.average_data1 UHF1_pf w45 UHF1',
'qb4.average_data1 UHF1_pg w45 UHF1']
},
{'meas_obj_names': ['qb2'],
'keys_out': None,
'keys_in': ['qb2.average_data1 U<KEY>',
'qb2.average_data1 UHF1_pf w23 UHF1',
'qb2.average_data1 UHF1_pg w23 UHF1'],
'std_keys': 'previous qb2.get_std_deviation1',
'node_name': 'SingleQubitRBAnalysis'
}]
"""
class ProcessingPipeline(list):
global_node_param_defaults = {'keys_out_container': '',
'meas_obj_names': None,
'add_param_method': None}
def __init__(self, pipeline=None, **kw):
"""
Creates a processing pipeline for analysis_v3.
:param pipeline: repr of a ProcessingPipeline instance, or list of dicts
:param global_
:param kw: keyword arguments. Used to create global_node_param_values
with user provided values to the keys in global_node_param_defaults:
- keys_out_container: str specifying a container for the
keys_out that will be prepended to all the keys_out in all the
nodes in the pipeline
- meaj_obj_names: str or list of str specifying the measured
object names for all the nodes in the pipeline
"""
super().__init__()
if isinstance(pipeline, list):
self._add_dict_list(pipeline)
elif isinstance(pipeline, str):
self._add_dict_list(eval(pipeline))
self.data_dict = {}
self.global_node_param_values = {k: kw.get(k, val) for k, val in
self.global_node_param_defaults.items()
}
def __getitem__(self, i):
new_instance = super().__getitem__(i)
if type(i) == slice:
new_instance = self.__class__(new_instance)
self._set_attributes_to_other(new_instance)
return new_instance
def __add__(self, other):
for p, v in self.global_node_param_values.items():
if other.global_node_param_values[p] != \
self.global_node_param_defaults[p] and \
other.global_node_param_values[p] != v:
# cannot add pipelines that do not have the same global
# node params. Exception: if other has the values from
# global_node_param_defaults, they will be overwritten with
# those from self.global_node_param_values
raise ValueError(f'Cannot add, the two pipelines do not '
f'have the same value for the attribute '
f'{p}.')
new_instance = self.__class__(super().__add__(other))
self._set_attributes_to_other(new_instance)
return new_instance
def __call__(self, *args, **params):
self.run(*args, **params)
def _set_attributes_to_other(self, other):
"""
Update other.__dict__ with self.__dict__. Values will be deepcopied,
if possible.
:param other: other instance of this class
"""
for attr, value in self.__dict__.items():
# value_to_assign = value
# if attr == 'data_dict':
value_to_assign = {}
value_to_assign.update(other.__dict__[attr])
value_to_assign.update(value)
try:
value_to_assign = deepcopy(value_to_assign)
except Exception as e:
log.warning(f'Unable to deepcopy data_dict: {e}.'
f'\nSetting the un-copied instance.')
other.__dict__[attr] = value_to_assign
def _add_dict_list(self, dict_list):
"""
Add the dicts in dict_list to the pipeline.
Assumes that dicts have the same format as this class!
:param dict_list: list of dicts
"""
for d in dict_list:
if isinstance(d, dict):
self.append(d)
else:
raise ValueError('Entries | |
# // 1 indicates the objection is being raised.
#
def m_propagate (self, obj, source_obj, description, count, raise_, in_top_thread):
if obj is not None and obj != self.m_top:
obj = self.m_get_parent(obj)
if(raise_):
self.m_raise(obj, source_obj, description, count)
else:
self.m_drop(obj, source_obj, description, count, in_top_thread)
# // Group: Objection Control
#
# // Function: set_propagate_mode
# // Sets the propagation mode for this objection.
# //
# // By default, objections support hierarchical propagation for
# // components. For example, if we have the following basic
# // component tree:
# //
# //| uvm_top.parent.child
# //
# // Any objections raised by 'child' would get propagated
# // down to parent, and then to uvm_test_top. Resulting in the
# // following counts and totals:
# //
# //| | count | total |
# //| uvm_top.parent.child | 1 | 1 |
# //| uvm_top.parent | 0 | 1 |
# //| uvm_top | 0 | 1 |
# //|
# //
# // While propagations such as these can be useful, if they are
# // unused by the testbench then they are simply an unnecessary
# // performance hit. If the testbench is not going to use this
# // functionality, then the performance can be improved by setting
# // the propagation mode to 0.
# //
# // When propagation mode is set to 0, all intermediate callbacks
# // between the ~source~ and ~top~ will be skipped. This would
# // result in the following counts and totals for the above objection:
# //
# //| | count | total |
# //| uvm_top.parent.child | 1 | 1 |
# //| uvm_top.parent | 0 | 0 |
# //| uvm_top | 0 | 1 |
# //|
# //
# // Since the propagation mode changes the behavior of the objection,
# // it can only be safely changed if there are no objections ~raised~
# // or ~draining~. Any attempts to change the mode while objections
# // are ~raised~ or ~draining~ will result in an error.
# //
# function void set_propagate_mode (bit prop_mode)
# if (!self.m_top_all_dropped && (get_objection_total() != 0)) begin
# `uvm_error("UVM/BASE/OBJTN/PROP_MODE",
# {"The propagation mode of '", this.get_full_name(),
# "' cannot be changed while the objection is raised ",
# "or draining!"})
# return
# end
#
# self.m_prop_mode = prop_mode
# endfunction : set_propagate_mode
#
# // Function: get_propagate_mode
# // Returns the propagation mode for this objection.
# function bit get_propagate_mode()
# return self.m_prop_mode
# endfunction : get_propagate_mode
#
# // Function: raise_objection
# //
# // Raises the number of objections for the source ~object~ by ~count~, which
# // defaults to 1. The ~object~ is usually the ~this~ handle of the caller.
# // If ~object~ is not specified or ~null~, the implicit top-level component,
# // <uvm_root>, is chosen.
# //
# // Raising an objection causes the following.
# //
# // - The source and total objection counts for ~object~ are increased by
# // ~count~. ~description~ is a string that marks a specific objection
# // and is used in tracing/debug.
# //
# // - The objection's <raised> virtual method is called, which calls the
# // <uvm_component::raised> method for all of the components up the
# // hierarchy.
# //
def raise_objection (self, obj=None, description="", count=1):
if obj is None:
obj = self.m_top
self.m_cleared = 0
self.m_top_all_dropped = 0
uvm_debug(self, 'raise_objection', obj.get_name() + " Starting to raise objection")
self.m_raise(obj, obj, description, count)
# // Function- m_raise
def m_raise(self, obj, source_obj, description="", count=1):
idx = 0
ctxt = None # uvm_objection_context_object
# Ignore raise if count is 0
if count == 0:
return
if obj in self.m_total_count:
self.m_total_count[obj] += count
else:
self.m_total_count[obj] = count
if source_obj == obj:
if obj in self.m_source_count:
self.m_source_count[obj] += count
else:
self.m_source_count[obj] = count
if self.m_trace_mode:
self.m_report(obj,source_obj,description,count,"raised")
self.raised(obj, source_obj, description, count)
# Handle any outstanding drains...
# First go through the scheduled list
idx = 0
m_scheduled_list = UVMObjection.m_scheduled_list
while idx < len(m_scheduled_list):
if ((m_scheduled_list[idx].obj == obj) and
(m_scheduled_list[idx].objection == self)):
# Caught it before the drain was forked
ctxt = m_scheduled_list[idx]
del m_scheduled_list[idx]
break
idx += 1
# If it's not there, go through the forked list
if ctxt is None:
idx = 0
while idx < len(self.m_forked_list):
if (self.m_forked_list[idx].obj == obj):
# Caught it after the drain was forked,
# but before the fork started
ctxt = self.m_forked_list[idx]
del self.m_forked_list[idx]
del self.m_scheduled_contexts[ctxt.obj]
break
idx += 1
# If it's not there, go through the forked contexts
if ctxt is None:
if obj in self.m_forked_contexts:
# Caught it with the forked drain running
ctxt = self.m_forked_contexts[obj]
del self.m_forked_contexts[obj]
# Kill the drain
uvm_debug(self, 'm_raise', obj.get_name() + " ENDING FUNC")
# TODO
#if UVM_USE_PROCESS_CONTAINER:
# self.m_drain_proc[obj].kill()
# del self.m_drain_proc[obj]
#else:
# self.m_drain_proc[obj].p.kill()
# del self.m_drain_proc[obj]
uvm_debug(self, 'm_raise', obj.get_name() + " NEVER GETS HERE")
if ctxt is None:
# If there were no drains, just propagate as usual
if not self.m_prop_mode and obj != self.m_top:
uvm_debug(self, 'm_raise', obj.get_name() + " XXX NEVER GETS HERE")
self.m_raise(self.m_top,source_obj,description,count)
elif obj != self.m_top:
self.m_propagate(obj, source_obj, description, count, 1, 0)
else:
# Otherwise we need to determine what exactly happened
diff_count = 0
# Determine the diff count, if it's positive, then we're
# looking at a 'raise' total, if it's negative, then
# we're looking at a 'drop', but not down to 0. If it's
# a 0, that means that there is no change in the total.
diff_count = count - ctxt.count
if diff_count != 0:
# Something changed
if diff_count > 0:
# we're looking at an increase in the total
if not self.m_prop_mode and obj != self.m_top:
self.m_raise(self.m_top, source_obj, description, diff_count)
elif obj != self.m_top:
self.m_propagate(obj, source_obj, description, diff_count, 1, 0)
else:
# we're looking at a decrease in the total
# The count field is always positive...
diff_count = -diff_count
if not self.m_prop_mode and obj != self.m_top:
self.m_drop(self.m_top, source_obj, description, diff_count)
elif obj != self.m_top:
self.m_propagate(obj, source_obj, description, diff_count, 0, 0)
# Cleanup
ctxt.clear()
UVMObjection.m_context_pool.append(ctxt)
# endfunction
# // Function: drop_objection
# //
# // Drops the number of objections for the source ~object~ by ~count~, which
# // defaults to 1. The ~object~ is usually the ~this~ handle of the caller.
# // If ~object~ is not specified or ~null~, the implicit top-level component,
# // <uvm_root>, is chosen.
# //
# // Dropping an objection causes the following.
# //
# // - The source and total objection counts for ~object~ are decreased by
# // ~count~. It is an error to drop the objection count for ~object~ below
# // zero.
# //
# // - The objection's <dropped> virtual method is called, which calls the
# // <uvm_component::dropped> method for all of the components up the
# // hierarchy.
# //
# // - If the total objection count has not reached zero for ~object~, then
# // the drop is propagated up the object hierarchy as with
# // <raise_objection>. Then, each object in the hierarchy will have updated
# // their ~source~ counts--objections that they originated--and ~total~
# // counts--the total number of objections by them and all their
# // descendants.
# //
# // If the total objection count reaches zero, propagation up the hierarchy
# // is deferred until a configurable drain-time has passed and the
# // <uvm_component::all_dropped> callback for the current hierarchy level
# // has returned. The following process occurs for each instance up
# // the hierarchy from the source caller:
# //
# // A process is forked in a non-blocking fashion, allowing the ~drop~
# // call to return. The forked process then does the following:
# //
# // - If a drain time | |
raya=79*'-'
archivo=open('impresion.txt','w')
archivo.write(head+'\n')
archivo.write('Producto:'+str(codigo_prod)+' '+str(descrip_prod)+'\n\n')
archivo.write(raya+'\n')
archivo.write(titulo+'\n')
archivo.write(raya+'\n')
for lin in lineas:
try:
archivo.write(lin+'\n')
except:
data=string.ljust(str(lin[0]),16)+string.ljust(str(lin[1]),50)+string.rjust(str(lin[2]),13)
archivo.write(data+'\n')
archivo.close()
try:
os.system('lpr '+print_buffer+' impresion.txt')
resp=segur("IMPRESION EXITOSA!!!")
except:
resp=segur("ERROR EN LA IMPRESION!!!")
else:
psey=0
temc=abs(psey)
if temc>len(lineas):
psey=0
def anulacion_guia(doc_modo=1,doc_tipo=6,oper_log_pref='1'):
n_doc_prefijo=''
n_doc_base=''
filtro,temp_filt=ing_dat('Guia',0)
if len(filtro)>0:
temp = filtro.split('-')
if len(temp) == 2:
n_doc_prefijo=str(temp[0])
n_doc_base=str(temp[1])
query_trans=[]
sql="""select concat(n_doc_prefijo,'-',n_doc_base),
concat(almacen_origen,'/',almacen_destino,'-',
operacion_logistica,'=',codbarras,'->',if(modo=1,ingreso,
salida)) from almacenes where tipo_doc='%s' and estado='1' and
n_doc_prefijo='%s' and n_doc_base='%s' and modo_doc='%s'
and almacen='%s'""" % (doc_tipo,n_doc_prefijo,n_doc_base,
doc_modo,alm_base)
cuenta,resultado=query(sql,1)
if cuenta>0:
n_doc,op_doc=ladocl(resultado,'Guias')
if op_doc == 'Anular':
return 0
mensaje="Anular el Documento:"+str(n_doc)+",Esta Seguro?"
resp=segur(mensaje)
if resp == 'si':
temp_doc = n_doc.split('-')
n_pre=temp_doc[0]
n_doc=temp_doc[1]
sql="update almacenes set estado='0' where n_doc_base='"+str(n_doc)+"' and n_doc_prefijo='"+str(n_pre)+"' and (modo='"+str(oper_log_pref)+"1' or modo='"+str(oper_log_pref)+"2') and tipo_doc='"+str(doc_tipo)+"' and estado='1'"
query_trans.append(sql)
sql="select concat(n_serie_relacion,'-',n_doc_relacion) from almacenes where n_doc_base='"+str(n_doc)+"' and n_doc_prefijo='"+str(n_pre)+"' and (modo='"+str(oper_log_pref)+"1' or modo='"+str(oper_log_pref)+"2') and tipo_doc='"+str(doc_tipo)+"' and estado='1' and modo_doc='"+str(doc_modo)+"'"
cuenta2,resultado2=query(sql,0)
if cuenta2>0:
if len(resultado2[0])>0:
n_doc2=resultado2[0]
temp_doc2 = n_doc2.split('-')
n_pre2=temp_doc2[0]
n_doc2=temp_doc2[1]
sql="update almacenes set estado='0' where n_doc_base='"+str(n_doc2)+"' and n_doc_prefijo='"+str(n_pre2)+"' and (modo='"+str(oper_log_pref)+"1' or modo='"+str(oper_log_pref)+"2') and tipo_doc='"+str(doc_tipo)+"' and estado='1'"
query_trans.append(sql)
estado=query(query_trans,5)
if estado == 1:
return 1
else:
return -1
else:
return 0
def anulacion_ventas():#doc_modo=1,doc_tipo=6,oper_log_pref='1'):
sql="select documento,nombre from documentos_comerciales where modo=5 and documento!='' order by documento"
cuenta,resultado=query(sql,1)
if cuenta>0:
cod_doc,dscp_doc=ladocl(resultado,'Tipo')
if cod_doc == 'Anular':
return 0
filtro,temp_filt=ing_dat('No',0)
if len(filtro)>0:
temp = filtro.split('-')
if len(temp) == 2:
n_doc_prefijo=str(temp[0])
n_doc_base=str(temp[1])
query_trans=[]
sql="select concat(n_doc_prefijo,'-',n_doc_base),concat(codigo,'-',cantidad,'->',total) from docventa where estado='B' and comprobante='"+str(cod_doc)+"' and n_doc_prefijo='"+n_doc_prefijo+"' and n_doc_base='"+n_doc_base+"'"
cuenta,resultado=query(sql,1)
if cuenta>0:
n_doc,op_doc=ladocl(resultado,'No')
if op_doc == 'Anular':
return 0
mensaje="Anular el Documento:"+str(n_doc)+",Esta Seguro?"
resp=segur(mensaje)
if resp == 'si':
temp_doc = n_doc.split('-')
n_pre=temp_doc[0]
n_doc=temp_doc[1]
sql="update docventa set estado='A',cv_anul='"+str(codven)+"' where n_doc_base='"+str(n_doc)+"' and n_doc_prefijo='"+str(n_pre)+"' and comprobante='"+str(cod_doc)+"'"
query_trans.append(sql)
estado=query(query_trans,5)
if estado == 1:
return 1
else:
return -1
else:
mensaje="Desea registrar este documento como Anulado?"
resp=segur(mensaje)
if resp == 'si':
fecha=fecha_ing(1,'t')
if fecha == 'Anular':
return 0
sql="insert into docventa (n_doc_prefijo,n_doc_base,comprobante,condicion_comercial,estado,cv_ing,cv_anul,fecha_vta) values ('"+n_doc_prefijo+"','"+n_doc_base+"','"+str(cod_doc)+"','1','A','"+str(codven)+"','"+str(codven)+"','"+str(fecha)+"')";
query_trans.append(sql)
estado=query(query_trans,5)
if estado == 1:
return 1
else:
return -1
return 0
def reimpresion_guia(doc_modo=1,doc_tipo=6,oper_log_pref='1'):
n_doc_prefijo=''
n_doc_base=''
filtro,temp_filt=ing_dat('Guia',0)
if len(filtro)>0:
temp = filtro.split('-')
if len(temp) == 2:
n_doc_prefijo=str(temp[0])
n_doc_base=str(temp[1])
query_trans=[]
sql = """select concat(n_doc_prefijo,'-',n_doc_base),
concat(almacen_origen,'/',almacen_destino,'-',
operacion_logistica,'=',codbarras,'->',ingreso,salida) from
almacenes where tipo_doc='%s' and estado='1' and
n_doc_prefijo='%s' and n_doc_base='%s' and modo_doc='%s' and
almacen='%s'""" % (doc_tipo,n_doc_prefijo,n_doc_base,
doc_modo,alm_base)
cuenta,resultado=query(sql,1)
if cuenta>0:
n_doc,op_doc=ladocl(resultado,'Guias')
if op_doc == 'Anular':
return 0
mensaje="Reimprimir el Documento: %s, Esta Seguro?" % n_doc
resp=segur(mensaje)
if resp == 'si':
temp_doc = n_doc.split('-')
n_pre=temp_doc[0]
n_doc=temp_doc[1]
if doc_tipo == 6:
impresion_guia_interna(doc_modo,doc_tipo,oper_log_pref,n_pre,n_doc)
elif doc_tipo == 5:
impresion_guia_externa(doc_modo,doc_tipo,oper_log_pref,n_pre,n_doc)
#def impresion_guia_interna(productos,fecha,hora,documento,operacion,almacen,prefijo,num_doc):
def impresion_guia_interna(doc_modo=1,doc_tipo=6,oper_log_pref='1',prefijo='',num_doc='',port_imp='',layout=''):
sql="""select codbarras,if(modo=1,round(ingreso,2),round(salida,2)),
date(fecha_doc),time(tiempo),operacion_logistica,modo,
almacen_origen,almacen_destino,turno from almacenes where
estado='1' and n_doc_prefijo='%s' and n_doc_base='%s' and
modo_doc='%s' and almacen='%s'""" % (prefijo,num_doc,doc_modo,
alm_base)
cuenta,resultado=query(sql)
productos=[]
for parte in resultado:
temporal=[]
temporal.append(str(parte[0]))
temporal.append(str(parte[1]))
productos.append(temporal)
fecha=str(parte[2])
# hora=str(parte[3])
operacion=str(parte[4])
modo=str(parte[5])
if modo[-1] == '1':
almacen=str(parte[6])
documento='Nota de Ingreso'
else:
almacen=str(parte[7])
documento='Nota de Salida'
turno=str(parte[8])
fecha=fecha[:10]
fecha_impresion=time.strftime("%Y-%m-%d")
hora_impresion=time.strftime("%H:%M:%S")
# hora=hora[:8]
sql = """select descripcion from operaciones_logisticas where
id='%s'""" % operacion
cuenta,resultado=query(sql,0)
if cuenta>0:
descripcion_operacion=resultado[0]
descripcion_operacion=descripcion_operacion[:20]
else:
descripcion_operacion=operacion
sql = """select descripcion from almacenes_lista where
id='%s'""" % almacen
cuenta,resultado=query(sql,0)
if cuenta>0:
descripcion_almacen=resultado[0]
descripcion_almacen=descripcion_almacen[:21]
else:
descripcion_almacen=almacen
for n_cop in range(1,3):
if n_cop == 1:
doc_tipo='ORIGINAL'
else:
doc_tipo='COPIA'
n_arch="guia_int_"+str(prefijo)+"-"+str(num_doc)+".gui"
archivo=open(n_arch,"w")
linea=string.ljust(empresa,20)+string.center(' ',17)+string.center(documento,40)
archivo.write(linea+'\n')
linea=string.ljust('',20)+string.center(' ',17)+string.center(descripcion_operacion,40)
archivo.write(linea+'\n')
linea=string.ljust('',20)+string.center(' ',17)+string.center('N:'+str(prefijo)+'-'+str(num_doc),40)
archivo.write(linea+'\n')
linea=string.ljust('Fecha:'+str(fecha),17)+string.ljust('Alm:'+str(almacen)+'-'+str(descripcion_almacen),30)+string.ljust('Oper:'+operacion+'-'+descripcion_operacion,30)
archivo.write(linea+'\n')
raya=77*'-'
archivo.write(raya+'\n')
linea=string.center('CODIGO',10)+string.center('UNI',3)+string.center('CANTIDAD',10)+' '+string.center('DESCRIPCION',53)
archivo.write(linea+'\n')
archivo.write(raya+'\n')
cnt=0
for linea in productos:
cnt+=1
codigo=str(linea[0])
cantidad=str(linea[1])
sql = """select ucase(unm.codigo),if(length(mae.alias)>0,
mae.alias,concat(mae.nombre,' ',mae.descripcion)) from
maestro mae left join unidades_medida unm on
unm.id=mae.unidad_medida where
mae.id='%s'""" % (codigo)
cuenta,resultado=query(sql,0)
unidad_med=resultado[0]
descripcion=resultado[1]
linea=string.center(codigo,10)+string.center(unidad_med,3)+string.rjust(cantidad,10)+' '+string.ljust(descripcion,53)
archivo.write(linea+'\n')
for parte in range(cnt,20):
archivo.write('\n')
archivo.write(raya+'\n')
linea=string.ljust(str(doc_tipo),37)+string.center('Despacho',20)+string.center('Recibo',20)
archivo.write(linea+'\n')
linea=string.ljust('Fecha '+str(fecha_impresion),20)+string.ljust('Hora '+str(hora_impresion),20)+string.ljust('Turno '+str(turno),20)
archivo.write(linea+'\n\n\n\n')
archivo.close()
os.system('lpr '+print_buffer+' '+n_arch)
time.sleep(1)
os.remove(n_arch)
#def impresion_guia_externa(productos,fecha,almacen,transportista,vehiculo,prefijo,num_doc,):
def impresion_guia_externa(doc_modo=1,doc_tipo=5,oper_log_pref='1',prefijo='',num_doc='',port_imp='',layout=''):
    """Print an external goods-dispatch note ("guia externa").

    Resolves the print layout for the document type from the
    ``guias_plantillas`` table, collects the stock-movement lines for the
    given document number, renders a fixed-width text file
    (``guia_ext_<prefijo>-<num_doc>.gui``), sends it to the printer with
    ``lpr`` and finally deletes the temporary file.

    Parameters:
        doc_modo -- movement mode (a mode string ending in '1' is treated
            as inbound and uses the origin warehouse, otherwise the
            destination warehouse is used)
        doc_tipo -- commercial document type id
        oper_log_pref -- logistic-operation prefix (unused in this body)
        prefijo, num_doc -- document number prefix and base number
        port_imp, layout -- printer port / layout hints (unused here; the
            layout is taken from the database instead)

    NOTE(review): relies on module-level names ``query``, ``alm_base``,
    ``print_buffer``, ``string``, ``os`` and ``time`` -- confirm they are
    defined where this module is loaded.
    """
    #<--- change (section under revision)
#    sql="select codigo,descripcion from guias_plantillas where modo='"+str(doc_modo)+"' and documento='"+str(doc_tipo)+"'"
    # Resolve which print layout to use for this mode/document type.
    sql="select tipo from guias_plantillas where modo='"+str(doc_modo)+"' and documento='"+str(doc_tipo)+"'"
    cuenta,resultado=query(sql)
    if cuenta == 1:
        layout_guia=int(resultado[0][0])
#        codigo=resultado[0][0]
#        if codigo == '101':
#            layout_guia=0
#        elif codigo == '401':
#            layout_guia=1
#        else:
#            layout_guia=0
    else:
        # no (or ambiguous) template configured: fall back to layout 0
        layout_guia=0
    #---> change (end of section under revision)
#    print cuenta,resultado,layout_guia,codigo
#    sys.exit()
    # Fetch every active stock-movement line belonging to this document.
    sql = """select codbarras,if(modo=1,round(ingreso,2),
    round(salida,2)),fecha_doc,tiempo,operacion_logistica,modo,
    almacen_origen,almacen_destino,transportista,vehiculo,turno,
    observaciones from almacenes where estado='1' and
    n_doc_prefijo='%s' and n_doc_base='%s' and modo_doc='%s' and
    almacen='%s'""" % (prefijo,num_doc,doc_modo,alm_base)
    cuenta,resultado=query(sql)
    productos=[]
    for parte in resultado:
        temporal=[]
        temporal.append(str(parte[0]))
        temporal.append(str(parte[1]))
        productos.append(temporal)
        # Header fields are overwritten on every iteration; the values of
        # the last row win (all rows of one document share them anyway).
        fecha=str(parte[2])
        hora=str(parte[3])
        operacion=str(parte[4])
        modo=str(parte[5])
        # Mode ending in '1' = inbound -> origin warehouse,
        # otherwise use the destination warehouse.
        if modo[-1] == '1':
            almacen=str(parte[6])
        else:
            almacen=str(parte[7])
        transportista=str(parte[8])
        vehiculo=str(parte[9])
        turno=str(parte[10])
        observac=str(parte[11])
    # Look up warehouse, carrier and vehicle details for the header.
    sql = """select descripcion,doc_id,direccion from almacenes_lista
    where id='%s'""" % almacen
    cuenta,resultado=query(sql,0)
    almacen_descripcion=str(resultado[0])
    ruc=str(resultado[1])
    direccion=str(resultado[2])
    sql = """select nombres,apellidos,direccion,emp_doc_id from
    transportistas where codigo='%s'""" % (transportista)
    cuenta,resultado=query(sql,0)
    transp_nombres=str(resultado[0])
    transp_apellidos=str(resultado[1])
    # NOTE(review): index 2 of this result is `direccion`, not
    # `emp_doc_id` -- confirm this assignment is intended.
    transp_emp=str(resultado[2])
    sql = """select registro from vehiculos where
    codigo='%s'""" % (vehiculo)
    cuenta,resultado=query(sql,0)
    unidad_movil=str(resultado[0])
    # File generation (original comment: "Generacion de Archivo")
    n_arch="guia_ext_"+str(prefijo)+"-"+str(num_doc)+".gui"
    if layout_guia == 1:
        # Layout 1: tighter vertical spacing in the header block.
        archivo=open(n_arch,"w")
        archivo.write('\n\n\n\n\n\n')
        linea=string.rjust('MACRA S.A.-Tda:'+almacen_descripcion+'-'+str(turno),45)
        archivo.write(linea+'\n')
        linea=string.rjust(observac,45)
        archivo.write(linea+'\n')
        linea=string.rjust('20144215649',20)+string.rjust(str(fecha[:10]),25)
        archivo.write(linea+'\n')
        linea=string.rjust(direccion,45)+string.center(str(prefijo)+'-'+str(num_doc),35)
        archivo.write(linea+'\n\n')
        linea=string.rjust('',50)+string.rjust(transp_nombres+' '+transp_apellidos,27)
        archivo.write(linea+'\n')
        linea=string.rjust('',50)+string.rjust('',27)
        archivo.write(linea+'\n')
        linea=string.rjust('',50)+string.rjust(transp_emp,27)
        archivo.write(linea+'\n')
        linea=string.rjust('',50)+string.rjust(unidad_movil,27)
        archivo.write(linea+'\n')
        archivo.write('\n\n\n')
        # One detail line per product: qty, unit, code-description.
        for linea in productos:
            codigo=str(linea[0])
            cantidad=str(linea[1])
            sql = """select ucase(unm.codigo),if(length(mae.alias)>0,
            mae.alias,concat(mae.nombre,' ',mae.descripcion)) from
            maestro mae left join unidades_medida unm on
            unm.id=mae.unidad_medida where
            mae.id='%s'""" % (codigo)
            cuenta,resultado=query(sql,0)
            unidad_med=resultado[0]
            descripcion=resultado[1]
            linea=string.rjust(cantidad,10)+' '+string.ljust(unidad_med,10)+string.ljust(codigo+'-'+descripcion,40)
            archivo.write(linea+'\n')
        archivo.close()
    else:
        # Default layout: wider vertical spacing in the header block.
        archivo=open(n_arch,"w")
        archivo.write('\n\n\n\n\n')
        linea=string.rjust('MACRA S.A.-Tda:'+almacen_descripcion+'-'+str(turno),45)
        archivo.write(linea+'\n')
        linea=string.rjust(observac,45)
        archivo.write(linea+'\n\n')
        linea=string.rjust('20144215649',20)+string.rjust(str(fecha[:10]),25)
        archivo.write(linea+'\n\n')
        linea=string.rjust(direccion,45)+string.center(str(prefijo)+'-'+str(num_doc),35)
        archivo.write(linea+'\n\n\n')
        linea=string.rjust('',50)+string.rjust(transp_nombres+' '+transp_apellidos,27)
        archivo.write(linea+'\n')
        linea=string.rjust('',50)+string.rjust('',27)
        archivo.write(linea+'\n')
        linea=string.rjust('',50)+string.rjust(transp_emp,27)
        archivo.write(linea+'\n')
        linea=string.rjust('',50)+string.rjust(unidad_movil,27)
        archivo.write(linea+'\n')
        archivo.write('\n\n\n')
        # One detail line per product: qty, unit, code-description.
        for linea in productos:
            codigo=str(linea[0])
            cantidad=str(linea[1])
            sql = """select ucase(unm.codigo),if(length(mae.alias)>0,
            mae.alias,concat(mae.nombre,' ',mae.descripcion)) from
            maestro mae left join unidades_medida unm on
            unm.id=mae.unidad_medida where
            mae.id='%s'""" % (codigo)
            cuenta,resultado=query(sql,0)
            unidad_med=resultado[0]
            descripcion=resultado[1]
            linea=string.rjust(cantidad,10)+' '+string.ljust(unidad_med,10)+string.ljust(codigo+'-'+descripcion,40)
            archivo.write(linea+'\n')
        archivo.close()
    # Send to the printer, give the spooler a moment, then clean up.
    os.system('lpr '+print_buffer+' '+n_arch)
    time.sleep(1)
    os.remove(n_arch)
def win_txt(panel,head,txt):
    """Render "<head>:<txt>" at row 1, column 1 of the panel's window.

    Depends on the global helpers `definir` (panel -> window) and
    `updat` (refresh the screen).  Returns None.
    """
    window = definir(panel)
    label = '%s:%s' % (str(head), str(txt))
    window.addstr(1, 1, label)
    updat()
def panel_input(head,panel,sql):
    """Prompt for a value in `panel`; return -1 if the user cancels.

    NOTE(review): when the user does not cancel, execution falls through
    and the function implicitly returns None, discarding the entered
    value; the `sql` parameter is never used.  This looks unfinished --
    confirm against the call sites before changing.
    """
    dato=ingresodato(head,panel,10,'',1,0)
    if dato == 'Anular':
        return -1
def linea_dato(msg,win,pan,texto='',pos_y=1):
    """Inline single-line text editor drawn on a curses window.

    Shows `msg` as a prompt at row `pos_y`, optionally pre-fills `texto`,
    then reads keys via the global `obch` helper until:
      * Enter  -> returns the entered string,
      * Escape -> returns the literal string 'Anular'.
    Accepted input characters are alphanumerics plus '-', '.', ' ' and
    '&'; navigation keys are ignored.

    NOTE(review): depends on the module-level helpers `updat` and `obch`.
    """
    texto=str(texto)
    size_y,size_x=win.getmaxyx()
    ubic_x=len(msg)+3          # cursor column: prompt width + padding
    tam_real_x=size_x-2        # usable width inside the window border
    txt_pre=0                  # 1 -> the preset text still has to be drawn
    if len(texto)>0:
        dato=texto
        txt_pre=1
    else:
        dato=''
    win.addstr(pos_y,1,msg)
    while 1:
        # wrap the cursor back when it would leave the usable area
        if ubic_x>=tam_real_x:
            ubic_x=len(msg)+3
        if txt_pre == 1:
            # draw the preset text once and move the cursor past it
            win.addstr(pos_y,ubic_x,dato)
            ubic_x+=len(dato)
            txt_pre=0
        updat()
        caracter=obch(pan,pos_y,ubic_x,'v',0)
        if caracter == 'enter':
            return dato
        elif caracter == 'escape':
            return 'Anular'
        elif caracter == 'arriba' or caracter == 'abajo' or caracter == 'insert' or caracter == 'spag' or caracter == 'ppag' or caracter == 'derecha' or caracter == 'izquierda':
            # navigation keys are ignored
            pass
        elif caracter == 'backspace':
            # step back (never before the prompt), drop the last char and
            # blank it out on screen
            ubic_x-=1
            if ubic_x<=len(msg)+3:
                ubic_x=len(msg)+3
            dato=dato[:-1]
            win.addstr(pos_y,ubic_x,' ')
            caracter=''
        elif (caracter>='0' and caracter<='9') or (caracter>='a' and caracter<='z') or (caracter>='A' and caracter<='Z') or (caracter == '-') or (caracter == '.') or (caracter == ' ') or (caracter == '&'):
            ubic_x+=1
            dato+=str(caracter)
            # clamp input to the usable width of the window
            if ubic_x >=(tam_real_x):
                ubic_x=tam_real_x
                dato=dato[:tam_real_x]
def ing_dat(msg,min=4,long=20,posy=-1,posx=-1):
    """Open a small (centred by default) curses panel and read a value.

    Loops until the user enters a value of at least `min` characters
    whose type, as classified by the global `expresion` helper, is
    'entero', 'alfanumerico' or 'caracter'.  Returns the tuple
    (entered_string, detected_type).

    Parameters: msg -- prompt text; min -- minimum accepted input length;
    long -- width of the input field; posy/posx -- panel position
    (-1 = centre on screen).

    NOTE(review): `min` and `long` shadow builtins but are part of the
    public keyword interface, so they are kept.  `maxy`/`maxx`,
    `mkpanel`, `definir`, `updat` and `expresion` are module globals.
    """
    cuenta=len(msg)+long        # total panel width: prompt + input field
    tny=3                       # panel height
    if posy == -1:
        posy=(maxy-tny)/2       # centre vertically by default
    if posx == -1:
        tnx=cuenta
        posx=(maxx-tnx)/2       # centre horizontally by default
    pan=mkpanel(curses.COLOR_WHITE,tny,cuenta,posy,posx)
    win=definir(pan)
#    win,y,x=definewin(pan)
    curses.curs_set(1)
    curses.echo()
    win.addstr(0,0,msg)
    updat()
    while 1:
        linea=win.getstr(1,1)
        valor,tipdat=expresion(linea)
        if tipdat == 'entero' and len(linea)>=min:
            # valid integer: restore terminal state and return
            curses.noecho()
            curses.curs_set(0)
            win.erase()
            updat()
            return linea,tipdat
        elif (tipdat == 'alfanumerico' or tipdat == 'caracter') and len(linea)>=min:
            # valid text: restore terminal state and return
            curses.noecho()
            curses.curs_set(0)
            win.erase()
            updat()
            return linea,tipdat
def directory_check(msg):
    """Look up a directory entry interactively.

    Prompts with `msg` via `ing_dat`; a numeric entry is matched as a
    document-id prefix, anything else as a substring of the short name
    (upper-cased).  Returns (id, short_name) chosen through `ladocl`,
    or ('ND', 'ND') when nothing matches.
    """
    entrada, tipo_dato = ing_dat(msg)
    if tipo_dato == 'entero':
        filtro = "doc_id like '%s%%'" % (entrada)
    else:
        filtro = "nombre_corto like '%%%s%%'" % entrada.upper()
    consulta = "select id,nombre_corto from directorio where %s order by doc_id" % (filtro)
    encontrados, filas = query(consulta)
    if encontrados > 0:
        return ladocl(filas, msg)
    return 'ND', 'ND'
def ventas_proc(txt_fld=8,fech_cnt=1,fech_hea='t'):
panel_top,panel_text_1,panel_text_2,panel_text_3,panel_text_4,panel_text_5,panel_text_6,panel_text_7,panel_text_8,panel_mid=win_def(txt_fld)#maxy,maxx
fecha=fecha_ing(fech_cnt,fech_hea)
if fecha == 'Anular':
return 0
tienda_head='Tienda'
caja_head='Caja'
doc_head='Tipo Doc'
cond_head='Condicion'
cliente=''
detalle_prod=''
ing_detalle=segur('Ingresos con Detalles?')
if ing_detalle == 'si':
ing_detalle=1
else:
ing_detalle=0
while 1:
tienda=ingresodato(tienda_head,panel_text_1,10,'',1,0)
if tienda == 'Anular':
return 0
sql="select codigo,nombre from puntos_venta where (codigo='"+str(tienda)+"' or nombre like '%"+str(tienda)+"%') and (codigo!='' or nombre!='') order by codigo"
cuenta,resultado=query(sql)
if cuenta == 1:
tienda=resultado[0][0]
tienda_dscp=resultado[0][1]
break
elif cuenta>1:
tienda,tienda_dscp=ladocl(resultado,'Punto de Venta')
if tienda == 'Anular':
return 0
else:
break
win_txt(panel_text_1,tienda_head,tienda_dscp)
sql="select distinct(documento),nombre from documentos_comerciales where modo=5 order by documento"
cuenta,resultado=query(sql)
if cuenta>0:
doc_tipo,doc_tipo_dscp=ladocl(resultado,'Documento')
if doc_tipo == 'Anular':
return 0
sql="select copia,detalle,impresion from documentos_comerciales where modo=5 and documento='"+str(doc_tipo)+"'"
cuenta,resultado=query(sql,0)
if cuenta>0:
doc_tipo_copia=str(resultado[0])
doc_tipo_detalle=str(resultado[1])
doc_tipo_impresion=str(resultado[2])
if doc_tipo_detalle == '1':
while 1:
cliente, nomb_cliente = directory_check("Cliente")
if cliente == 'ND':
segur("Debe Registrar el Cliente")
elif cliente == 'Anular':
return 0
else:
break
else:
return 0
texto_head='Ventas: Tipo:'+str(doc_tipo_dscp)+' - Cliente:'+str(cliente)+' - Fecha:'+str(fecha[:10])
winhead(texto_head,panel_top)
while 1:
num_doc=ingresodato('No.Doc',panel_text_2,10,'',1,0)
if num_doc == 'Anular':
break
doc_part = num_doc.split('-')
try:
if len(doc_part) == 2:
num_doc_pre=str(doc_part[0])
num_doc=int(doc_part[1])
else:
num_doc_pre=''
num_doc=int(doc_part[0])
break
except Exception,error:
pass
if num_doc == 'Anular':
return 0
sql = """select doc.codigo,concat(mae.nombre,' ',mae.descripcion),
doc.cantidad,doc.precio,(doc.cantidad*doc.precio) as importe
from docventa doc left join maestro mae on
mae.id=doc.codigo where doc.n_doc_prefijo='%s' and
doc.n_doc_base='%s' and doc.comprobante='%s' and
doc.pv='%s'""" % (num_doc_pre, num_doc, doc_tipo, tienda)
lineas,tipo=datos_cons(sql)
if len(lineas) == 0:
sql = """select codigo,descripcion from condiciones_comerciales
where modo=0 and condicion!='' order by codigo"""
cuenta,resultado=query(sql)
if cuenta>0:
cond_com,cond_com_dscp=ladocl(resultado,'Condicion')
if cond_com == 'Anular':
return 0
else:
return 0
win_txt(panel_text_3,cond_head,cond_com_dscp)
while 1:
factor_tmp='1.0'
factor=ingresodato('Factor',panel_text_4,10,factor_tmp,1,0)
try:
factor=float(factor)
break
except:
pass
else:
sql="select | |
# -*- coding: utf-8 -*-
"""
pygments.lexers.templates
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for various template engines' markup.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.web import \
PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer, LassoLexer
from pygments.lexers.agile import PythonLexer, PerlLexer
from pygments.lexers.compiled import JavaLexer
from pygments.lexers.jvm import TeaLangLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
include, using, this
from pygments.token import Error, Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml
__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
'ColdfusionHtmlLexer', 'VelocityLexer', 'VelocityHtmlLexer',
'VelocityXmlLexer', 'SspLexer', 'TeaTemplateLexer', 'LassoHtmlLexer',
'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer']
class ErbLexer(Lexer):
    """
    Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
    lexer.

    Just highlights ruby code between the preprocessor directives, other data
    is left untouched by the lexer.

    All options are also forwarded to the `RubyLexer`.
    """

    name = 'ERB'
    aliases = ['erb']
    mimetypes = ['application/x-ruby-templating']

    # Matches every ERB delimiter plus whole "% ..." ruby lines (re.M).
    _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)

    def __init__(self, **options):
        # imported lazily to avoid a circular import at module load time
        from pygments.lexers.agile import RubyLexer
        self.ruby_lexer = RubyLexer(**options)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """
        Since ERB doesn't allow "<%" and other tags inside of ruby
        blocks we have to use a split approach here that fails for
        that too.
        """
        # The regex split alternates raw text and delimiter tokens;
        # reversing lets us consume them with O(1) pop() in order.
        tokens = self._block_re.split(text)
        tokens.reverse()
        # state: 0 = expecting plain text, 1 = expecting a delimiter,
        # 2 = inside a ruby block (expecting the closing tag)
        state = idx = 0
        try:
            while True:
                # text
                if state == 0:
                    val = tokens.pop()
                    yield idx, Other, val
                    idx += len(val)
                    state = 1
                # block starts
                elif state == 1:
                    tag = tokens.pop()
                    # literals
                    if tag in ('<%%', '%%>'):
                        yield idx, Other, tag
                        idx += 3
                        state = 0
                    # comment
                    elif tag == '<%#':
                        yield idx, Comment.Preproc, tag
                        val = tokens.pop()
                        yield idx + 3, Comment, val
                        idx += 3 + len(val)
                        state = 2
                    # blocks or output
                    elif tag in ('<%', '<%=', '<%-'):
                        yield idx, Comment.Preproc, tag
                        idx += len(tag)
                        data = tokens.pop()
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                                self.ruby_lexer.get_tokens_unprocessed(data):
                            yield r_idx + idx, r_token, r_value
                        idx += len(data)
                        state = 2
                    elif tag in ('%>', '-%>'):
                        # closing tag without an opening one is an error
                        yield idx, Error, tag
                        idx += len(tag)
                        state = 0
                    # % raw ruby statements
                    else:
                        yield idx, Comment.Preproc, tag[0]
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                                self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                            yield idx + 1 + r_idx, r_token, r_value
                        idx += len(tag)
                        state = 0
                # block ends
                elif state == 2:
                    tag = tokens.pop()
                    if tag not in ('%>', '-%>'):
                        yield idx, Other, tag
                    else:
                        yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    state = 0
        except IndexError:
            # token list exhausted -- end of input
            return

    def analyse_text(text):
        # NOTE: implicitly returns None when the markers are absent.
        if '<%' in text and '%>' in text:
            return 0.4
class SmartyLexer(RegexLexer):
    """
    Generic `Smarty <http://smarty.php.net/>`_ template lexer.

    Just highlights smarty code between the preprocessor directives, other
    data is left untouched by the lexer.
    """

    name = 'Smarty'
    aliases = ['smarty']
    filenames = ['*.tpl']
    mimetypes = ['application/x-smarty']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # anything up to the next '{' is plain template data
            (r'[^{]+', Other),
            # {* ... *} smarty comment
            (r'(\{)(\*.*?\*)(\})',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            # {php} ... {/php} embedded PHP block
            (r'(\{php\})(.*?)(\{/php\})',
             bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                      Comment.Preproc)),
            # {name ...} or {/name} function tag -> 'smarty' state
            (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)',
             bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
            (r'\{', Comment.Preproc, 'smarty')
        ],
        'smarty': [
            (r'\s+', Text),
            (r'\}', Comment.Preproc, '#pop'),
            # #config_variable#
            (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable),
            # $variable.member chains
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable),
            (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator),
            (r'(true|false|null)\b', Keyword.Constant),
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute)
        ]
    }

    def analyse_text(text):
        # Heuristic score based on common Smarty constructs.
        rv = 0.0
        if re.search('\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search('\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search('\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search('\{\$.*?\}', text):
            rv += 0.01
        return rv
class VelocityLexer(RegexLexer):
    """
    Generic `Velocity <http://velocity.apache.org/>`_ template lexer.

    Just highlights velocity directives and variable references, other
    data is left untouched by the lexer.
    """

    name = 'Velocity'
    aliases = ['velocity']
    filenames = ['*.vm','*.fhtml']

    flags = re.MULTILINE | re.DOTALL

    # identifier fragment shared by several rules below
    identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'

    tokens = {
        'root': [
            (r'[^{#$]+', Other),
            # #* ... *# block comment
            (r'(#)(\*.*?\*)(#)',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            # ## line comment
            (r'(##)(.*?$)',
             bygroups(Comment.Preproc, Comment)),
            # #directive( ... ) -> parse parameters
            (r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
             'directiveparams'),
            # #directive without a parameter list
            (r'(#\{?)(' + identifier + r')(\}|\b)',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
            # $var or ${var} reference
            (r'\$\{?', Punctuation, 'variable')
        ],
        'variable': [
            (identifier, Name.Variable),
            (r'\(', Punctuation, 'funcparams'),
            # .member access keeps us in the variable state
            (r'(\.)(' + identifier + r')',
             bygroups(Punctuation, Name.Variable), '#push'),
            (r'\}', Punctuation, '#pop'),
            # empty match: fall back out of the state
            (r'', Other, '#pop')
        ],
        'directiveparams': [
            (r'(&&|\|\||==?|!=?|[-<>+*%&\|\^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
             Operator),
            (r'\[', Operator, 'rangeoperator'),
            (r'\b' + identifier + r'\b', Name.Function),
            include('funcparams')
        ],
        'rangeoperator': [
            (r'\.\.', Operator),
            include('funcparams'),
            (r'\]', Operator, '#pop')
        ],
        'funcparams': [
            (r'\$\{?', Punctuation, 'variable'),
            (r'\s+', Text),
            (r',', Punctuation),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r"\b[0-9]+\b", Number),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
        ]
    }

    def analyse_text(text):
        # Heuristic score based on common Velocity constructs.
        rv = 0.0
        if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text):
            rv += 0.25
        if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text):
            rv += 0.15
        if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
            rv += 0.15
        if re.search(r'\$\{?[a-zA-Z_][a-zA-Z0-9_]*(\([^)]*\))?'
                     r'(\.[a-zA-Z0-9_]+(\([^)]*\))?)*\}?', text):
            rv += 0.01
        return rv
class VelocityHtmlLexer(DelegatingLexer):
    """Velocity lexer that delegates the non-template parts to `HtmlLexer`."""

    name = 'HTML+Velocity'
    aliases = ['html+velocity']
    alias_filenames = ['*.html','*.fhtml']
    mimetypes = ['text/html+velocity']

    def __init__(self, **options):
        # root lexer = HtmlLexer, language lexer = VelocityLexer
        DelegatingLexer.__init__(self, HtmlLexer, VelocityLexer, **options)
class VelocityXmlLexer(DelegatingLexer):
    """Velocity lexer that delegates the non-template parts to `XmlLexer`."""

    name = 'XML+Velocity'
    aliases = ['xml+velocity']
    alias_filenames = ['*.xml','*.vm']
    mimetypes = ['application/xml+velocity']

    def __init__(self, **options):
        # root lexer = XmlLexer, language lexer = VelocityLexer
        DelegatingLexer.__init__(self, XmlLexer, VelocityLexer, **options)

    def analyse_text(text):
        # Velocity score (slightly penalised), boosted when the body
        # actually looks like XML.
        score = VelocityLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            score += 0.5
        return score
class DjangoLexer(RegexLexer):
    """
    Generic `django <http://www.djangoproject.com/documentation/templates/>`_
    and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.

    It just highlights django/jinja code between the preprocessor directives,
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']

    flags = re.M | re.S

    tokens = {
        'root': [
            # plain template data up to the next '{'
            (r'[^{]+', Other),
            # {{ variable }} output
            (r'\{\{', Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{[*#].*?[*#]\}', Comment),
            # django comments
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Comment, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Text, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'block'),
            # any other {% tag %}
            (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword), 'block'),
            (r'\{', Other)
        ],
        # names/operators shared by the 'var' and 'block' states
        'varnames': [
            # |filter application
            (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Function)),
            # "is (not) test" expressions
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
             Keyword),
            (r'(loop|block|super|forloop)\b', Name.Builtin),
            (r'[a-zA-Z][a-zA-Z0-9_-]*', Name.Variable),
            (r'\.[a-zA-Z0-9_]+', Name.Variable),
            (r':?"(\\\\|\\"|[^"])*"', String.Double),
            (r":?'(\\\\|\\'|[^'])*'", String.Single),
            (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation)
        ]
    }

    def analyse_text(text):
        # Heuristic score based on common django/jinja constructs.
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv
class MyghtyLexer(RegexLexer):
    """
    Generic `myghty templates`_ lexer. Code that isn't Myghty
    markup is yielded as `Token.Other`.

    .. versionadded:: 0.6

    .. _myghty templates: http://www.myghty.org/
    """

    name = 'Myghty'
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']

    tokens = {
        'root': [
            (r'\s+', Text),
            # <%def name>...</%def> / <%method name>...</%method>
            (r'(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            # other <%tag>...</%tag> blocks contain Python
            (r'(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            # <& component call &>
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            # <&| component call with content &>
            (r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            # <% ... %> / <%! ... %> Python blocks
            (r'(<%!?)(.*?)(%>)(?s)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
            # line starting with '#' is a comment
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            # line starting with '%' is an eval line of Python
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PythonLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(Other, Operator)),
        ]
    }
class MyghtyHtmlLexer(DelegatingLexer):
    """Myghty lexer that delegates the non-template parts to `HtmlLexer`.

    .. versionadded:: 0.6
    """

    name = 'HTML+Myghty'
    aliases = ['html+myghty']
    mimetypes = ['text/html+myghty']

    def __init__(self, **options):
        # root lexer = HtmlLexer, language lexer = MyghtyLexer
        DelegatingLexer.__init__(self, HtmlLexer, MyghtyLexer, **options)
class MyghtyXmlLexer(DelegatingLexer):
    """Myghty lexer that delegates the non-template parts to `XmlLexer`.

    .. versionadded:: 0.6
    """

    name = 'XML+Myghty'
    aliases = ['xml+myghty']
    mimetypes = ['application/xml+myghty']

    def __init__(self, **options):
        # root lexer = XmlLexer, language lexer = MyghtyLexer
        DelegatingLexer.__init__(self, XmlLexer, MyghtyLexer, **options)
class MyghtyJavascriptLexer(DelegatingLexer):
    """Myghty lexer that delegates the non-template parts to `JavascriptLexer`.

    .. versionadded:: 0.6
    """

    name = 'JavaScript+Myghty'
    aliases = ['js+myghty', 'javascript+myghty']
    # NOTE(review): 'mygthy' in the last mimetype is misspelled but kept
    # verbatim -- existing consumers may rely on the registered string.
    mimetypes = ['application/x-javascript+myghty',
                 'text/x-javascript+myghty',
                 'text/javascript+mygthy']

    def __init__(self, **options):
        # root lexer = JavascriptLexer, language lexer = MyghtyLexer
        DelegatingLexer.__init__(self, JavascriptLexer, MyghtyLexer, **options)
class MyghtyCssLexer(DelegatingLexer):
"""
Subclass of the `MyghtyLexer` that highlights unlexer data
with the `CssLexer`.
.. versionadded:: 0.6
"""
name = 'CSS+Myghty'
aliases | |
<filename>Code/lib/mids_pytorch_model.py
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torchvision.transforms as T
import torchaudio
import librosa
import timm
from nnAudio import features
def get_wav_for_path_pipeline(path_names, sr):
    """Load audio files, apply sox effects and clip outlier amplitudes.

    Parameters
    ----------
    path_names : iterable of str
        Paths of the audio files to load.
    sr : int
        Target sample rate passed to ``librosa.load``.  When truthy, a
        gain normalisation and a 200 Hz high-pass are applied on top of
        the channel remix.  NOTE: a falsy ``sr`` would make the duration
        division below fail -- callers are expected to pass a rate.

    Returns
    -------
    (signals, total_seconds)
        ``signals`` is a list of torch tensors of shape (1, n_samples);
        ``total_seconds`` is the summed duration of all signals.
    """
    x = []
    signal_length = 0
    # "remix 1" collapses the input to its first channel only.
    effects = [
        ["remix", "1"]
    ]
    if sr:
        effects.extend([
            #["bandpass", "400", "1000"],
            #["rate", str(sr)],
            ['gain', '-n'],       # normalise gain
            ["highpass", "200"],  # remove low-frequency rumble
        ])
    for path in path_names:
        signal, rate = librosa.load(path, sr=sr)
        waveform, _ = torchaudio.sox_effects.apply_effects_tensor(
            torch.tensor(signal).expand([2, -1]), sample_rate=rate,
            effects=effects)
        f = waveform[0]
        # Single pass over the waveform (the original called
        # torch.std_mean twice to get std and mean separately).
        st, mu = torch.std_mean(f)
        # Clip amplitudes to mean +/- 3*std to suppress spikes/outliers.
        signal = torch.clamp(f, min=mu - st * 3, max=mu + st * 3).unsqueeze(0)
        x.append(signal)
        signal_length += len(signal[0]) / sr
    return x, signal_length
class Normalization():
    """Min-max normalisation for spectrogram batches.

    Two modes are supported:

    * ``'framewise'`` -- min/max computed per frame (along dim 1); frames
      that are constant would produce NaN and are mapped to 0.
    * ``'imagewise'`` -- min/max computed over each whole spectrogram.

    (The original authors note 'imagewise' worked better in their paper.)

    Raises
    ------
    ValueError
        If ``mode`` is neither 'framewise' nor 'imagewise'.  (The
        original code only printed a message and then crashed with a
        NameError on first use; failing fast is strictly better.)
    """
    def __init__(self, mode='framewise'):
        if mode == 'framewise':
            def normalize(x):
                # Per-frame extrema along dim 1; keepdim for broadcasting.
                x_max = x.max(1, keepdim=True)[0]
                x_min = x.min(1, keepdim=True)[0]
                # A constant frame yields 0/0 -> NaN; map those to 0.
                output = (x - x_min) / (x_max - x_min)
                output[torch.isnan(output)] = 0
                return output
        elif mode == 'imagewise':
            def normalize(x):
                size = x.shape
                # Flatten each (F, T) image to find its global extrema.
                x_max = x.reshape(size[0], size[1] * size[2]).max(1, keepdim=True)[0]
                x_min = x.reshape(size[0], size[1] * size[2]).min(1, keepdim=True)[0]
                x_max = x_max.unsqueeze(1)  # make it broadcastable
                x_min = x_min.unsqueeze(1)  # make it broadcastable
                return (x - x_min) / (x_max - x_min)
        else:
            raise ValueError(
                "mode must be 'framewise' or 'imagewise', got %r" % (mode,))
        self.normalize = normalize

    def __call__(self, x):
        return self.normalize(x)
# +
def pcen(x, eps=1e-6, s=0.025, alpha=0.98, delta=2, r=0.5, training=False):
    """Per-Channel Energy Normalization (PCEN).

    Applies a first-order IIR smoother along the frame axis followed by
    adaptive gain control and root compression.

    Parameters: x -- spectrogram batch with frames along dim -2
    (presumably (batch, time, freq) -- TODO confirm against callers);
    eps/s/alpha/delta/r -- PCEN constants (smoother coefficient s, gain
    exponent alpha, bias delta, root r); training -- if True use
    out-of-place ops so autograd can track gradients, if False use
    in-place ops to save memory.  NOTE: the non-training path mutates x.
    """
    # Split into single frames along the time axis.
    frames = x.split(1, -2)
    m_frames = []
    last_state = None
    # IIR low-pass filter: M[t] = (1 - s) * M[t-1] + s * x[t].
    for frame in frames:
        if last_state is None:
            # first frame initialises the filter state
            last_state = s * frame
            m_frames.append(last_state)
            continue
        if training:
            m_frame = ((1 - s) * last_state).add_(s * frame)
        else:
            m_frame = (1 - s) * last_state + s * frame
        last_state = m_frame
        m_frames.append(m_frame)
    # NOTE(review): split is along dim -2 but cat is along dim 1 --
    # equivalent only for 3-D input; confirm x is always 3-D.
    M = torch.cat(m_frames, 1)
    if training:
        # out-of-place version keeps the autograd graph intact
        pcen_ = (x / (M + eps).pow(alpha) + delta).pow(r) - delta ** r
    else:
        # in-place version saves memory at inference time (mutates x!)
        pcen_ = x.div_(M.add_(eps).pow_(alpha)).add_(delta).pow_(r).sub_(delta ** r)
    return pcen_
class PCENTransform(nn.Module):
    """Trainable PCEN layer wrapping :func:`pcen`.

    When ``trainable`` is True the four PCEN constants (s, alpha, delta,
    r) are learned in log space, which keeps their effective values
    positive; otherwise they are stored as fixed floats.
    """

    def __init__(self, eps=1e-6, s=0.025, alpha=0.98, delta=2, r=0.5, trainable=True):
        super().__init__()
        if trainable:
            # register each constant as a learnable log-space parameter
            for attr, value in (('log_s', s), ('log_alpha', alpha),
                                ('log_delta', delta), ('log_r', r)):
                setattr(self, attr,
                        nn.Parameter(torch.log(torch.Tensor([value]))))
        else:
            self.s = s
            self.alpha = alpha
            self.delta = delta
            self.r = r
        self.eps = eps
        self.trainable = trainable

    def forward(self, x):
        if self.trainable:
            return pcen(x, self.eps,
                        torch.exp(self.log_s), torch.exp(self.log_alpha),
                        torch.exp(self.log_delta), torch.exp(self.log_r),
                        self.training and self.trainable)
        return pcen(x, self.eps, self.s, self.alpha, self.delta, self.r,
                    self.training and self.trainable)
# -
def plot_mids_MI(X_CNN, y, MI, p_threshold, root_out, filename, out_format='.png'):
    '''Produce plot of all mosquito detected above a p_threshold. Supply Mutual Information values MI, feature inputs
    X_CNN, and predictions y (1D array of 0/1s). Plot to be displayed on dashboard either via svg or as part of a
    video (looped png) with audio generated for this visual presentation.

    `out_format`: .png, or .svg

    Returns the full path of the image written to ``root_out/filename``.
    '''
    # indices of windows predicted positive above the threshold
    pos_pred_idx = np.where(y>p_threshold)[0]
    # top panel: spectrogram strip; bottom panel: probability/uncertainty
    fig, axs = plt.subplots(2, sharex=True, figsize=(10,5), gridspec_kw={
                            'width_ratios': [1],
                            'height_ratios': [2,1]})
#    x_lims = mdates.date2num(T)
#    date_format = mdates.DateFormatter('%M:%S')
#    axs[0].xaxis_date()
#    axs[0].xaxis.set_major_formatter(date_format)
    axs[0].set_ylabel('Frequency (kHz)')
    # concatenate the positively-predicted windows side by side;
    # the 0-4 extent maps rows to 0-4 kHz
    axs[0].imshow(np.hstack(X_CNN.squeeze()[pos_pred_idx]), aspect='auto', origin='lower',
                  extent = [0, len(pos_pred_idx), 0, 4], interpolation=None)
    axs[1].plot(y[pos_pred_idx], label='Probability of mosquito')
    axs[1].plot(MI[pos_pred_idx], '--', label='Uncertainty of prediction')
    axs[1].set_ylim([0., 1.02])
    axs[1].legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),
                  frameon=False, ncol=2)
#    axs[1].xaxis.set_major_formatter(date_format)
    axs[1].yaxis.set_label_position("right")
    axs[1].yaxis.tick_right()
    axs[0].yaxis.set_label_position("right")
    axs[0].yaxis.tick_right()
#    axs[1].set_xlim([t[0], t[-1]])
    axs[1].grid(which='major')
#    axs[1].set_xlabel('Time (mm:ss)')
    axs[1].xaxis.get_ticklocs(minor=True)
    axs[1].yaxis.get_ticklocs(minor=True)
    axs[1].minorticks_on()
    labels = axs[1].get_xticklabels()
    # NOTE(review): only the first label is blanked here despite the
    # original comment saying "first and the last" -- confirm intent.
    labels[0] = ""
    # set these new labels
    axs[1].set_xticklabels(labels)
#
    plt.subplots_adjust(top=0.985,
                        bottom=0.1,
                        left=0.0,
                        right=0.945,
                        hspace=0.065,
                        wspace=0.2)
#    plt.show()
    output_filename = os.path.join(root_out, filename) + out_format
    plt.savefig(output_filename, transparent=False)
    plt.close(plt.gcf()) # May be better to re-write to not use plt API
#    fig.autofmt_xdate()
    return output_filename
# +
# Subclass the pretrained model and make it a binary classification
class Model4(nn.Module):
    """Two-class audio classifier: trainable STFT front-end + timm backbone.

    The raw waveform is turned into a magnitude spectrogram by a
    trainable nnAudio STFT layer, passed through trainable PCEN and
    framewise min-max normalisation, resized to a square image and fed
    as a single channel into a timm model with two output classes.
    """
    def __init__(self, model_name, image_size=224, NFFT=512, n_hop=64, sr=8000):
        super().__init__()
        # Two-class head, single input channel, no pretrained weights.
        # (A stale comment here previously claimed num_classes=0.)
        self.backbone = timm.create_model(model_name,
                          pretrained=False, num_classes=2, in_chans=1,
                          drop_path_rate=0.05,
                          drop_rate=0.05)
        # Trainable STFT front-end (no layer freezing happens in this
        # model variant, unlike Model3).
        self.spec_layer = features.STFT(n_fft=NFFT, freq_bins=None, hop_length=n_hop,
                              window='hann', freq_scale='linear', center=True, pad_mode='reflect',
                              fmin=300, fmax=1600, sr=sr, output_format="Magnitude", trainable=True,
                              verbose=False)
        # Resize spectrogram to the square input size expected by timm.
        self.sizer = T.Resize((image_size,image_size))
        self.norm_layer = Normalization(mode='framewise')
        self.pcen_layer = PCENTransform(eps=1e-6, s=0.025, alpha=0.6, delta=0.1, r=0.2, trainable=True)

    def forward(self, x):
        # first compute spectrogram
        spec = self.spec_layer(x)  # (B, F, T)
        # PCEN gain control, then min-max normalisation
        spec = self.pcen_layer(spec)
        spec = self.norm_layer(spec)
        # then size for CNN model and create a channel dimension
        spec = self.sizer(spec)
        x = spec.unsqueeze(1)
        pred = self.backbone(x)
        # also return the (resized, normalised) spectrogram for plotting
        output = {"prediction": pred,
                  "spectrogram": spec}
        return output
# +
# Subclass the pretrained model and make it a binary classification
class Model3(nn.Module):
    """Three-channel audio classifier on a partially-frozen timm backbone.

    Builds three trainable time-frequency views of the waveform -- STFT
    magnitude (with PCEN), mel spectrogram and CQT -- stacks them as a
    3-channel image and feeds a pretrained timm model with two output
    classes.  Early backbone layers are frozen; only late-stage blocks
    (and the head) remain trainable.
    """

    # Substrings of parameter names that stay trainable (late blocks and
    # the classification head).  The original condition chain listed
    # 'blocks.26', 'blocks.22' and 'blocks.23' twice; this set is the
    # de-duplicated equivalent.
    _TRAINABLE_MARKERS = ('head', 'stages.3', 'layers.3', 'blocks.5.',
                          'blocks.17', 'blocks.18', 'blocks.19',
                          'blocks.20', 'blocks.21', 'blocks.22',
                          'blocks.23', 'blocks.24', 'blocks.25',
                          'blocks.26')

    def __init__(self, model_name, image_size=224, NFFT=512, n_hop=64, sr=8000):
        super().__init__()
        # Two-class head, three input channels, pretrained weights.
        self.backbone = timm.create_model(model_name,
                          pretrained=True, num_classes=2, in_chans=3,
                          drop_path_rate=0.2, global_pool='avgmax',
                          drop_rate=0.2)
        # Freeze every parameter except normalisation layers and the
        # late blocks/head named above.  NOTE: which names exist depends
        # on the chosen timm architecture.
        for name, param in self.backbone.named_parameters():
            if (param.requires_grad
                    and not name.startswith('norm')
                    and not any(marker in name
                                for marker in self._TRAINABLE_MARKERS)):
                param.requires_grad = False
        # Three trainable front-ends producing the three input channels.
        self.spec_layer = features.STFT(n_fft=NFFT, freq_bins=None, hop_length=n_hop,
                              window='hann', freq_scale='no', center=True, pad_mode='reflect',
                              fmin=300, fmax=4000, sr=sr, output_format="Magnitude", trainable=True)
        self.mel_layer = features.MelSpectrogram(n_fft=NFFT, n_mels=128, hop_length=n_hop,
                              window='hann', center=True, pad_mode='reflect',
                              sr=sr, trainable_mel=True, trainable_STFT=True)
        self.vqt_layer = features.CQT(sr=sr, hop_length=n_hop, fmin=32.7, fmax=None,
                                      n_bins=84, bins_per_octave=12, trainable=True)
        # Resize each view to the square input size expected by timm.
        self.sizer = T.Resize((image_size,image_size))
        self.norm_layer = Normalization(mode='framewise')
        self.pcen_layer = PCENTransform(eps=1e-6, s=0.025, alpha=0.6, delta=0.1, r=0.2, trainable=True)

    def forward(self, x):
        # Channel 1: STFT magnitude -> PCEN -> min-max normalisation.
        spec = self.spec_layer(x)  # (B, F, T)
        spec = self.pcen_layer(spec)
        spec = self.norm_layer(spec)
        spec = self.sizer(spec)
        spec = spec.unsqueeze(1)
        # Channel 2: mel spectrogram -> min-max normalisation.
        mel = self.mel_layer(x)
        mel = self.norm_layer(mel)
        mel = self.sizer(mel)
        mel = mel.unsqueeze(1)
        # Channel 3: constant-Q transform -> min-max normalisation.
        vqt = self.vqt_layer(x)
        vqt = self.norm_layer(vqt)
        vqt = self.sizer(vqt)
        vqt = vqt.unsqueeze(1)
        # Stack the three views as image channels.
        x = torch.cat([spec, mel, vqt], dim=1)
        pred = self.backbone(x)
        # also return the STFT view for plotting/debugging
        output = {"prediction": pred,
                  "spectrogram": spec}
        return output
# +
# Subclass the pretrained model and make it a binary classification
class Model(nn.Module):
    """Binary classifier: a single trainable linear-frequency STFT
    front-end feeding a one-channel timm backbone trained from scratch."""

    def __init__(self, model_name, image_size=224, NFFT=512, n_hop=64, sr=8000):
        super().__init__()
        self.backbone = timm.create_model(model_name,
                pretrained=False, num_classes=2, in_chans=1,
                drop_path_rate=0.1,
                drop_rate=0.1)
        # Trainable STFT front-end (nnAudio).
        self.spec_layer = features.STFT(n_fft=NFFT, freq_bins=None, hop_length=n_hop,
                              window='hann', freq_scale='linear', center=True, pad_mode='reflect',
                             fmin=300, fmax=3000, sr=sr, output_format="Magnitude", trainable=True)
        self.sizer = T.Resize((image_size, image_size))
        self.norm_layer = Normalization(mode='framewise')
        self.pcen_layer = PCENTransform(eps=1e-6, s=0.025, alpha=0.6, delta=0.1, r=0.2, trainable=True)

    def forward(self, x):
        """Return {'prediction', 'spectrogram'} for a waveform batch *x*."""
        spectrogram = self.spec_layer(x)  # (B, F, T)
        spectrogram = self.norm_layer(self.pcen_layer(spectrogram))
        spectrogram = self.sizer(spectrogram)
        prediction = self.backbone(spectrogram.unsqueeze(1))
        return {"prediction": prediction,
                "spectrogram": spectrogram}
# +
# Subclass the pretrained model and make it a binary classification
class Model1(nn.Module):
def __init__(self, model_name, image_size=224, NFFT=512, n_hop=64, sr=8000):
super().__init__()
self.backbone = timm.create_model(model_name,
pretrained=False, num_classes=2, in_chans=1,
drop_path_rate=0.1,
drop_rate=0.1)
self.spec_layer = features.MelSpectrogram(n_fft=NFFT, n_mels=128, hop_length=n_hop,
window='hann', center=True, pad_mode='reflect',
sr=sr, trainable_mel=True, trainable_STFT=True)
# self.spec_layer = features.MelSpectrogram(n_fft=config.NFFT, n_mels=128, hop_length=config.n_hop,
# window='hann', center=True, pad_mode='reflect',
# sr=config.rate, trainable_mel=True, trainable_STFT=True)
# self.out = nn.Linear(self.backbone.num_features, 1)
self.sizer = T.Resize((image_size,image_size))
self.norm_layer = Normalization(mode='framewise')
self.pcen_layer = PCENTransform(eps=1e-6, s=0.025, alpha=0.6, delta=0.1, r=0.2, trainable=True)
def | |
cur.execute("DELETE FROM articles WHERE id = %s", [id])
mysql.connection.commit()
cur.close()
try:
rmtree(r"C:\Users\OSAMA\Desktop\final\static\uploads\users\{}\articles\article{}".format(session['username'], id))
flash('Article Has Been Deleted Successfully', 'success')
except:
flash('Article Has Been Deleted Successfully', 'success')
return redirect(url_for('dashboard'))
# delete all articles
@app.route('/delete_all_articles', methods=['POST'])
@is_logged_in
def delete_all_articles():
    """Delete every article owned by the logged-in user, plus their
    uploaded article files on disk."""
    cur = mysql.connection.cursor()
    cur.execute("DELETE FROM articles WHERE author = %s", [session['username']])
    mysql.connection.commit()
    cur.close()
    try:
        rmtree(r"C:\Users\OSAMA\Desktop\final\static\uploads\users\{}\articles".format(session['username']))
        flash('All Articles Has Been Deleted Successfully', 'success')
    # Narrowed from a bare except: only the expected filesystem failure
    # (directory already gone) is swallowed.
    except OSError:
        flash('All Articles Has Been Already Deleted Successfully', 'success')
    return redirect(url_for('dashboard'))
# delete account and articles
@app.route('/delete_account', methods=['POST'])
@is_logged_in
def delete_account():
    """Remove the user's account row, all their articles and their upload
    directory, then end the session."""
    cur = mysql.connection.cursor()
    username = session['username']
    cur.execute("DELETE FROM users WHERE username = %s", [username])
    cur.execute("DELETE FROM articles WHERE author = %s", [username])
    mysql.connection.commit()
    cur.close()
    try:
        rmtree(r"C:\Users\OSAMA\Desktop\final\static\uploads\users\{}".format(username))
    # Narrowed from a bare except: the upload dir may never have existed.
    except OSError:
        pass
    # Session teardown and the flash happen regardless of disk cleanup
    # (the original duplicated these two lines in both branches).
    session.clear()
    flash('Your Account Has Been Deleted Successfully', 'success')
    return render_template('home.html')
# upload terms
# UPLOAD_FOLDER = r'C:\Users\OSAMA\Desktop\final\uploads'
# Absolute folder receiving ad-hoc uploads (lives under /static so the
# files are web-servable).
UPLOAD_FOLDER = r'C:\Users\OSAMA\Desktop\final\static\uploads'
# ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
# Only image uploads are accepted (enforced by allowed_file below).
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# check if file is in allowed extensions
def allowed_file(filename):
    """Return True when *filename* has an extension listed in
    ALLOWED_EXTENSIONS (case-insensitive)."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
# @app.route('/upload_file_locally', methods=['GET', 'POST'])
# @is_logged_in
# def upload_file_locally():
# if request.method == 'POST':
# if 'file' not in request.files:
# flash('No file part', 'warning')
# return redirect(request.url)
# file = request.files['file']
# if file.filename == '':
# flash('No selected file', 'warning')
# return redirect(request.url)
# if file and allowed_file(file.filename):
# filename = secure_filename(file.filename)
# session['upload_file_locally'] = filename
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# flash('Your File Has been uploaded Successfully', 'success')
# return redirect(url_for('download'))
# # return redirect(url_for('dashboard'))
# # return redirect(url_for('dashboard', filename=filename))
# else:
# flash('Not allowed file', 'warning')
# return redirect(request.url)
# return render_template('upload_locally.html')
# upload file locally and in database together -- profile picture --
@app.route('/upload_file_locally', methods=['GET', 'POST'])
@is_logged_in
def upload_file_locally():
    """Save an uploaded image into the shared uploads folder and flash the
    stored path back to the user."""
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No File Part', 'warning')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No Selected File', 'warning')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            flash('Your File Has Been Uploaded Successfully', 'success')
            # Build the path with os.path.join: the original formatted
            # "{}\{}", where "\{" is an invalid escape sequence.
            sor = os.path.join(UPLOAD_FOLDER, filename)
            flash(sor, 'success')
            return redirect(url_for('dashboard'))
        else:
            flash('Not Allowed File', 'warning')
            return redirect(request.url)
    return render_template('upload_locally.html')
# upload file to database -- profile picture --
@app.route('/upload', methods=['GET', 'POST'])
@is_logged_in
def upload_file():
    """Upload a profile picture: stores the file on disk and records the
    filename in the users table."""
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No File Part!', 'warning')
            return redirect(url_for('dashboard'))
        file = request.files['file']
        if file.filename == '':
            flash('No Selected File!', 'warning')
            return redirect(url_for('dashboard'))
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            dir = r"C:\Users\OSAMA\Desktop\final\static\uploads\users\{}\profile_picture".format(session['username'])
            # Recreate the picture directory only once the new file is
            # accepted -- the original wiped it even for rejected uploads,
            # and used a bare except around the cleanup.
            try:
                rmtree(dir)
            except OSError:
                pass
            os.makedirs(dir)
            file.save(os.path.join(dir, filename))
            cur = mysql.connection.cursor()
            cur.execute("UPDATE users SET files=%s WHERE username=%s", (filename, session['username']))
            mysql.connection.commit()
            cur.close()
            flash('Your File Has Been Uploaded Successfully!', 'success')
            return redirect(url_for('dashboard'))
        else:
            flash('Not Allowed File!', 'warning')
            return redirect(url_for('dashboard'))
    return render_template('upload.html')
# @app.route('/upload', methods=['GET', 'POST'])
# @is_logged_in
# def upload_file():
# if request.method == 'POST':
# if 'file' not in request.files:
# flash('No file part', 'warning')
# return redirect(url_for('dashboard'))
# file = request.files['file']
# if file.filename == '':
# flash('No selected file', 'warning')
# return redirect(url_for('dashboard'))
# if file and allowed_file(file.filename):
# filename = secure_filename(file.filename)
# cur = mysql.connection.cursor()
# cur.execute("UPDATE users SET files=%s WHERE username=%s", (filename, session['username']))
# mysql.connection.commit()
# cur.close()
# flash('Your File Has been uploaded Successfully', 'success')
# return redirect(url_for('dashboard'))
# # return redirect(url_for('upload_file', filename=filename))
# else:
# flash('Not allowed file', 'warning')
# return redirect(url_for('dashboard'))
# return render_template('upload.html')
# upload file to database -- article picture --
@app.route('/upload_file_article', methods=['GET', 'POST'])
@is_logged_in
def upload_file_article():
    """Upload an image for the article currently being edited and record
    its filename in the articles table."""
    id = session['edit_article_id']
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No File Part!', 'warning')
            return redirect(request.url)
        file = request.files['file']
        if file.filename == '':
            flash('No Selected File!', 'warning')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            dir = r"C:\Users\OSAMA\Desktop\final\static\uploads\users\{}\articles\article{}".format(session['username'], id)
            # Recreate the article directory only after the file is
            # accepted (the original emptied it even for rejected uploads)
            # and always with the same absolute path (the original mixed a
            # relative "static\..." path into one branch).
            try:
                rmtree(dir)
            except OSError:
                pass
            os.makedirs(dir)
            file.save(os.path.join(dir, filename))
            cur = mysql.connection.cursor()
            cur.execute("UPDATE articles SET files=%s WHERE id=%s", (filename, id))
            mysql.connection.commit()
            cur.close()
            flash('Article File Has Been Uploaded Successfully!', 'success')
            return redirect(url_for('dashboard'))
        else:
            flash('Not Allowed File!', 'warning')
            return redirect(request.url)
    return redirect(url_for('dashboard'))
# @app.route('/upload_file_article', methods=['GET', 'POST'])
# @is_logged_in
# def upload_file_article():
# id = session['edit_article_id']
# if request.method == 'POST':
# if 'file' not in request.files:
# flash('No file part')
# return redirect(request.url)
# file = request.files['file']
# if file.filename == '':
# flash('No selected file')
# return redirect(request.url)
# if file and allowed_file(file.filename):
# filename = secure_filename(file.filename)
# cur = mysql.connection.cursor()
# cur.execute("UPDATE articles SET files=%s WHERE id=%s", (filename, id))
# mysql.connection.commit()
# cur.close()
# flash('Article File Has been uploaded Successfully', 'success')
# return redirect(url_for('dashboard'))
# else:
# flash('Not allowed file', 'warning')
# return redirect(request.url)
# return redirect(url_for('dashboard'))
# delete profile file from database
@app.route('/delete_file', methods=['POST'])
@is_logged_in
def delete_file():
    """Clear the user's profile-picture record and remove the file on
    disk."""
    cur = mysql.connection.cursor()
    cur.execute("UPDATE users SET files='' WHERE username=%s", [session['username']])
    mysql.connection.commit()
    cur.close()
    dir = r"C:\Users\OSAMA\Desktop\final\static\uploads\users\{}\profile_picture".format(session['username'])
    try:
        rmtree(dir)
        flash('Your File Has Been Deleted Successfully!', 'success')
    # Narrowed from a bare except: only filesystem failures are expected.
    except OSError:
        flash('Your File Has Been Already Deleted Before!', 'warning')
    return redirect(url_for('dashboard'))
# delete article file from database
@app.route('/delete_article_file', methods=['POST'])
@is_logged_in
def delete_article_file():
    """Clear the file column of the article being edited and remove its
    upload directory from disk."""
    id = session['edit_article_id']
    cur = mysql.connection.cursor()
    cur.execute("UPDATE articles SET files='' WHERE id=%s", [id])
    mysql.connection.commit()
    cur.close()
    dir = r"C:\Users\OSAMA\Desktop\final\static\uploads\users\{}\articles\article{}".format(session['username'], id)
    try:
        rmtree(dir)
        flash('Article File Has Been Deleted Successfully!', 'success')
    # Narrowed from a bare except: only filesystem failures are expected.
    except OSError:
        flash('Article File Has Been Already Deleted Before!', 'warning')
    return redirect(url_for('dashboard'))
# general search bar in navbar
@app.route('/search', methods=['GET', 'POST'])
@is_logged_in
def search():
    """Substring search over article author, title and id."""
    if request.method == "POST":
        # One shared LIKE pattern; pass plain strings as the parameters
        # (the original wrapped each one in a single-element list, which
        # is not a valid DB-API parameter value).
        pattern = "%" + request.form['search'] + "%"
        cur = mysql.connection.cursor()
        result = cur.execute("SELECT * FROM `osama_blog`.`articles` \
                    WHERE( CONVERT(`author` USING utf8)\
                    LIKE %s OR CONVERT(`title` USING utf8)\
                    LIKE %s OR CONVERT(`id` USING utf8)\
                    LIKE %s)", [pattern, pattern, pattern])
        articles = cur.fetchall()
        cur.close()
        if result > 0:
            return render_template('search.html', articles=articles)
        else:
            msg = 'No Articles Found'
            return render_template('search.html', msg=msg)
    return render_template('search.html')
# searches page
@app.route('/searches', methods=['GET', 'POST'])
@is_logged_in
def searches():
    # Landing page that links to the author / title / dropdown search forms.
    return render_template('searches.html')
# search in authors only
@app.route('/search_author', methods=['GET', 'POST'])
@is_logged_in
def search_author():
    """Substring search over article authors."""
    if request.method == "POST":
        pattern = "%" + request.form['search'] + "%"
        cur = mysql.connection.cursor()
        result = cur.execute("SELECT * FROM `osama_blog`.`articles` \
                WHERE(CONVERT(`author` USING utf8)\
                LIKE %s)", [pattern])
        articles = cur.fetchall()
        cur.close()
        if result > 0:
            return render_template('search_author.html', articles=articles)
        else:
            msg = 'No Authors Found'
            return render_template('search_author.html', msg=msg)
    # Plain GET: show the empty search form.
    return render_template('search_author.html')
# search in title only
@app.route('/search_title', methods=['GET', 'POST'])
@is_logged_in
def search_title():
    """Substring search over article titles."""
    if request.method == "POST":
        pattern = "%" + request.form['search'] + "%"
        cur = mysql.connection.cursor()
        result = cur.execute("SELECT * FROM `osama_blog`.`articles` \
                WHERE(CONVERT(`title` USING utf8)\
                LIKE %s)", [pattern])
        articles = cur.fetchall()
        cur.close()
        if result > 0:
            return render_template('search_title.html', articles=articles)
        else:
            msg = 'No Titles Found'
            return render_template('search_title.html', msg=msg)
    # Plain GET: show the empty search form.
    return render_template('search_title.html')
# dropdown search in Author and Title
@app.route('/search_drop_down', methods=['GET', 'POST'])
@is_logged_in
def search_drop_down():
if request.method == "POST":
cur = mysql.connection.cursor()
if request.form['search_type'] == 'author':
if request.form['search'] == 'search' or request.form['search'] == '' or request.form['search'] == ' ':
flash("You have not written anything to search for !", "warning")
return redirect(url_for('searches'))
result = cur.execute("SELECT * FROM articles WHERE author = %s", [request.form['search']])
articles = cur.fetchall()
cur.close()
if result > 0:
# flash("done!", "success")
return render_template('search_drop_down.html', articles=articles)
else:
# flash("done2!", "success")
msg = 'No Authors Found'
return render_template('search_drop_down.html', msg=msg)
elif request.form['search_type'] == 'title':
if request.form['search'] == 'search' or request.form['search'] == '' or request.form['search'] == ' ':
flash("You have not written anything to search for !", "warning")
return redirect(url_for('searches'))
result = cur.execute("SELECT * FROM articles WHERE title = %s", [request.form['search']])
articles = cur.fetchall()
cur.close()
if result | |
# <gh_stars>0  (dataset/extraction metadata, not Python code)
# Global 64-square board as a flat list: index 0 = a1 ... 63 = h8.
# Lowercase pieces sit on ranks 1-2 and move toward higher indices
# (treated as 'w' by the validators); uppercase on ranks 7-8; '.' empty.
b = "r n b q k b n r p p p p p p p p".split(" ") + ['.']*32 + "p p p p p p p p r n b q k b n r".upper().split(" ")
def newBoard():
    """Reset the global board to the initial position and return it.

    The original assigned a *local* ``b`` and therefore never reset the
    module-level board; ``global b`` fixes that.  Returning the board is
    a backward-compatible addition (the original returned None).
    """
    global b
    b = "r n b q k b n r p p p p p p p p".split(" ") + ['.']*32 + "p p p p p p p p r n b q k b n r".upper().split(" ")
    return b
def display(): #white side view
    """Print the global board with rank 8 at the top and files a-h
    labelled along the bottom."""
    c , k= 1 ,0
    # Rank labels for the left margin, printed top (8) down to bottom (1).
    ap = range(1,9)[::-1]
    row,col=[],[]
    # Chop the flat 64-square list into 8 rows of 8 squares each.
    for i in b:
        row.append(i)
        if c==8 :
            c=0
            col.append(row)
            row=[]
        c+=1
    # Rows are collected rank 1 first, so print them reversed.
    for j in col[::-1]:
        print(ap[k] , " |" ,end=" ")
        for i in j:
            print(i,end=' ')
        print()
        k+=1
    print(" ",end="")
    print("-"*18," A B C D E F G H",sep="\n")
def move(fr, to):
    """Move the piece on square *fr* (e.g. 'e2') to *to*, then redraw.

    No legality check is performed here -- callers use the *Valid
    helpers for that.
    """
    src = conv(fr) - 1
    dst = conv(to) - 1
    b[src], b[dst] = '.', b[src]
    display()
def conv(s):
    """Convert algebraic notation ('a1'..'h8') to a 1-based square number
    (a1 -> 1, h8 -> 64)."""
    files = {'a': 1, 'b': 2, 'c': 3, 'd': 4,
             'e': 5, 'f': 6, 'g': 7, 'h': 8}
    rank = int(s[1])
    return (rank - 1) * 8 + files[s[0]]
def rookValid(fr,to):
    """Validate a rook move from *fr* to *to* (algebraic square names).

    con1: same file, con2: same rank with clear path, con3: destination
    is empty or an enemy piece.
    """
    fnum = (conv(fr))-1
    tnum = (conv(to))-1
    con1,con2,con3=False,False,False
    # Same file when the index difference is a multiple of 8.
    if abs(fnum-tnum)%8==0:
        con1=True
    rows=[range(0,8),range(8,16),range(16,24),range(24,32),range(32,40),range(40,48),range(48,56),range(56,64)]
    for k in rows:
        if fnum in k and tnum in k:
            con2=True
    if con2: #verifies if path is clear if fr and to are in same row
        # NOTE(review): range(fnum+1, tnum) assumes fnum < tnum; a
        # right-to-left move skips the clearance check -- confirm.
        for l in range(fnum+1,tnum):
            if b[l] != '.':
                con2=False
    mi =min(fnum,tnum)
    ma = max(fnum,tnum)
    if con1:
        # NOTE(review): this walk also tests the destination square (ma),
        # so a capture along a file is rejected -- confirm intent.
        while mi < ma:
            mi+=8
            if b[mi] !='.':
                con1=False
    if (b[fnum].isupper() and not b[tnum].isupper()) or (b[fnum].islower() and not b[tnum].islower()) : con3 = True
    return (con1 or con2) and con3
def kingValid(fr, to):
    """Validate a king move from *fr* to *to* (algebraic square names).

    Fixes the original, which tested ``fnum in val`` (never true, since
    ``val`` holds neighbours of ``fnum``) and stored the result in a
    variable (``con``) that was never returned -- so every king move was
    rejected.
    """
    fnum = (conv(fr)) - 1
    tnum = (conv(to)) - 1
    if not addressValid(fnum, tnum):
        return False
    con1, con2 = False, False
    # NOTE(review): diagonal king steps (+/-7, +/-9) are not generated,
    # matching the original's move set -- confirm whether intended.
    if fnum % 8 != 0 and fnum % 9 != 0:
        val = [fnum + 1, fnum - 1, fnum + 8, fnum - 8]
    elif fnum % 8 == 0:
        val = [fnum + 8, fnum - 8, fnum - 1]
    else:
        val = [fnum + 8, fnum - 8, fnum + 1]
    if tnum in val:
        con1 = True
    # Destination must not hold a piece of the mover's own colour.
    if (b[fnum].isupper() and not b[tnum].isupper()) or (b[fnum].islower() and not b[tnum].islower()):
        con2 = True
    return con1 and con2
def pawnValid(fr, to):
    """Validate a pawn move from *fr* to *to* (algebraic square names).

    Fixes from the original:
    * colour was decided by ``fr.isupper()`` on the *square name* string
      (e.g. 'e2'), so every pawn was treated as white -- the piece
      character ``b[fnum]`` is inspected instead;
    * the double-step test compared the square *name* against a range of
      ints (always false) -- the square *index* is used now, with black's
      home rank 48..55 rather than white's 8..15;
    * black's capture targets were ``fnum+7``/``fnum+9`` (backwards) --
      corrected to ``fnum-7``/``fnum-9``.
    """
    fnum = (conv(fr)) - 1
    tnum = (conv(to)) - 1
    if not addressValid(fnum, tnum):
        return False
    # NOTE(review): the +-7/+-9/+-16 probes below can index past the board
    # near the edges (IndexError or negative-index wraparound) -- kept as
    # in the original, confirm intended handling.
    if b[fnum].islower():      # lowercase = 'w', moves toward higher ranks
        if fnum in range(8, 16):
            vm = [fnum + 8, fnum + 16]
        else:
            vm = [fnum + 8]
        if b[fnum + 7].isupper():
            vm.append(fnum + 7)
        if b[fnum + 9].isupper():
            vm.append(fnum + 9)
        return tnum in vm and not b[tnum].islower()
    elif b[fnum].isupper():    # uppercase = 'b', moves toward lower ranks
        if fnum in range(48, 56):
            vm = [fnum - 8, fnum - 16]
        else:
            vm = [fnum - 8]
        if b[fnum - 7].islower():
            vm.append(fnum - 7)
        if b[fnum - 9].islower():
            vm.append(fnum - 9)
        return tnum in vm and not b[tnum].isupper()
    return False               # source square is empty
def bishopValid(fr,to):
    """Validate a bishop move from *fr* to *to* (algebraic square names).

    Fix over the original: ``con2`` is initialised, so the final
    ``return con1 and con2`` can no longer raise NameError when the
    colour check below is not reached.
    """
    fnum = (conv(fr))-1
    tnum = (conv(to))-1
    if not addressValid(fnum,tnum): return False
    con1,con2=False,False
    # NOTE(review): abs(diff) % 9 == 0 or % 7 == 0 admits non-diagonal
    # squares (e.g. diff of 14), and the walks below step in a fixed
    # positive direction and also test the square they land on -- the
    # whole clearance logic needs a rework; kept as in the original.
    if abs(fnum-tnum)%9==0 or abs(fnum-tnum)%7==0:
        con1 = True
        if (fnum-tnum)%9==0:
            while fnum!=tnum:
                tnum+=9
                if b[tnum]!='.' : return False
        if (fnum-tnum)%7==0:
            while fnum!=tnum:
                tnum+=7
                if b[tnum]!='.' : return False
        if (tnum-fnum)%9==0:
            while tnum!=fnum:
                fnum+=9
                if b[fnum]!='.' : return False
        if (tnum-fnum)%7==0:
            while tnum!=fnum:
                fnum+=7
                if b[fnum]!='.' : return False
    # Destination must not hold a piece of the mover's own colour.
    if (b[fnum].isupper() and not b[tnum].isupper()) or (b[fnum].islower() and not b[tnum].islower()) : con2 = True
    return con1 and con2
def queenValid(fr, to):
    """A queen move is valid iff it is a valid bishop or rook move."""
    src = (conv(fr)) - 1
    dst = (conv(to)) - 1
    if not addressValid(src, dst):
        return False
    return bishopValid(fr, to) or rookValid(fr, to)
def knightValid(fr, to):
    """Validate a knight move from *fr* to *to* (algebraic square names).

    Fixes the original, which left ``con1``/``con2`` unbound (NameError
    on return) whenever their conditions were false.
    """
    fnum = (conv(fr)) - 1
    tnum = (conv(to)) - 1
    if not addressValid(fnum, tnum):
        return False
    con1 = tnum in [fnum + 17, fnum - 17, fnum + 15, fnum - 15,
                    fnum + 10, fnum - 6, fnum + 6, fnum - 10]
    # Destination must not hold a piece of the mover's own colour.
    con2 = (b[fnum].isupper() and not b[tnum].isupper()) or \
           (b[fnum].islower() and not b[tnum].islower())
    # NOTE(review): board-edge wraparound (e.g. an h-file knight "jumping"
    # to the a-file of an adjacent rank) is not excluded -- confirm.
    return con1 and con2
def addressValid(fnum, tnum):
    """True when both 0-based square indices lie on the 8x8 board."""
    on_board = range(64)
    return fnum in on_board and tnum in on_board
def rookMoves(pos):
    """Return the rook's reachable squares from *pos* (algebraic names).

    Scans the four rook directions, collecting empty squares and stopping
    at the first piece (enemy pieces are included, own pieces are not).
    NOTE(review): the downward/leftward scans can drive ``block`` negative
    (wrapping via Python's negative indexing) and the upward scan can run
    past index 63 (IndexError) when starting from an edge rank -- confirm.
    """
    num=(conv(pos))-1 #num is index
    # Lowercase = white mover, uppercase = black mover.
    if b[num].isupper() : c='b'
    elif b[num].islower() : c='w'
    else: return "Block is empty"
    vm=[]
    col=(num+1)%8
    if col==0: col=8
    row=int(pos[1])
    if c=='w':
        # Up the file (toward rank 8).
        block=num+8
        while row<=8:
            if b[block] == '.' : vm.append(block)
            if b[block].isupper() :
                vm.append(block)
                break
            if b[block].islower():
                break
            block+=8
            row+=1
        # Down the file (toward rank 1).
        row=int(pos[1])
        block=num-8
        while row>0:
            if b[block] == '.' : vm.append(block)
            if b[block].isupper() :
                vm.append(block)
                break
            if b[block].islower():
                break
            block-=8
            row-=1
        # Right along the rank.
        tcol=col+1 #col is from 1 to 8 , row is from 1 to 8
        block =num+1
        while tcol<=8:
            if b[block] == '.' : vm.append(block)
            if b[block].isupper() :
                vm.append(block)
                break
            if b[block].islower():
                break
            block+=1
            tcol+=1
        # Left along the rank.
        block =num-1
        tcol=col
        while tcol>1:
            if b[block] == '.' : vm.append(block)
            if b[block].isupper() :
                vm.append(block)
                break
            if b[block].islower():
                break
            block-=1
            tcol-=1
        tcol=col
        row=int(pos[1])
    if c=='b':
        # Same four scans with the colour tests swapped.
        # NOTE(review): the white branch uses ``while row>0`` for the
        # downward scan but this one uses ``while row>1`` -- confirm.
        block=num+8
        while row<=8:
            if b[block] == '.' : vm.append(block)
            if b[block].islower() :
                vm.append(block)
                break
            if b[block].isupper():
                break
            block+=8
            row+=1
        row=int(pos[1])
        block=num-8
        while row>1:
            if b[block] == '.' : vm.append(block)
            if b[block].islower() :
                vm.append(block)
                break
            if b[block].isupper():
                break
            block-=8
            row-=1
        tcol=col+1 #col is from 1 to 8 , row is from 1 to 8
        block =num+1
        while tcol<=8:
            if b[block] == '.' : vm.append(block)
            if b[block].islower() :
                vm.append(block)
                break
            if b[block].isupper():
                break
            block+=1
            tcol+=1
        block =num-1
        tcol=col
        while tcol>1:
            if b[block] == '.' : vm.append(block)
            if b[block].islower() :
                vm.append(block)
                break
            if b[block].isupper():
                break
            block-=1
            tcol-=1
    # Convert indices back to algebraic names.
    move=[]
    for l in vm:
        move.append(numToAlg(l))
    return move
def bishopMoves(pos):
    """Return the bishop's reachable squares from *pos* (algebraic names).

    Scans the four diagonals, collecting empty squares and stopping at the
    first piece (enemy pieces included, own pieces excluded).

    Fix over the original: the black top-left scan tested
    ``b[block].upper()`` (a non-empty string, always truthy) instead of
    calling ``.isupper()``, so that scan stopped at the first square
    regardless of contents.
    NOTE(review): the top-right loops test ``col<=8`` instead of
    ``tcol<=8``, and the diagonal steps can wrap across board edges --
    kept as in the original, confirm.
    """
    num=(conv(pos))-1
    # Lowercase = white mover, uppercase = black mover.
    if b[num].isupper() : c='b'
    elif b[num].islower() : c='w'
    else: return "Block is empty"
    vm=[]
    col=(num+1)%8
    if col==0: col=8
    row=int(pos[1])+1
    if c=='w':
        tcol=col+1
        row=int(pos[1])+1
        block=num+9
        while row<=8 and col<=8 : #goes top right
            if b[block] == '.' : vm.append(block)
            if b[block].isupper() :
                vm.append(block)
                break
            if b[block].islower():
                break
            block+=9
            row+=1
            tcol+=1
        row=int(pos[1])-1
        tcol=col-1
        block=num-9
        while row>0 and tcol>1: #goes below left
            if b[block] == '.' : vm.append(block)
            if b[block].isupper() :
                vm.append(block)
                break
            if b[block].islower():
                break
            block-=9
            row-=1
            tcol-=1
        row=int(pos[1])-1
        tcol=col+1
        block =num-7
        while tcol<=8 and row>1: #goes below right
            if b[block] == '.' : vm.append(block)
            if b[block].isupper() :
                vm.append(block)
                break
            if b[block].islower():
                break
            block-=7
            tcol+=1
            row-=1
        block =num+7
        tcol=col-1
        row=int(pos[1])+1
        while tcol>0 and row<=8: #goes top left
            if b[block] == '.' : vm.append(block)
            if b[block].isupper() :
                vm.append(block)
                break
            if b[block].islower():
                break
            block+=7
            tcol-=1
            row+=1
    if c=='b':
        tcol=col+1
        row=int(pos[1])+1
        block=num+9
        while row<=8 and col<=8 : #goes top right
            if b[block] == '.' : vm.append(block)
            if b[block].islower() :
                vm.append(block)
                break
            if b[block].isupper():
                break
            block+=9
            row+=1
            tcol+=1
        row=int(pos[1])-1
        tcol=col-1
        block=num-9
        while row>0 and tcol>1: #goes below left
            if b[block] == '.' : vm.append(block)
            if b[block].islower() :
                vm.append(block)
                break
            if b[block].isupper():
                break
            block-=9
            row-=1
            tcol-=1
        row=int(pos[1])-1
        tcol=col+1
        block =num-7
        while tcol<=8 and row>1: #goes below right
            if b[block] == '.' : vm.append(block)
            if b[block].islower() :
                vm.append(block)
                break
            if b[block].isupper():
                break
            block-=7
            tcol+=1
            row-=1
        block =num+7
        tcol=col-1
        row=int(pos[1])+1
        while tcol>0 and row<=8: #goes top left
            if b[block] == '.' : vm.append(block)
            if b[block].islower() :
                vm.append(block)
                break
            if b[block].isupper():      # fixed: was ``b[block].upper()``
                break
            block+=7
            tcol-=1
            row+=1
    # Convert indices back to algebraic names.
    move=[]
    for l in vm:
        move.append(numToAlg(l))
    return move
def queenMoves(pos):
    # Queen = union of rook and bishop move sets (rook squares listed first).
    return rookMoves(pos) + bishopMoves(pos)
def knightMoves(pos):
    """Return the knight's reachable squares from *pos* (algebraic names).

    Fixes the original, which tested the *bound methods*
    ``b[num].isupper`` / ``b[num].islower`` (always truthy) instead of
    calling them, so every occupied square -- enemy pieces included --
    was excluded.
    """
    num = conv(pos)-1  #num is index
    vm = [num-17, num-15, num-10, num-6, num+6, num+10, num+15, num+17]
    # NOTE(review): these pops use positions computed before the first pop
    # shifts the list, so this edge-of-board pruning looks unreliable --
    # kept as in the original, confirm with a board-edge knight.
    if vm[3] % 8 in [0, 1]:
        vm.pop(3)
        vm.pop(5)
    if vm[4] % 8 in [6, 7]:
        vm.pop(4)
        vm.pop(2)
    tvm = []
    for i in vm:
        if 0 <= i <= 63 and not ((b[num].isupper() and b[i].isupper())
                                 or (b[num].islower() and b[i].islower())):
            tvm.append(i)
    return [numToAlg(i) for i in tvm]
def kingMoves(pos):
    """Return the king's reachable squares from *pos* (algebraic names).

    Fixes the original's unparenthesised ``isupper``/``islower`` (bound
    methods, always truthy), which made every occupied square -- enemy
    pieces included -- unreachable.
    """
    num = conv(pos)-1  #num is index
    vm = [num+8, num-8, num+9, num-9, num+7, num-7, num+1, num-1]
    # NOTE(review): the pops use positions computed before earlier pops
    # shift the list; this edge pruning needs verification.
    if vm[2] % 8 == 0:
        vm.pop(2)
        vm.pop(6)
        vm.pop(5)
    if vm[3] % 8 == 7:
        vm.pop(3)
        vm.pop(-1)
        vm.pop(4)
    tvm = []
    for i in vm:
        if 0 <= i <= 63 and not ((b[num].isupper() and b[i].isupper())
                                 or (b[num].islower() and b[i].islower())):
            tvm.append(i)
    return [numToAlg(i) for i in tvm]
def pawnMoves(pos):
    """Return the pawn's reachable squares from *pos* (algebraic names).

    Fixes from the original: black's double-step tested white's home rank
    (``7 < num < 16``) instead of black's (``47 < num < 56``), and the
    result list shadowed the ``list`` builtin.
    """
    num = conv(pos)-1
    vm = []
    # NOTE(review): the +-7/+-9/+-16 probes can index past the board near
    # the edges (IndexError or negative-index wraparound) -- kept as in
    # the original, confirm intended handling.
    if b[num].islower():   # lowercase pawn moves toward higher ranks
        if b[num+8] == '.': vm.append(num+8)
        if b[num+9].isupper(): vm.append(num+9)
        if b[num+7].isupper(): vm.append(num+7)
        if b[num+16] == '.' and 7 < num < 16: vm.append(num+16)
    if b[num].isupper():   # uppercase pawn moves toward lower ranks
        if b[num-8] == '.': vm.append(num-8)
        if b[num-9].islower(): vm.append(num-9)
        if b[num-7].islower(): vm.append(num-7)
        if b[num-16] == '.' and 47 < num < 56: vm.append(num-16)
    return [numToAlg(i) for i in vm]
def moves(pos):
    """Return the move list for the piece on *pos*, dispatching on the
    piece letter (returns None for an empty square)."""
    piece = b[conv(pos) - 1].lower()
    dispatch = {'k': kingMoves,
                'q': queenMoves,
                'p': pawnMoves,
                'r': rookMoves,
                'b': bishopMoves,
                'n': knightMoves}
    handler = dispatch.get(piece)
    if handler is not None:
        return handler(pos)
def isCheck(pos):
num = conv(pos)-1
r = rookMoves(pos)
b = bishopMoves(pos)
n = knightMoves(pos)
check = False
for rcase in r:
if b[conv(rcase)-1].lower() in ['r','q'] and ( (b[num].islower() and b[conv(rcase)-1].isupper()) or (b[num].isupper() and b[conv(rcase)-1].islower()) ) | |
#! /usr/bin/env python
# -*- coding : utf-8 -*-
import os
import glob
import random
import numpy as np
import numpy.ma as ma
import const
import errors
import options
import sio
from atomtype import AtomType
from voronoi import dump
import voronoi.numpy.ngbr as NN
# import first pyvoro:
try:
import voronoi.pyvoro.voronoi as vn
print 'Imported pyvoro module!'
except ImportError:
import voronoi.numpy.voronoi as vn
print 'Failure finding pyvoro module, resorting to scipy.spatial.Delaunay'
class Geom(object):
""" Class for storing model geometry, model in Voronoi calc
Can be read from :
-- FDF (done)
-- XV (done)
-- SPH (integrated RHO)
-- ANI (done)
-- output (almost done)
"""
keys = {'NumberOfSpecies': ['n', None],
'NumberOfAtoms': ['n', None],
'ChemicalSpeciesLabel': ['b', None],
'LatticeConstant': ['m', None],
'LatticeVectors': ['b', None],
'AtomicCoordinatesFormat': ['s', None],
'AtomicCoordinatesFormatOut': ['s', None],
'AtomicCoordinatesAndAtomicSpecies': ['b', None]}
def __init__(self, calc_type=None, data=None):
""" Initializing geometry
"""
self.alat = 1.
self.unit = 'Ang'
self.vc = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
self.atoms = None
self.names = None
self.types = None
self.opts = None
self.vp = None
# self.kwds = {'common': {'pbc': True,
# 'ratio': 0.5,
# 'rm_small': True,
# 'eps': 0.5
# },
# 'absmagmom': {'abs_mm': True},
# 'magmom': {'abs_mm': False},
# }
# reading if we need to
if calc_type is not None:
self.read(calc_type, data)
# Reading ---
def read(self, calc_type, data):
""" Reading geometry from several types of input
:param calc_type: calculation type (can be 'fdf', 'es', 'xv')
:param data - data of requested type
:param data: FDF dictionary
:type data: dict or EvolStep - evolution step class
"""
act = {'fdf': self.fdf2geom,
'es': self.es2geom,
'xv': self.xv2geom
}
# as usual, switch triggering
act.get(calc_type, self.unsupported2geom)(data)
# adjusting coordinates to cell
self.to_cell()
# # update props with distances to group
# labels = self.names['label']
# for label in labels:
# at = self.filter('label', lambda x: x == label)
# self.props['distance_' + label] = self.distance_to_group
# self.kwds['distance_' + label] = {'group': at}
# get atomType
self.types = AtomType(self)
# def get_prop_names(self):
# return sorted(self.props.keys())
    def fdf2geom(self, data):
        """ Geometry init from options data
        :param data: dict-like options object with FDF keys (see ``keys``)
        """
        self.opts = data
        # TODO: there may be no LatticeConstant and LatticeVectors!
        self.alat = self.opts['LatticeConstant'].value
        self.unit = self.opts['LatticeConstant'].unit
        try:
            self.vc = np.array(self.opts['LatticeVectors'].value).astype(float) * self.alat
        except (KeyError,):
            # No explicit lattice vectors: fall back to a cubic cell.
            self.vc = np.eye(3, dtype='f4') * self.alat
        # Species table: index, atomic number, chemical label.
        self.names = np.rec.fromarrays(np.transpose(self.opts['ChemicalSpeciesLabel'].value),
                                       names='i,z,label',
                                       formats='|i1,|i2,|S2'
                                       )
        # Columns 0-2: coordinates, column 3: species index.
        acas = np.array(self.opts['AtomicCoordinatesAndAtomicSpecies'].value)
        crd = acas[:, 0:3]
        typ = acas[:, 3]
        self.atoms = np.rec.fromarrays([crd, typ], names='crd, itype', formats='|3f8,|i2')
        # converting crd array to self.alat units
        # crd units dictionary
        cud = {'Ang': 'Ang',
               'Bohr': 'Bohr',
               'NotScaledCartesianAng': 'Ang',
               'NotScaledCartesianBohr': 'Bohr',
               'ScaledCartesian': 'scale'}
        # crd units
        cu = cud[self.opts['AtomicCoordinatesFormat'].value]
        self.atoms['crd'] = convert(self.atoms['crd'], alat=self.alat, inunit=cu, outunit=self.unit)
    def es2geom(self, data):
        """ initialize geom from EvolStep instance
        :param data: EvolStep-like object with atoms, vc, units and
                     optional per-atom data fields
        """
        # alat = 1 unit of crd
        self.alat = 1.
        self.unit = data.aunit
        # atoms array is in data
        self.atoms = data.atoms
        # vc - convert from vcunit to aunit
        self.vc = convert(data.vc, inunit=data.vcunit, outunit=data.aunit)
        # now get types: unique labels -> atomic numbers via PeriodicTable
        ilabel, ind = np.unique(data.atoms['label'], return_index=True)
        iz = np.array([const.PeriodicTable[l] for l in ilabel])
        try:
            ityp = data.atoms['itype'][ind]
        except ValueError:
            # No per-atom itype field: number the species consecutively.
            ityp = np.arange(len(iz)) + 1
        ilabel = np.array([const.PeriodicTable[z] for z in iz])
        self.names = np.rec.fromarrays([ityp, iz, ilabel], names='i,z,label', formats='|i1,|i2,|S2')
        # Attach any extra per-atom data fields carried by the step.
        if isinstance(data.data, dict):
            for name, field in data.data.iteritems():
                self.add_fields(name, field)
def xv2geom(self, calc_dir):
""" Reads geometry from XV file
:param calc_dir: directory where XV file is located
If no or many XV files in directory, readxv returns -1
"""
xvf = glob.glob(os.path.join(calc_dir, '*.XV'))
if len(xvf) != 1:
print 'Geom.ReadXV: Either no or too many XV files in %s' % (dir, )
return -1
# alat = 1. Bohr, because later we read absolute coordinates of lattice vectors in Bohr
self.alat = 1.
self.unit = 'Bohr'
xv = sio.XVFile(xvf[0])
self.vc = np.array(xv.vc)
# get atomic positions (in Bohr, I assume)
self.atoms = np.rec.fromarrays([xv.crd, xv.v, xv.i_type], names='crd,v,itype', formats='|3f8,|3f8,|i2')
# now get types
ityp, ind = np.unique(np.array(xv.i_type), return_index=True)
iz = np.array(xv.z)[ind]
ilabel = np.array([const.PeriodicTable[z] for z in iz])
self.names = np.rec.fromarrays([ityp, iz, ilabel], names='i,z,label', formats='|i1,|i2,|S2')
    def unsupported2geom(self, data):
        """ Raise exception when one wants to read geometry from strange place
        """
        # fallback handler registered for unrecognized geometry sources;
        # always raises (the `data` argument is intentionally unused)
        raise errors.UnsupportedError('Now Geom instance supports reading only from fdf, evolstep (es) and xv file')
def update_with_types(self, types):
self.types.removeTypes()
for (typename, condition) in types.iteritems():
self.types.addType(condition, typename)
self.types.finalize()
# Auxiliary routines ---
    def fdf_options(self):
        """ Returns a set of fdf options needed for Geom
        """
        # the needed fdf option names are the keys of self.keys
        # (the mapped values are not used here)
        return self.keys.keys()
def write(self, calc_dir):
""" Writes geometry to STRUCT.fdf file
:param calc_dir: directory where the file will be located
"""
fn = os.path.join(calc_dir, 'STRUCT.fdf')
self.opts.write(fn)
def filter(self, name, f):
""" Filters geometry atoms by field name & value
"""
return np.where(f(self.atoms[name]))[0]
def unique(self, name):
""" Returns a list of unique properties of the model
"""
return list(np.unique(self.atoms[name]))
def to_cell(self):
crd = self.atoms['crd']
vc = self.vc
# Get fractional coordinates
vc_inv = np.linalg.inv(vc)
crd_vc = np.dot(crd, vc_inv)
# Fractional coordinates - to cell
crd_vc -= np.floor(crd_vc)
crd = np.dot(crd_vc, vc)
self.atoms['crd'] = crd
# End Auxiliary routines ---
def distance_to_group(self, **kwds):
""" Finds distance to the nearest of the atoms belonging to group
In:
-> group (list of indexes) - list of atom indexes
"""
# taking necessary keywords from kwds
group = kwds['group']
ngroup = len(group)
nat = len(self.atoms)
rij = r(self.atoms['crd'], self.vc, n=(group, range(nat)))
dist = np.sqrt((rij**2.0).sum(axis=1))
dist = np.min(dist.reshape(ngroup, nat), axis=0)
return dist
# Voronoi tesselation routines ---
def voronoi(self, pbc, ratio):
""" Get Voronoi tesselation based on the libraries available:
-> numpy: QHull library
-> pyvoro: Python interface to Voro++ library (~30 times faster than Numpy)
Input:
-> pbc - whether to use periodic boundary conditions
"""
vd = dump.dump_shs()
d = vd.shs_geom(self)
self.vp = vn.model_voronoi(d)
self.vp.voronoi(pbc, ratio)
    def voronoi_med(self):
        """ Get voronoi tesselation of geometry by pure python Medvedev algorithm
        """
        # export the geometry in the dump format expected by the NN module
        vd = dump.dump_shs()
        d = vd.shs_geom(self)
        # build the neighbor model, then its Medvedev-Voronoi counterpart
        ngbr = NN.model_ngbr(d)
        ngbr.make_ngbr()
        mv = ngbr.toMV()
        mv.make_voronoi(1)
        # per-atom quantities: sq/vol/k_sph presumably face area, volume and
        # sphericity coefficient -- TODO confirm against mv.get_voronoi_param
        sq, vol, k_sph, pl_mv = mv.get_voronoi_param()
        self.add_fields('sq', np.array(sq))
        self.add_fields('vol', np.array(vol))
        self.add_fields('k_sph', np.array(k_sph))
        # keep the neighbor list for later use
        self.nb = pl_mv
def label(self, **kwds):
return self.atoms['label']
def vp_neighbors(self, **kwds):
""" Finds neighbors of VPs
"""
# FIXME: Works only with pyvoro
pbc = kwds.get('pbc', True)
ratio = kwds.get('ratio', 0.5)
if not hasattr(self,'vp'):
self.voronoi(pbc, ratio)
rm_small = kwds.get('rm_small', True)
eps = kwds.get('eps', 0.05)
return self.vp.vp_neighbors(rm_small, eps)
# fa_np = self.vp_facearea(pbc, ratio, rm_small, eps)
# If there is a face (with non-zero area) between atoms, then they are neighbors
# fa_np[fa_np > 0] = 1.
# return fa_np
def vp_distance(self, pbc=True, ratio=0.5, rm_small=False, eps=0.5):
"""Finds distances between VP neighbors"""
if self.vp is None:
self.voronoi(pbc, ratio)
f = self.vp.vp_faces()
if rm_small:
fa = self.vp.vp_face_area(f)
f = self.vp.remove_small_faces(f, fa, eps)
dist = self.vp.vp_distance(f)
# here fa is the list of dictionaries, we make it a 2d numpy array
# with masked values
# WARNING: O(nat^2 * nsteps) memory consumption!
nat = len(dist)
dist_np = np.zeros((nat, nat), dtype=np.float)
for iat, ngbr in enumerate(dist):
for jat, distance in ngbr.iteritems():
dist_np[iat, jat] = distance
dist_np = ma.masked_values(dist_np, 0.)
return dist_np
def add_fields(self, name, field):
""" Add fields to self.atoms
:param name: (str) name of field
:param field: data to append. If ndarray, field is added with input name. If recarray,self.atoms is joined with
field (thus preserving names from field recarray)
:return:
"""
import numpy.lib.recfunctions as nlrf
fcn = field.__class__.__name__
if fcn == 'ndarray':
nlrf.append_fields(self.atoms, name, field, asrecarray=True, usemask=False)
elif fcn == 'recarray':
for name in field.dtype.names:
# FIXME: very, very dirty hack!
if name == 'forces':
self.forces = field[name]
continue
self.atoms = nlrf.rec_append_fields(self.atoms, name, field[name])
else:
raise TypeError('Geom.add_fields: Only arrays or recarrays can be added to self.atoms as fields!')
def has_fields(self, *fields):
"""Returns True if fields are present in self.atoms.dtype.names"""
return all([(f in self.atoms.dtype.names) for f in fields])
def __getitem__(self, item):
try:
return self.atoms[item]
except ValueError:
if item == 'vc':
return self.vc
else:
raise TypeError('Geom: Only int or string key is allowed')
def geom2opts(self):
""" Converts geometry (read from anywhere / altered) to the list of values
"""
data = {'NumberOfSpecies': [1, str(len(self.names))],
| |
<reponame>dyllllll/tencentcloud-sdk-python
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class CreateAsrVocabRequest(AbstractModel):
    """Request structure for CreateAsrVocab.
    """
    def __init__(self):
        """
        :param Name: Hot-word table name, 1-255 characters long
        :type Name: str
        :param Description: Hot-word table description, 0-1000 characters long
        :type Description: str
        :param WordWeights: Word-weight array holding every hot word and its weight. Each hot word is at most 10 characters long, each weight is an integer in [1, 10], and the array holds at most 128 entries
        :type WordWeights: list of HotWord
        :param WordWeightStr: Base64 encoding of a plain-text word-weight file, one entry per line in the form word|weight (word on the left, weight on the right, separated by the "|" character), e.g. hello|5.
When this parameter is supplied (length greater than 0) the word weights are parsed from it and WordWeights is ignored
        :type WordWeightStr: str
        """
        self.Name = None
        self.Description = None
        self.WordWeights = None
        self.WordWeightStr = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.Name = params.get("Name")
        self.Description = params.get("Description")
        if params.get("WordWeights") is not None:
            self.WordWeights = []
            for item in params.get("WordWeights"):
                obj = HotWord()
                obj._deserialize(item)
                self.WordWeights.append(obj)
        self.WordWeightStr = params.get("WordWeightStr")
class CreateAsrVocabResponse(AbstractModel):
    """Response structure for CreateAsrVocab.
    """
    def __init__(self):
        """
        :param VocabId: Vocabulary ID; can be used to query the vocabulary later
        :type VocabId: str
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.VocabId = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.VocabId = params.get("VocabId")
        self.RequestId = params.get("RequestId")
class CreateCustomizationRequest(AbstractModel):
    """Request structure for CreateCustomization.
    """
    def __init__(self):
        """
        :param ModelName: Self-learning model name, 1-20 characters long
        :type ModelName: str
        :param TextUrl: Download URL of the text file used to train the model; only Tencent Cloud COS is currently supported
        :type TextUrl: str
        :param ModelType: Self-learning model type; either "8k" or "16k"
        :type ModelType: str
        :param TagInfos: Tag information
        :type TagInfos: list of str
        """
        self.ModelName = None
        self.TextUrl = None
        self.ModelType = None
        self.TagInfos = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.ModelName = params.get("ModelName")
        self.TextUrl = params.get("TextUrl")
        self.ModelType = params.get("ModelType")
        self.TagInfos = params.get("TagInfos")
class CreateCustomizationResponse(AbstractModel):
    """Response structure for CreateCustomization.
    """
    def __init__(self):
        """
        :param ModelId: Model ID
        :type ModelId: str
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.ModelId = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.ModelId = params.get("ModelId")
        self.RequestId = params.get("RequestId")
class CreateRecTaskRequest(AbstractModel):
    """Request structure for CreateRecTask.
    """
    def __init__(self):
        """
        :param EngineModelType: Engine model type.
Telephone scenarios:
• 8k_en: telephone, 8 kHz, English;
• 8k_zh: telephone, 8 kHz, general Mandarin Chinese;
Non-telephone scenarios:
• 16k_zh: 16 kHz, general Mandarin Chinese;
• 16k_zh_video: 16 kHz, audio/video domain;
• 16k_en: 16 kHz, English;
• 16k_ca: 16 kHz, Cantonese;
• 16k_ja: 16 kHz, Japanese;
• 16k_wuu-SH: 16 kHz, Shanghainese dialect;
        :type EngineModelType: str
        :param ChannelNum: Number of audio channels. 1: mono; 2: stereo (stereo is only supported by the 8k_zh engine model).
        :type ChannelNum: int
        :param ResTextFormat: Format of the recognition result. 0: recognized text with segment-level timestamps; 1: word-level [detailed recognition result](https://cloud.tencent.com/document/api/1093/37824#SentenceDetail) (no punctuation, includes speech rate); 2: word-level detailed result (includes punctuation and speech rate)
        :type ResTextFormat: int
        :param SourceType: Audio data source. 0: audio URL; 1: audio data in the POST body.
        :type SourceType: int
        :param SpeakerDiarization: Whether to enable speaker diarization. 0: disabled, 1: enabled (only supported by the 8k_zh, 16k_zh and 16k_zh_video engine models, mono audio)
        :type SpeakerDiarization: int
        :param SpeakerNumber: Number of speakers to separate (requires speaker diarization to be enabled). Range 0-10; 0 means automatic separation (currently at most 6 speakers), 1-10 means a fixed speaker count.
Note: speaker diarization is currently in beta; use it with care
        :type SpeakerNumber: int
        :param CallbackUrl: Callback URL, the address of a server set up by the user to receive recognition results; must be shorter than 2048 bytes. Required when results are fetched via callback; omit it when polling for results instead.
        :type CallbackUrl: str
        :param Url: URL of the audio, downloadable from the public internet; must be shorter than 2048 bytes. Required when SourceType is 0, omitted when it is 1. Note: the recording must be no longer than 5 hours, otherwise recognition may fail; make sure the file downloads quickly, otherwise the download may fail.
        :type Url: str
        :param Data: Audio data; required when SourceType is 1, may be omitted when it is 0. Must be base64-encoded (in Python, read the file as a string rather than bytes, i.e. decode() after reading bytes; the encoded data must not contain CR/LF characters). The audio data must be smaller than 5 MB.
        :type Data: str
        :param DataLen: Data length, optional (the length of the data before base64 encoding).
        :type DataLen: int
        :param HotwordId: Hot-word table ID. Selects the hot-word table to apply; if no ID is set the default hot-word table takes effect automatically, otherwise the specified table is used.
        :type HotwordId: str
        :param FilterDirty: Whether to filter profanity (currently supported by Mandarin Chinese engines). 0: do not filter; 1: filter; 2: replace profanity with " * ". Default 0.
        :type FilterDirty: int
        :param FilterModal: Whether to filter filler (modal) words (currently supported by Mandarin Chinese engines). 0: do not filter; 1: partial filtering; 2: strict filtering. Default 0.
        :type FilterModal: int
        :param ConvertNumMode: Whether to intelligently convert Chinese numerals to Arabic numerals (currently supported by Mandarin Chinese engines). 0: no conversion, output Chinese numerals as-is; 1: convert intelligently based on context. Default 1.
        :type ConvertNumMode: int
        :param Extra: Extra parameter
        :type Extra: str
        :param FilterPunc: Whether to filter punctuation (currently supported by Mandarin Chinese engines). 0: do not filter; 1: filter sentence-final punctuation; 2: filter all punctuation. Default 0.
        :type FilterPunc: int
        """
        self.EngineModelType = None
        self.ChannelNum = None
        self.ResTextFormat = None
        self.SourceType = None
        self.SpeakerDiarization = None
        self.SpeakerNumber = None
        self.CallbackUrl = None
        self.Url = None
        self.Data = None
        self.DataLen = None
        self.HotwordId = None
        self.FilterDirty = None
        self.FilterModal = None
        self.ConvertNumMode = None
        self.Extra = None
        self.FilterPunc = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.EngineModelType = params.get("EngineModelType")
        self.ChannelNum = params.get("ChannelNum")
        self.ResTextFormat = params.get("ResTextFormat")
        self.SourceType = params.get("SourceType")
        self.SpeakerDiarization = params.get("SpeakerDiarization")
        self.SpeakerNumber = params.get("SpeakerNumber")
        self.CallbackUrl = params.get("CallbackUrl")
        self.Url = params.get("Url")
        self.Data = params.get("Data")
        self.DataLen = params.get("DataLen")
        self.HotwordId = params.get("HotwordId")
        self.FilterDirty = params.get("FilterDirty")
        self.FilterModal = params.get("FilterModal")
        self.ConvertNumMode = params.get("ConvertNumMode")
        self.Extra = params.get("Extra")
        self.FilterPunc = params.get("FilterPunc")
class CreateRecTaskResponse(AbstractModel):
    """Response structure for CreateRecTask.
    """
    def __init__(self):
        """
        :param Data: Result of the recording-file recognition request; contains the TaskId needed to query the result
        :type Data: :class:`tencentcloud.asr.v20190614.models.Task`
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.Data = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        if params.get("Data") is not None:
            self.Data = Task()
            self.Data._deserialize(params.get("Data"))
        self.RequestId = params.get("RequestId")
class DeleteAsrVocabRequest(AbstractModel):
    """Request structure for DeleteAsrVocab.
    """
    def __init__(self):
        """
        :param VocabId: Hot-word table ID
        :type VocabId: str
        """
        self.VocabId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.VocabId = params.get("VocabId")
class DeleteAsrVocabResponse(AbstractModel):
    """Response structure for DeleteAsrVocab.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.RequestId = params.get("RequestId")
class DeleteCustomizationRequest(AbstractModel):
    """Request structure for DeleteCustomization.
    """
    def __init__(self):
        """
        :param ModelId: ID of the model to delete
        :type ModelId: str
        """
        self.ModelId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.ModelId = params.get("ModelId")
class DeleteCustomizationResponse(AbstractModel):
    """Response structure for DeleteCustomization.
    """
    def __init__(self):
        """
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.RequestId = params.get("RequestId")
class DescribeTaskStatusRequest(AbstractModel):
    """Request structure for DescribeTaskStatus.
    """
    def __init__(self):
        """
        :param TaskId: TaskId obtained from the CreateRecTask API; used to query the task status and result.
        :type TaskId: int
        """
        self.TaskId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.TaskId = params.get("TaskId")
class DescribeTaskStatusResponse(AbstractModel):
    """Response structure for DescribeTaskStatus.
    """
    def __init__(self):
        """
        :param Data: Result of the recording-file recognition request.
        :type Data: :class:`tencentcloud.asr.v20190614.models.TaskStatus`
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.Data = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        if params.get("Data") is not None:
            self.Data = TaskStatus()
            self.Data._deserialize(params.get("Data"))
        self.RequestId = params.get("RequestId")
class DownloadAsrVocabRequest(AbstractModel):
    """Request structure for DownloadAsrVocab.
    """
    def __init__(self):
        """
        :param VocabId: Vocabulary ID.
        :type VocabId: str
        """
        self.VocabId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.VocabId = params.get("VocabId")
class DownloadAsrVocabResponse(AbstractModel):
    """Response structure for DownloadAsrVocab.
    """
    def __init__(self):
        """
        :param VocabId: Vocabulary ID.
        :type VocabId: str
        :param WordWeightStr: Base64 value of the word-weight file.
        :type WordWeightStr: str
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.VocabId = None
        self.WordWeightStr = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.VocabId = params.get("VocabId")
        self.WordWeightStr = params.get("WordWeightStr")
        self.RequestId = params.get("RequestId")
class DownloadCustomizationRequest(AbstractModel):
    """Request structure for DownloadCustomization.
    """
    def __init__(self):
        """
        :param ModelId: Self-learning model ID
        :type ModelId: str
        """
        self.ModelId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.ModelId = params.get("ModelId")
class DownloadCustomizationResponse(AbstractModel):
    """Response structure for DownloadCustomization.
    """
    def __init__(self):
        """
        :param DownloadUrl: Download URL
        :type DownloadUrl: str
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.DownloadUrl = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.DownloadUrl = params.get("DownloadUrl")
        self.RequestId = params.get("RequestId")
class GetAsrVocabListRequest(AbstractModel):
    """Request structure for GetAsrVocabList.
    """
    def __init__(self):
        """
        :param TagInfos: Tag information in the form "$TagKey : $TagValue", with "space + colon + space" as the separator
        :type TagInfos: list of str
        :param Offset: Pagination offset
        :type Offset: int
        :param Limit: Pagination limit
        :type Limit: int
        """
        self.TagInfos = None
        self.Offset = None
        self.Limit = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.TagInfos = params.get("TagInfos")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
class GetAsrVocabListResponse(AbstractModel):
    """Response structure for GetAsrVocabList.
    """
    def __init__(self):
        """
        :param VocabList: List of hot-word tables
        :type VocabList: list of Vocab
        :param TotalCount: Total number of hot-word tables
        :type TotalCount: int
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.VocabList = None
        self.TotalCount = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        if params.get("VocabList") is not None:
            self.VocabList = []
            for item in params.get("VocabList"):
                obj = Vocab()
                obj._deserialize(item)
                self.VocabList.append(obj)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class GetAsrVocabRequest(AbstractModel):
    """Request structure for GetAsrVocab.
    """
    def __init__(self):
        """
        :param VocabId: Hot-word table ID
        :type VocabId: str
        """
        self.VocabId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.VocabId = params.get("VocabId")
class GetAsrVocabResponse(AbstractModel):
    """Response structure for GetAsrVocab.
    """
    def __init__(self):
        """
        :param Name: Hot-word table name
        :type Name: str
        :param Description: Hot-word table description
        :type Description: str
        :param VocabId: Hot-word table ID
        :type VocabId: str
        :param WordWeights: List of word weights
        :type WordWeights: list of HotWord
        :param CreateTime: Vocabulary creation time
        :type CreateTime: str
        :param UpdateTime: Vocabulary update time
        :type UpdateTime: str
        :param State: Hot-word table state: 1 means it is the default table, loaded automatically during recognition; 0 is the initial state
        :type State: int
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.Name = None
        self.Description = None
        self.VocabId = None
        self.WordWeights = None
        self.CreateTime = None
        self.UpdateTime = None
        self.State = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.Name = params.get("Name")
        self.Description = params.get("Description")
        self.VocabId = params.get("VocabId")
        if params.get("WordWeights") is not None:
            self.WordWeights = []
            for item in params.get("WordWeights"):
                obj = HotWord()
                obj._deserialize(item)
                self.WordWeights.append(obj)
        self.CreateTime = params.get("CreateTime")
        self.UpdateTime = params.get("UpdateTime")
        self.State = params.get("State")
        self.RequestId = params.get("RequestId")
class GetCustomizationListRequest(AbstractModel):
    """Request structure for GetCustomizationList.
    """
    def __init__(self):
        """
        :param TagInfos: Tag information in the form "$TagKey : $TagValue", with "space + colon + space" as the separator
        :type TagInfos: list of str
        :param Limit: Page size
        :type Limit: int
        :param Offset: Pagination offset
        :type Offset: int
        """
        self.TagInfos = None
        self.Limit = None
        self.Offset = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        self.TagInfos = params.get("TagInfos")
        self.Limit = params.get("Limit")
        self.Offset = params.get("Offset")
class GetCustomizationListResponse(AbstractModel):
    """Response structure for GetCustomizationList.
    """
    def __init__(self):
        """
        :param Data: Array of self-learning models
Note: this field may return null, meaning no valid value was obtained.
        :type Data: list of Model
        :param TotalCount: Total number of self-learning models
Note: this field may return null, meaning no valid value was obtained.
        :type TotalCount: int
        :param RequestId: Unique request ID, returned with every request. Provide it when reporting an issue so the request can be located.
        :type RequestId: str
        """
        self.Data = None
        self.TotalCount = None
        self.RequestId = None
    def _deserialize(self, params):
        """Populate this model's fields from the *params* dict."""
        if params.get("Data") is not None:
            self.Data = []
            for item in params.get("Data"):
                obj = Model()
                obj._deserialize(item)
                self.Data.append(obj)
        self.TotalCount = params.get("TotalCount")
        self.RequestId = params.get("RequestId")
class HotWord(AbstractModel):
"""热词的词和权重
"""
def __init__(self):
"""
:param Word: 热词
:type Word: str
:param Weight: 权重
:type Weight: int
"""
self.Word = None
self.Weight = None
def _deserialize(self, params):
self.Word | |
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
other : Tally or Real
The tally or scalar value to divide this tally by
Returns
-------
Tally
A new derived tally which is the dividend of this tally and the
other tally or scalar value in the division.
Raises
------
ValueError
When this method is called before the Tally is populated with data
by the StatePoint.read_results() method.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{0}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(other, Tally):
new_tally = self._outer_product(other, binary_op='/')
elif isinstance(other, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self.mean / other
new_tally._std_dev = self.std_dev * np.abs(1. / other)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realization = self.num_realizations
new_tally.num_score_bins = self.num_score_bins
for filter in self.filters:
new_tally.add_filter(filter)
for nuclide in self.nuclides:
new_tally.add_nuclide(nuclide)
for score in self.scores:
new_tally.add_score(score)
else:
msg = 'Unable to divide Tally ID="{0}" ' \
'by "{1}"'.format(self.id, other)
raise ValueError(msg)
return new_tally
    def __div__(self, other):
        """Python 2 division hook; delegates to :meth:`__truediv__`.

        Parameters
        ----------
        other : Tally or Real
            The tally or scalar value to divide this tally by

        Returns
        -------
        Tally
            A new derived tally (see ``__truediv__``).

        """
        return self.__truediv__(other)
def __pow__(self, power):
"""Raises this tally to another tally or scalar value power.
This method builds a new tally with data that is the power of
this tally's data to that from the other tally or scalar value. If the
filters, scores and nuclides in the two tallies are not the same, then
they are combined in all possible ways in the new derived tally.
Uncertainty propagation is used to compute the standard deviation
for the new tally's data. It is important to note that this makes
the assumption that the tally data is independently distributed.
In most use cases, this is *not* true and may lead to under-prediction
of the uncertainty. The uncertainty propagation model is from the
following source:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty
Parameters
----------
power : Tally or Real
The tally or scalar value exponent
Returns
-------
Tally
A new derived tally which is this tally raised to the power of the
other tally or scalar value in the exponentiation.
Raises
------
ValueError
When this method is called before the Tally is populated with data
by the StatePoint.read_results() method.
"""
# Check that results have been read
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{0}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
if isinstance(power, Tally):
new_tally = self._outer_product(power, binary_op='^')
elif isinstance(power, Real):
new_tally = Tally(name='derived')
new_tally._derived = True
new_tally.name = self.name
new_tally._mean = self._mean ** power
self_rel_err = self.std_dev / self.mean
new_tally._std_dev = np.abs(new_tally._mean * power * self_rel_err)
new_tally.estimator = self.estimator
new_tally.with_summary = self.with_summary
new_tally.num_realization = self.num_realizations
new_tally.num_score_bins = self.num_score_bins
for filter in self.filters:
new_tally.add_filter(filter)
for nuclide in self.nuclides:
new_tally.add_nuclide(nuclide)
for score in self.scores:
new_tally.add_score(score)
else:
msg = 'Unable to raise Tally ID="{0}" to ' \
'power "{1}"'.format(self.id, power)
raise ValueError(msg)
return new_tally
def __radd__(self, other):
"""Right addition with a scalar value.
This reverses the operands and calls the __add__ method.
Parameters
----------
other : Integer or Real
The scalar value to add to this tally
Returns
-------
Tally
A new derived tally of this tally added with the scalar value.
"""
return self + other
def __rsub__(self, other):
"""Right subtraction from a scalar value.
This reverses the operands and calls the __sub__ method.
Parameters
----------
other : Integer or Real
The scalar value to subtract this tally from
Returns
-------
Tally
A new derived tally of this tally subtracted from the scalar value.
"""
return -1. * self + other
def __rmul__(self, other):
"""Right multiplication with a scalar value.
This reverses the operands and calls the __mul__ method.
Parameters
----------
other : Integer or Real
The scalar value to multiply with this tally
Returns
-------
Tally
A new derived tally of this tally multiplied by the scalar value.
"""
return self * other
def __rdiv__(self, other):
"""Right division with a scalar value.
This reverses the operands and calls the __div__ method.
Parameters
----------
other : Integer or Real
The scalar value to divide by this tally
Returns
-------
Tally
A new derived tally of the scalar value divided by this tally.
"""
return other * self**-1
def __pos__(self):
"""The absolute value of this tally.
Returns
-------
Tally
A new derived tally which is the absolute value of this tally.
"""
new_tally = copy.deepcopy(self)
new_tally._mean = np.abs(new_tally.mean)
return new_tally
def __neg__(self):
"""The negated value of this tally.
Returns
-------
Tally
A new derived tally which is the negated value of this tally.
"""
new_tally = self * -1
return new_tally
def get_slice(self, scores=[], filters=[], filter_bins=[], nuclides=[]):
"""Build a sliced tally for the specified filters, scores and nuclides.
This method constructs a new tally to encapsulate a subset of the data
represented by this tally. The subset of data to include in the tally
slice is determined by the scores, filters and nuclides specified in
the input parameters.
Parameters
----------
scores : list of str
A list of one or more score strings
(e.g., ['absorption', 'nu-fission']; default is [])
filters : list of str
A list of filter type strings
(e.g., ['mesh', 'energy']; default is [])
filter_bins : list of Iterables
A list of the filter bins corresponding to the filter_types
parameter (e.g., [(1,), (0., 0.625e-6)]; default is []). Each bin
in the list is the integer ID for 'material', 'surface', 'cell',
'cellborn', and 'universe' Filters. Each bin is an integer for the
cell instance ID for 'distribcell Filters. Each bin is a 2-tuple of
floats for 'energy' and 'energyout' filters corresponding to the
energy boundaries of the bin of interest. The bin is a (x,y,z)
3-tuple for 'mesh' filters corresponding to the mesh cell of
interest. The order of the bins in the list must correspond to the
filter_types parameter.
nuclides : list of str
A list of nuclide name strings
(e.g., ['U-235', 'U-238']; default is [])
Returns
-------
Tally
A new tally which encapsulates the subset of data requested in the
order each filter, nuclide and score is listed in the parameters.
Raises
------
ValueError
When this method is called before the Tally is populated with data
by the StatePoint.read_results() method.
"""
# Ensure that StatePoint.read_results() was called first
if not self.derived and self.sum is None:
msg = 'Unable to use tally arithmetic with Tally ID="{0}" ' \
'since it does not contain any results.'.format(self.id)
raise ValueError(msg)
new_tally = copy.deepcopy(self)
if self.sum is not None:
new_sum = self.get_values(scores, filters, filter_bins,
nuclides, 'sum')
new_tally.sum = new_sum
if self.sum_sq is not None:
new_sum_sq = self.get_values(scores, filters, filter_bins,
nuclides, 'sum_sq')
new_tally.sum_sq = new_sum_sq
if self.mean is not None:
new_mean = self.get_values(scores, filters, filter_bins,
nuclides, 'mean')
new_tally._mean = new_mean
if self.std_dev is not None:
new_std_dev = self.get_values(scores, filters, filter_bins,
nuclides, 'std_dev')
new_tally._std_dev = new_std_dev
# SCORES
if scores:
score_indices = []
# Determine the score indices from any of the requested scores
for score in self.scores:
if score not in scores:
score_index = self.get_score_index(score)
score_indices.append(score_index)
# Loop over indices in reverse to remove excluded scores
for score_index in reversed(score_indices):
new_tally.remove_score(self.scores[score_index])
new_tally.num_score_bins -= 1
# NUCLIDES
if nuclides:
nuclide_indices = []
# Determine the nuclide indices from any of the requested nuclides
for nuclide in self.nuclides:
if nuclide.name not in nuclides:
nuclide_index = self.get_nuclide_index(nuclide.name)
nuclide_indices.append(nuclide_index)
# Loop over indices in reverse to remove excluded Nuclides
for nuclide_index in reversed(nuclide_indices):
new_tally.remove_nuclide(self.nuclides[nuclide_index])
# FILTERS
if filters:
# Determine the filter indices from any of the requested filters
for i, filter_type in enumerate(filters):
filter = new_tally.find_filter(filter_type)
# Remove and/or reorder | |
T1 = back_circulant(5)
sage: x = isotopism( (0,1,2,3,4) )
sage: y = isotopism(5) # identity
sage: z = isotopism(5) # identity
sage: T2 = T1.apply_isotopism(x, y, z)
sage: _, t1, t2, t3 = tau123(T1, T2)
sage: U1, U2 = tau_to_bitrade(t1, t2, t3)
sage: assert is_bitrade(U1, U2)
sage: U1
[0 1 2 3 4]
[1 2 3 4 0]
[2 3 4 0 1]
[3 4 0 1 2]
[4 0 1 2 3]
sage: U2
[4 0 1 2 3]
[0 1 2 3 4]
[1 2 3 4 0]
[2 3 4 0 1]
[3 4 0 1 2]
"""
c1 = t1.to_cycles()
c2 = t2.to_cycles()
c3 = t3.to_cycles()
pt_to_cycle1 = {}
pt_to_cycle2 = {}
pt_to_cycle3 = {}
for i in range(len(c1)):
for j in range(len(c1[i])):
pt_to_cycle1[c1[i][j]] = i
for i in range(len(c2)):
for j in range(len(c2[i])):
pt_to_cycle2[c2[i][j]] = i
for i in range(len(c3)):
for j in range(len(c3[i])):
pt_to_cycle3[c3[i][j]] = i
n = max(len(c1), len(c2), len(c3))
T1 = LatinSquare(n)
T2 = LatinSquare(n)
for r in range(len(c1)):
for c in range(len(c2)):
for s in range(len(c3)):
nr_common = len(reduce(set.intersection, \
[set(c1[r]), set(c2[c]), set(c3[s])]))
assert nr_common in [0, 1]
if nr_common == 1: T1[r, c] = s
for cycle in c1:
for pt1 in cycle:
pt2 = t1[pt1 - 1]
pt3 = t2[pt2 - 1]
assert t3[pt3 - 1] == pt1
r = pt_to_cycle1[pt1]
c = pt_to_cycle2[pt2]
s = pt_to_cycle3[pt3]
T2[r, c] = s
return T1, T2
def bitrade_from_group(a, b, c, G):
    """
    Construct a bitrade (T1, T2) from group elements a, b, c in G such
    that abc = 1 and the subgroups generated by a, b, c intersect
    (pairwise) only in the identity. Rows, columns, and symbols of the
    bitrade correspond to cosets of a, b, and c, respectively.
    EXAMPLES::
        sage: from sage.combinat.matrices.latin import *
        sage: a, b, c, G = alternating_group_bitrade_generators(1)
        sage: (T1, T2) = bitrade_from_group(a, b, c, G)
        sage: T1
        [ 0 -1 3 1]
        [-1 1 0 2]
        [ 1 3 2 -1]
        [ 2 0 -1 3]
        sage: T2
        [ 1 -1 0 3]
        [-1 0 2 1]
        [ 2 1 3 -1]
        [ 0 3 -1 2]
    """
    # Represent G by its right action on the cosets of the trivial
    # subgroup (the right regular representation).
    coset_action = gap.ActionHomomorphism(
        G, gap.RightCosets(G, gap.TrivialSubgroup(G)), gap.OnRight)
    images = [gap.Image(coset_action, g) for g in (a, b, c)]
    # GAP may wrap long permutations over several lines; strip the
    # newlines before handing them to Sage's Permutation constructor.
    t1, t2, t3 = [Permutation(str(img).replace('\n', '')) for img in images]
    return tau_to_bitrade(t1, t2, t3)
def is_disjoint(T1, T2):
    """
    Return True if the partial latin squares T1 and T2 are disjoint,
    i.e. T1[r, c] != T2[r, c] or T1[r, c] == T2[r, c] == -1 for each
    cell [r, c].
    EXAMPLES::
        sage: from sage.combinat.matrices.latin import is_disjoint, back_circulant, isotopism
        sage: is_disjoint(back_circulant(2), back_circulant(2))
        False
    ::
        sage: T1 = back_circulant(5)
        sage: x = isotopism( (0,1,2,3,4) )
        sage: y = isotopism(5) # identity
        sage: z = isotopism(5) # identity
        sage: T2 = T1.apply_isotopism(x, y, z)
        sage: is_disjoint(T1, T2)
        True
    """
    for row in range(T1.nrows()):
        for col in range(T1.ncols()):
            x = T1[row, col]
            y = T2[row, col]
            # a pair of empty cells never clashes
            if x < 0 and y < 0:
                continue
            if x == y:
                return False
    return True
def is_same_shape(T1, T2):
    """
    Return True if the partial latin squares T1, T2 have the same shape,
    i.e. T1[r, c] is empty if and only if T2[r, c] is empty.
    EXAMPLES::
        sage: from sage.combinat.matrices.latin import *
        sage: is_same_shape(elementary_abelian_2group(2), back_circulant(4))
        True
        sage: is_same_shape(LatinSquare(5), LatinSquare(5))
        True
        sage: is_same_shape(forward_circulant(5), LatinSquare(5))
        False
    """
    for row in range(T1.nrows()):
        for col in range(T1.ncols()):
            x = T1[row, col]
            y = T2[row, col]
            # both empty or both filled: shapes agree at this cell
            if x < 0 and y < 0:
                continue
            if x >= 0 and y >= 0:
                continue
            return False
    return True
def is_row_and_col_balanced(T1, T2):
    """
    Return True if the partial latin squares T1 and T2 are balanced:
    the symbols appearing in row r of T1 equal those appearing in row r
    of T2, for each r, and likewise for every column.
    EXAMPLES::
        sage: from sage.combinat.matrices.latin import *
        sage: T1 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: T2 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: is_row_and_col_balanced(T1, T2)
        True
        sage: T2 = matrix([[0,3,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: is_row_and_col_balanced(T1, T2)
        False
    """
    # compare the filled symbols row by row ...
    for r in range(T1.nrows()):
        if {x for x in T1.row(r) if x >= 0} != {x for x in T2.row(r) if x >= 0}:
            return False
    # ... then column by column
    for c in range(T1.ncols()):
        if {x for x in T1.column(c) if x >= 0} != {x for x in T2.column(c) if x >= 0}:
            return False
    return True
def dlxcpp_rows_and_map(P):
"""
Internal function for dlxcpp_find_completions. Given a partial
latin square P we construct a list of rows of a 0-1 matrix M such
that an exact cover of M corresponds to a completion of P to a
latin square.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: dlxcpp_rows_and_map(LatinSquare(2))
([[0, 4, 8],
[1, 5, 8],
[2, 4, 9],
[3, 5, 9],
[0, 6, 10],
[1, 7, 10],
[2, 6, 11],
[3, 7, 11]],
{(0, 4, 8): (0, 0, 0),
(0, 6, 10): (1, 0, 0),
(1, 5, 8): (0, 0, 1),
(1, 7, 10): (1, 0, 1),
(2, 4, 9): (0, 1, 0),
(2, 6, 11): (1, 1, 0),
(3, 5, 9): (0, 1, 1),
(3, 7, 11): (1, 1, 1)})
"""
assert P.nrows() == P.ncols()
n = P.nrows()
# We will need 3n^2 columns in total:
#
# n^2 for the xCy columns
# n^2 for the xRy columns
# n^2 for the xy columns
dlx_rows = []
cmap = {}
for r in range(n):
valsrow = P.vals_in_row(r)
for c in range(n):
valscol = P.vals_in_col(c)
for e in range(n):
# These should be constants
c_OFFSET = e + c*n
r_OFFSET = e + r*n + n*n
xy_OFFSET = 2*n*n + r*n + c
cmap[(c_OFFSET, r_OFFSET, xy_OFFSET)] = (r,c,e)
#if P[r, c] >= 0: continue
# We only want the correct value to pop in here
if P[r, c] >= 0 and P[r, c] != e: continue
if P[r, c] < 0 and e in valsrow: continue
if P[r, c] < 0 and e in valscol: continue
dlx_rows.append([c_OFFSET, r_OFFSET, xy_OFFSET])
return dlx_rows, cmap
def dlxcpp_find_completions(P, nr_to_find=None):
    """
    Returns a list of all latin squares L of the same order as P such
    that P is contained in L. The optional parameter nr_to_find
    limits the number of latin squares that are found.

    EXAMPLES::

        sage: from sage.combinat.matrices.latin import *
        sage: dlxcpp_find_completions(LatinSquare(2))
        [[0 1]
        [1 0], [1 0]
        [0 1]]

    ::

        sage: dlxcpp_find_completions(LatinSquare(2), 1)
        [[0 1]
        [1 0]]
    """
    # Hoisted out of the solution loop; it used to be re-executed per solution.
    from copy import deepcopy

    assert P.nrows() == P.ncols()

    dlx_rows, cmap = dlxcpp_rows_and_map(P)

    # Collect distinct solutions (as sorted tuples of exact-cover row
    # indices) until the search is exhausted or nr_to_find are found.
    solutions = {}
    for x in DLXCPP(dlx_rows):
        x.sort()
        solutions[tuple(x)] = True
        if nr_to_find is not None and len(solutions) >= nr_to_find:
            break

    comps = []
    for soln in solutions:
        Q = deepcopy(P)
        for row_idx in soln:
            (r, c, e) = cmap[tuple(dlx_rows[row_idx])]
            if Q[r, c] >= 0:
                # A pre-filled cell must agree with the solution.
                assert Q[r, c] == e
            else:
                Q[r, c] = e
        comps.append(Q)
    return comps
def bitrade(T1, T2):
r"""
Form the bitrade (Q1, Q2) from (T1, T2) by setting empty the cells
(r, c) such that T1[r, c] == T2[r, c].
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: B1 = back_circulant(5)
sage: alpha = isotopism((0,1,2,3,4))
sage: beta = isotopism((1,0,2,3,4))
sage: gamma = isotopism((2,1,0,3,4))
sage: B2 = B1.apply_isotopism(alpha, beta, gamma)
sage: T1, T2 = bitrade(B1, B2)
sage: T1
[ 0 1 -1 3 4]
[ 1 -1 -1 4 0]
[ 2 -1 4 0 1]
[ 3 4 0 1 2]
[ 4 0 1 2 3]
sage: T2
[ 3 4 -1 0 1]
[ 0 -1 -1 1 4]
[ 1 -1 0 4 2]
[ 4 0 1 2 3]
[ 2 1 4 3 0]
"""
assert T1.nrows() == T1.ncols()
assert T2.nrows() == T2.ncols()
assert T1.nrows() == T2.nrows()
n = T1.nrows()
from copy import copy
Q1 = copy(T1)
Q2 = copy(T2)
for r in range(n):
for c in range(n):
if T1[r, c] == T2[r, c]:
Q1[r, c] = | |
Input tensor, shape = (N x C x H x W)
:type input: torch.tensor [FloatTensor]
:param out_block: Tensor for skip connection, shape = (N x C x H x W), defaults to None
:type out_block: torch.tensor [FloatTensor], optional
:param indices: Indices used for unpooling operation, defaults to None
:type indices: torch.tensor, optional
:param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None
:type weights: torch.tensor, optional
:return: Forward pass
:rtype: torch.tensor
"""
unpool = self.unpool(input, indices)
if out_block is not None:
concat = torch.cat((out_block, unpool), dim=1)
else:
concat = unpool
out_block = super(SDnetDecoderBlock, self).forward(concat, weights)
if self.SELayer:
out_block = self.SELayer(out_block, weights)
if self.drop_out_needed:
out_block = self.drop_out(out_block)
return out_block
class SDNetNoBNEncoderBlock(nn.Module):
"""
Encoder Block for Bayesian Network
"""
def __init__(self, params):
super(SDNetNoBNEncoderBlock, self).__init__()
padding_h = int((params['kernel_h'] - 1) / 2)
padding_w = int((params['kernel_w'] - 1) / 2)
self.out_channel = params['num_filters']
self.conv = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'],
kernel_size=(
params['kernel_h'], params['kernel_w']),
padding=(padding_h, padding_w),
stride=params['stride_conv'])
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(
kernel_size=params['pool'], stride=params['stride_pool'], return_indices=True)
def forward(self, input):
x1 = self.conv(input)
x2 = self.relu(x1)
out_encoder, indices = self.maxpool(x2)
return out_encoder, x2, indices
class SDNetNoBNDecoderBlock(nn.Module):
"""
Decoder Block for Bayesian Network
"""
def __init__(self, params):
super(SDNetNoBNDecoderBlock, self).__init__()
padding_h = int((params['kernel_h'] - 1) / 2)
padding_w = int((params['kernel_w'] - 1) / 2)
self.out_channel = params['num_filters']
self.conv = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'],
kernel_size=(
params['kernel_h'], params['kernel_w']),
padding=(padding_h, padding_w),
stride=params['stride_conv'])
self.relu = nn.ReLU()
self.unpool = nn.MaxUnpool2d(
kernel_size=params['pool'], stride=params['stride_pool'])
def forward(self, input, out_block=None, indices=None):
unpool = self.unpool(input, indices)
if out_block is not None:
concat = torch.cat((out_block, unpool), dim=1)
else:
concat = unpool
x1 = self.conv(concat)
x2 = self.relu(x1)
return x2
class ConcatBlock(nn.Module):
    """Concatenate a second tensor onto `input` along the channel axis,
    optionally broadcasting the second tensor to input's spatial size first."""

    def __init__(self, params):
        super(ConcatBlock, self).__init__()
        # Whether `another_input` must be expanded to input's H x W first.
        self.broadcasting_needed = params['broadcasting_needed']

    def forward(self, input, another_input):
        if self.broadcasting_needed:
            n, c, h, w = input.shape
            # NOTE(review): expand(h, w) only succeeds when another_input's
            # trailing dims broadcast to (h, w), and then yields a 2D tensor,
            # so the 3D check below never fires and torch.cat with a 4D input
            # would fail. Possibly expand(n, c, h, w) was intended — confirm
            # against callers before relying on this branch.
            modified_inp = another_input.expand(h, w)
        else:
            modified_inp = another_input
        if len(modified_inp.shape) == 3:
            # Add a batch dim so a (C, H, W) tensor can be concatenated.
            modified_inp = modified_inp.unsqueeze(0)
        concat = torch.cat((input, modified_inp), dim=1)
        return concat
class DenseBlockNoBN(nn.Module):
    """Densely-connected block with the batch-norm layers removed.

    Two 2D convolutions whose inputs are dense concatenations of all earlier
    feature maps, followed by a 1x1 bottleneck convolution. An optional
    squeeze-and-excitation layer and dropout can be enabled via `params`.

    :param params: {
        'num_channels':1,
        'num_filters':64,
        'kernel_h':5,
        'kernel_w':5,
        'stride_conv':1,
        'pool':2,
        'stride_pool':2,
        'num_classes':28,
        'se_block': se.SELayer.None,
        'drop_out':0,2}
    :type params: dict
    :param se_block_type: Squeeze and Excite block type to be included, defaults to None
    :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional
    :return: forward passed tensor
    :rtype: torch.tensor [FloatTensor]
    """

    def __init__(self, params, se_block_type=None):
        super(DenseBlockNoBN, self).__init__()
        filters = params['num_filters']
        channels = params['num_channels']
        # Pick the squeeze-and-excitation variant requested (None disables it).
        se_variants = {
            se.SELayer.CSE.value: se.ChannelSELayer,
            se.SELayer.SSE.value: se.SpatialSELayer,
            se.SELayer.CSSE.value: se.ChannelSpatialSELayer,
        }
        if se_block_type in se_variants:
            self.SELayer = se_variants[se_block_type](filters)
        else:
            self.SELayer = None

        kernel = (params['kernel_h'], params['kernel_w'])
        padding = ((kernel[0] - 1) // 2, (kernel[1] - 1) // 2)
        stride = params['stride_conv']
        cat1_channels = channels + filters        # input ++ conv1 output
        cat2_channels = channels + 2 * filters    # input ++ conv1 ++ conv2
        self.conv1 = nn.Conv2d(in_channels=channels, out_channels=filters,
                               kernel_size=kernel, padding=padding,
                               stride=stride)
        self.conv2 = nn.Conv2d(in_channels=cat1_channels, out_channels=filters,
                               kernel_size=kernel, padding=padding,
                               stride=stride)
        self.conv3 = nn.Conv2d(in_channels=cat2_channels, out_channels=filters,
                               kernel_size=(1, 1), padding=(0, 0),
                               stride=stride)
        # A single PReLU is shared by all three activations, so its learnable
        # slope is common to every stage (matches the original layout).
        self.prelu = nn.PReLU()
        self.drop_out_needed = params['drop_out'] > 0
        if self.drop_out_needed:
            self.drop_out = nn.Dropout2d(params['drop_out'])

    def forward(self, input):
        """Forward pass.

        :param input: Input tensor, shape = (N x C x H x W)
        :type input: torch.tensor [FloatTensor]
        :return: Forward passed tensor
        :rtype: torch.tensor [FloatTensor]
        """
        first = self.conv1(self.prelu(input))
        dense1 = torch.cat((input, first), dim=1)
        second = self.conv2(self.prelu(dense1))
        dense2 = torch.cat((input, first, second), dim=1)
        return self.conv3(self.prelu(dense2))
class EncoderBlockNoBN(DenseBlockNoBN):
    """Dense encoder block (no batch norm) with maxpool and optional SE block.

    :param params: same dict as DenseBlockNoBN
    :type params: dict
    :param se_block_type: Squeeze and Excite block type to be included, defaults to None
    :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional
    :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling
    :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor]
    """

    def __init__(self, params, se_block_type=None):
        super(EncoderBlockNoBN, self).__init__(params, se_block_type=se_block_type)
        # return_indices=True so the matching decoder can unpool exactly.
        self.maxpool = nn.MaxPool2d(
            kernel_size=params['pool'], stride=params['stride_pool'],
            return_indices=True)

    def forward(self, input, weights=None):
        """Forward pass.

        :param input: Input tensor, shape = (N x C x H x W)
        :type input: torch.tensor [FloatTensor]
        :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None
        :type weights: torch.tensor, optional
        :return: output tensor with maxpool, output tensor without maxpool, indices for unpooling
        :rtype: torch.tensor [FloatTensor], torch.tensor [FloatTensor], torch.tensor [LongTensor]
        """
        features = super(EncoderBlockNoBN, self).forward(input)
        if self.SELayer:
            features = self.SELayer(features, weights)
        if self.drop_out_needed:
            features = self.drop_out(features)
        pooled, indices = self.maxpool(features)
        return pooled, features, indices
class DecoderBlockNoBN(DenseBlockNoBN):
    """Dense decoder block (no batch norm) with maxunpool and optional skip
    connections and SE block.

    :param params: same dict as DenseBlockNoBN
    :type params: dict
    :param se_block_type: Squeeze and Excite block type to be included, defaults to None
    :type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional
    :return: forward passed tensor
    :rtype: torch.tensor [FloatTensor]
    """

    def __init__(self, params, se_block_type=None):
        super(DecoderBlockNoBN, self).__init__(params, se_block_type=se_block_type)
        self.unpool = nn.MaxUnpool2d(
            kernel_size=params['pool'], stride=params['stride_pool'])

    def forward(self, input, out_block=None, indices=None, weights=None):
        """Forward pass.

        :param input: Input tensor, shape = (N x C x H x W)
        :type input: torch.tensor [FloatTensor]
        :param out_block: Tensor for skip connection, shape = (N x C x H x W), defaults to None
        :type out_block: torch.tensor [FloatTensor], optional
        :param indices: Indices used for unpooling operation, defaults to None
        :type indices: torch.tensor, optional
        :param weights: Weights used for squeeze and excitation, shape depends on the type of SE block, defaults to None
        :type weights: torch.tensor, optional
        :return: Forward passed tensor
        :rtype: torch.tensor [FloatTensor]
        :raises NotImplementedError: if `indices` is None (transposed-conv
            upsampling is not implemented yet).
        """
        if indices is None:
            # Previously this branch only printed a message and then crashed
            # with a NameError on the undefined `unpool`; fail loudly instead.
            # TODO: Implement Conv Transpose upsampling for the indices-free case.
            raise NotImplementedError(
                "DecoderBlockNoBN requires pooling indices; "
                "Conv Transpose upsampling is not implemented yet")
        unpool = self.unpool(input, indices)
        if out_block is not None:
            concat = torch.cat((out_block, unpool), dim=1)
        else:
            concat = unpool
        out_block = super(DecoderBlockNoBN, self).forward(concat)
        if self.SELayer:
            out_block = self.SELayer(out_block, weights)
        if self.drop_out_needed:
            out_block = self.drop_out(out_block)
        return out_block
class FullyPreActivatedResBlock(nn.Module):
def __init__(self, params, concat_extra):
super(FullyPreActivatedResBlock, self).__init__()
# padding_h = int((params['kernel_h'] - 1) / 2)
# padding_w = int((params['kernel_w'] - 1) / 2)
# self.conv = nn.Conv2d(in_channels=params['num_channels']+concat_extra, out_channels=params['num_filters'],
# kernel_size=(
# params['kernel_h'], params['kernel_w']),
# padding=(padding_h, padding_w),
# stride=params['stride_conv'])
input_size = params['num_channels']+concat_extra
self.conv1 = nn.Conv2d(in_channels=input_size, out_channels=params['num_filters'],
kernel_size= (3,3),
padding=(1,1),
stride=params['stride_conv'])
self.conv2 = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'],
kernel_size=( 3,3),
padding=(1,1),
stride=params['stride_conv'])
self.conv3 = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'],
kernel_size=( 3,3),
padding=(1,1),
stride=params['stride_conv'])
self.conv4 = nn.Conv2d(in_channels=params['num_channels'], out_channels=params['num_filters'],
kernel_size=( 3,3),
padding=(1,1),
stride=params['stride_conv'])
self.batchnorm1 = nn.BatchNorm2d(num_features=input_size)
self.batchnorm2 = nn.BatchNorm2d(num_features=params['num_channels'])
self.batchnorm3 = nn.BatchNorm2d(num_features=params['num_channels'])
self.batchnorm4 = nn.BatchNorm2d(num_features=params['num_channels'])
self.prelu = nn.PReLU()
def forward(self, input, depth):
#return input
# input = self.conv(input)
if depth >= 1:
o1 = self.batchnorm1(input)
o2 = self.prelu(o1)
o4 = self.conv1(o2)
# # out = o3
# o5 = self.batchnorm2(o3)
# o6 = self.prelu(o5)
# o7 = self.conv2(o6)
#
# o8 = o3 + o7
# # o8 = torch.stack([o3,o7], dim=0).sum(dim=1)
#
# #
# o9 = self.batchnorm2(o8)
# o10 = self.prelu(o9)
# o11 = self.conv2(o10)
# #
# # # o12 = o7 + o11
# # #
# # # o13 = self.batchnorm4(o12)
# # # o14 = self.prelu(o13)
# # # o15 = self.conv4(o14)
out = o4
if depth >= 2:
o5 = self.batchnorm2(o4)
o6 = self.prelu(o5)
o7 = self.conv2(o6)
o8 = o4 + o7
out = o8
if depth >= 3:
o9 = self.batchnorm3(o8)
o10 = self.prelu(o9)
o11 = self.conv3(o10)
o12 = o11 + o8
out = o12
if depth >= 4:
o13 = self.batchnorm4(o12)
o14 = self.prelu(o13)
o15 = self.conv4(o14)
o16 = o15 + o12
out = o16
if depth > 4:
raise Exception('Depth more than 4 does not supported!!!')
return out
class FullBayesianDenseBlock(nn.Module):
"""Block with dense connections
:param params: {
'num_channels':1,
'num_filters':64,
'kernel_h':5,
'kernel_w':5,
'stride_conv':1,
'pool':2,
'stride_pool':2,
'num_classes':28,
'se_block': se.SELayer.None,
'drop_out':0,2}
:type params: dict
:param se_block_type: Squeeze and Excite block type to be included, defaults to None
:type se_block_type: str, valid options are {'NONE', 'CSE', 'SSE', 'CSSE'}, optional
:return: forward passed tensor
:rtype: torch.tonsor [FloatTensor]
"""
def __init__(self, params, se_block_type=None):
super(FullBayesianDenseBlock, self).__init__()
if se_block_type == se.SELayer.CSE.value:
self.SELayer = se.ChannelSELayer(params['num_filters'])
elif se_block_type == se.SELayer.SSE.value:
| |
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"or_equal": None,
"parse_strings_as_datetimes": None,
"allow_cross_type_comparisons": None,
"ignore_row_if": "both_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_column_pair_values_to_be_in_set": {
"domain_kwargs": [
"column_A",
"column_B",
"row_condition",
"condition_parser",
],
"success_kwargs": ["value_pairs_set", "ignore_row_if"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ignore_row_if": "both_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"expect_multicolumn_values_to_be_unique": {
"domain_kwargs": ["column_list", "row_condition", "condition_parser"],
"success_kwargs": ["ignore_row_if"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"ignore_row_if": "all_values_are_missing",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"_expect_column_values_to_be_of_type__aggregate": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"_expect_column_values_to_be_of_type__map": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"_expect_column_values_to_be_in_type_list__aggregate": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_list", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
"_expect_column_values_to_be_in_type_list__map": {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
"success_kwargs": ["type_list", "mostly"],
"default_kwarg_values": {
"row_condition": None,
"condition_parser": "pandas",
"mostly": None,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
},
},
}
runtime_kwargs = ["result_format", "include_config", "catch_exceptions"]
def __init__(self, expectation_type, kwargs, meta=None, success_on_last_run=None):
if not isinstance(expectation_type, str):
raise InvalidExpectationConfigurationError(
"expectation_type must be a string"
)
self._expectation_type = expectation_type
if not isinstance(kwargs, dict):
raise InvalidExpectationConfigurationError(
"expectation configuration kwargs must be a dict."
)
self._kwargs = kwargs
if meta is None:
meta = {}
# We require meta information to be serializable, but do not convert until necessary
ensure_json_serializable(meta)
self.meta = meta
self.success_on_last_run = success_on_last_run
def patch(self, op: str, path: str, value: Any) -> "ExpectationConfiguration":
"""
Args:
op: A jsonpatch operation. One of 'add', 'replace', or 'remove'
path: A jsonpatch path for the patch operation
value: The value to patch
Returns:
The patched ExpectationConfiguration object
"""
if op not in ["add", "replace", "remove"]:
raise ValueError("Op must be either 'add', 'replace', or 'remove'")
try:
valid_path = path.split("/")[1]
except IndexError:
raise IndexError(
"Ensure you have a valid jsonpatch path of the form '/path/foo' "
"(see http://jsonpatch.com/)"
)
if valid_path not in self.get_runtime_kwargs().keys():
raise ValueError("Path not available in kwargs (see http://jsonpatch.com/)")
# TODO: Call validate_kwargs when implemented
patch = jsonpatch.JsonPatch([{"op": op, "path": path, "value": value}])
patch.apply(self.kwargs, in_place=True)
return self
    @property
    def expectation_type(self):
        """str: the expectation's type name (read-only)."""
        return self._expectation_type
    @property
    def kwargs(self):
        """dict: the raw kwargs supplied for this expectation (read-only)."""
        return self._kwargs
def _get_default_custom_kwargs(self):
# NOTE: this is a holdover until class-first expectations control their
# defaults, and so defaults are inherited.
if self.expectation_type.startswith("expect_column_pair"):
return {
"domain_kwargs": [
"column_A",
"column_B",
"row_condition",
"condition_parser",
],
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": [],
"default_kwarg_values": {
"column_A": None,
"column_B": None,
"row_condition": None,
"condition_parser": None,
},
}
elif self.expectation_type.startswith("expect_column"):
return {
"domain_kwargs": ["column", "row_condition", "condition_parser"],
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": [],
"default_kwarg_values": {
"column": None,
"row_condition": None,
"condition_parser": None,
},
}
logger.error("Requested kwargs for an unrecognized expectation.")
return {
"domain_kwargs": [],
# NOTE: this is almost certainly incomplete; subclasses should override
"success_kwargs": [],
"default_kwarg_values": {},
}
def get_domain_kwargs(self):
expectation_kwargs_dict = self.kwarg_lookup_dict.get(
self.expectation_type, None
)
if expectation_kwargs_dict is None:
expectation_kwargs_dict = self._get_default_custom_kwargs()
domain_kwargs = {
key: self.kwargs.get(
key, expectation_kwargs_dict.get("default_kwarg_values").get(key)
)
for key in expectation_kwargs_dict["domain_kwargs"]
}
missing_kwargs = set(expectation_kwargs_dict["domain_kwargs"]) - set(
domain_kwargs.keys()
)
if missing_kwargs:
raise InvalidExpectationKwargsError(
f"Missing domain kwargs: {list(missing_kwargs)}"
)
return domain_kwargs
def get_success_kwargs(self):
expectation_kwargs_dict = self.kwarg_lookup_dict.get(
self.expectation_type, None
)
if expectation_kwargs_dict is None:
expectation_kwargs_dict = self._get_default_custom_kwargs()
domain_kwargs = self.get_domain_kwargs()
success_kwargs = {
key: self.kwargs.get(
key, expectation_kwargs_dict.get("default_kwarg_values").get(key)
)
for key in expectation_kwargs_dict["success_kwargs"]
}
success_kwargs.update(domain_kwargs)
return success_kwargs
def get_runtime_kwargs(self):
expectation_kwargs_dict = self.kwarg_lookup_dict.get(
self.expectation_type, None
)
if expectation_kwargs_dict is None:
expectation_kwargs_dict = self._get_default_custom_kwargs()
success_kwargs = self.get_success_kwargs()
runtime_kwargs = {
key: self.kwargs.get(
key, expectation_kwargs_dict.get("default_kwarg_values").get(key)
)
for key in self.runtime_kwargs
}
runtime_kwargs.update(success_kwargs)
return runtime_kwargs
def applies_to_same_domain(self, other_expectation_configuration):
if (
not self.expectation_type
== other_expectation_configuration.expectation_type
):
return False
return (
self.get_domain_kwargs()
== other_expectation_configuration.get_domain_kwargs()
)
def isEquivalentTo(self, other, match_type="success"):
"""ExpectationConfiguration equivalence does not include meta, and relies on *equivalence* of kwargs."""
if not isinstance(other, self.__class__):
if isinstance(other, dict):
try:
other = expectationConfigurationSchema.load(other)
except ValidationError:
logger.debug(
"Unable to evaluate equivalence of ExpectationConfiguration object with dict because "
"dict other could not be instantiated as an ExpectationConfiguration"
)
return NotImplemented
else:
# Delegate comparison to the other instance
return NotImplemented
if match_type == "domain":
return all(
(
self.expectation_type == other.expectation_type,
self.get_domain_kwargs() == other.get_domain_kwargs(),
)
)
elif match_type == "success":
return all(
(
self.expectation_type == other.expectation_type,
self.get_success_kwargs() == other.get_success_kwargs(),
)
)
elif match_type == "runtime":
return all(
(
self.expectation_type == other.expectation_type,
self.kwargs == other.kwargs,
)
)
def __eq__(self, other):
"""ExpectationConfiguration equality does include meta, but ignores instance identity."""
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return all(
(
self.expectation_type == other.expectation_type,
self.kwargs == other.kwargs,
self.meta == other.meta,
)
)
    def __ne__(self, other):
        """Inverse of __eq__ (Python 2 holdover; Python 3 derives this)."""
        # By using the == operator, the returned NotImplemented is handled correctly.
        return not self == other
    def __repr__(self):
        """Compact JSON representation of this configuration."""
        return json.dumps(self.to_json_dict())
    def __str__(self):
        """Pretty-printed (indented) JSON representation of this configuration."""
        return json.dumps(self.to_json_dict(), indent=2)
    def to_json_dict(self):
        """Serialize this configuration to a plain JSON-compatible dict."""
        myself = expectationConfigurationSchema.dump(self)
        # NOTE - JPC - 20191031: migrate to expectation-specific schemas that subclass result with properly-typed
        # schemas to get serialization all-the-way down via dump
        myself["kwargs"] = convert_to_json_serializable(myself["kwargs"])
        return myself
def get_evaluation_parameter_dependencies(self):
parsed_dependencies = dict()
for key, value in self.kwargs.items():
if isinstance(value, dict) and "$PARAMETER" in value:
param_string_dependencies = find_evaluation_parameter_dependencies(
value["$PARAMETER"]
)
nested_update(parsed_dependencies, param_string_dependencies)
dependencies = dict()
urns = parsed_dependencies.get("urns", [])
for string_urn in urns:
try:
urn = ge_urn.parseString(string_urn)
except ParserError:
logger.warning(
"Unable to parse great_expectations urn {}".format(
value["$PARAMETER"]
)
)
continue
if not urn.get("metric_kwargs"):
nested_update(
dependencies, {urn["expectation_suite_name"]: [urn["metric_name"]]},
)
else:
nested_update(
dependencies,
{
urn["expectation_suite_name"]: [
{
"metric_kwargs_id": {
urn["metric_kwargs"]: [urn["metric_name"]]
}
}
]
},
)
dependencies = _deduplicate_evaluation_parameter_dependencies(dependencies)
return dependencies
class ExpectationConfigurationSchema(Schema):
    """Marshmallow schema for (de)serializing ExpectationConfiguration objects."""

    expectation_type = fields.Str(
        required=True,
        error_messages={
            "required": "expectation_type missing in expectation configuration"
        },
    )
    kwargs = fields.Dict()
    meta = fields.Dict()

    # noinspection PyUnusedLocal
    @post_load
    def make_expectation_configuration(self, data, **kwargs):
        """Build an ExpectationConfiguration from the validated fields."""
        return ExpectationConfiguration(**data)
# TODO: re-enable once we can allow arbitrary keys but still add this sort of validation
# class MetaDictSchema(Schema):
# """The MetaDict """
#
# # noinspection PyUnusedLocal
# @validates_schema
# def validate_json_serializable(self, data, **kwargs):
# import json
# try:
# json.dumps(data)
# except (TypeError, OverflowError):
# raise ValidationError("meta information must be json serializable.")
class ExpectationSuite(object):
"""
This ExpectationSuite object has create, read, update, and delete functionality for its expectations:
-create: self.add_expectation()
-read: self.find_expectation_indexes()
-update: self.add_expectation() or self.patch_expectation()
-delete: self.remove_expectation()
"""
def __init__(
self,
expectation_suite_name,
expectations=None,
evaluation_parameters=None,
data_asset_type=None,
meta=None,
):
self.expectation_suite_name = expectation_suite_name
if expectations is None:
expectations = []
self.expectations = [
ExpectationConfiguration(**expectation)
if isinstance(expectation, dict)
else expectation
for expectation in expectations
]
if evaluation_parameters is None:
evaluation_parameters = {}
self.evaluation_parameters = evaluation_parameters
self.data_asset_type = data_asset_type
if meta is None:
meta = {"great_expectations_version": ge_version}
if (
"great_expectations.__version__" not in meta.keys()
and "great_expectations_version" not in meta.keys()
):
meta["great_expectations_version"] = ge_version
# We require meta information to be serializable, but do not convert until necessary
ensure_json_serializable(meta)
self.meta = meta
def add_citation(
self,
comment,
batch_kwargs=None,
batch_markers=None,
batch_parameters=None,
citation_date=None,
):
if "citations" not in self.meta:
self.meta["citations"] = []
self.meta["citations"].append(
{
"citation_date": citation_date
or datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
),
"batch_kwargs": batch_kwargs,
"batch_markers": batch_markers,
"batch_parameters": batch_parameters,
"comment": comment,
}
)
def isEquivalentTo(self, other):
"""
ExpectationSuite equivalence relies only on expectations and evaluation parameters. It does not include:
- data_asset_name
- expectation_suite_name
- meta
- data_asset_type
"""
if not isinstance(other, self.__class__):
if isinstance(other, dict):
try:
other = expectationSuiteSchema.load(other)
except ValidationError:
logger.debug(
"Unable to evaluate equivalence of ExpectationConfiguration object with dict because "
"dict other could not be instantiated as an ExpectationConfiguration"
)
return NotImplemented
else:
# Delegate comparison to the other instance
return NotImplemented
return len(self.expectations) == len(other.expectations) and all(
[
mine.isEquivalentTo(theirs)
for (mine, theirs) in zip(self.expectations, other.expectations)
]
)
def __eq__(self, other):
"""ExpectationSuite equality ignores instance identity, relying only on properties."""
if not isinstance(other, self.__class__):
# Delegate comparison to the other instance's __eq__.
return NotImplemented
return all(
(
self.expectation_suite_name == other.expectation_suite_name,
self.expectations == other.expectations,
self.evaluation_parameters == other.evaluation_parameters,
self.data_asset_type == other.data_asset_type,
self.meta == other.meta,
)
)
    def __ne__(self, other):
        """Inverse of __eq__ (Python 2 holdover; Python 3 derives this)."""
        # By using the == operator, the returned NotImplemented is handled correctly.
        return not self == other
def __repr__(self):
return json.dumps(self.to_json_dict(), | |
# -*- coding: utf-8 -*-
from __future__ import print_function
import stmpy
import sys
import numpy as np
import os
import matplotlib as mpl
#import scipy.interpolate as sin #this is a stupid name for this package...
from scipy.interpolate import interp1d
import scipy.optimize as opt
import scipy.ndimage as snd
from scipy.signal import butter, filtfilt, fftconvolve, hilbert, correlate
def interp2d(x, y, z, kind='nearest', **kwargs):
    '''
    An extension for scipy.interpolate.interp2d() which adds a 'nearest'
    neighbor interpolation.

    See help(scipy.interpolate.interp2d) for details.

    Inputs:
        x - Required : Array containing x values for data points.
        y - Required : Array containing y values for data points.
        z - Required : Array containing z values for data points.
        kind - Optional : String for interpolation scheme. Options are:
                          'nearest', 'linear', 'cubic', 'quintic'. Note
                          that 'linear', 'cubic', 'quintic' use spline.
        **kwargs - Optional : Keyword arguments passed to
                              scipy.interpolate.interp2d

    Returns:
        f(x,y) - Callable function which will return interpolated values.

    History:
        2017-08-24 - HP : Initial commit.
    '''
    # BUG FIX: this comparison previously used `kind is 'nearest'`, an
    # identity test that only works because CPython interns short string
    # literals; use equality.
    if kind != 'nearest':
        from scipy.interpolate import interp2d as scinterp2d
        return scinterp2d(x, y, z, kind=kind, **kwargs)

    from scipy.interpolate import NearestNDInterpolator
    X, Y = np.meshgrid(x, y)
    points = np.array([X.flatten(), Y.flatten()]).T
    fActual = NearestNDInterpolator(points, z.flatten())

    def fCall(xq, yq):
        # Scalars count as length-1 axes so the output is always 2D.
        lx = xq.shape[0] if isinstance(xq, np.ndarray) else 1
        ly = yq.shape[0] if isinstance(yq, np.ndarray) else 1
        Xq, Yq = np.meshgrid(xq, yq)
        query = np.array([Xq.flatten(), Yq.flatten()]).T
        return fActual(query).reshape(lx, ly)

    return fCall
def azimuthalAverage(F, x0, y0, r, theta=np.linspace(0,2*np.pi,500),
        kind='linear'):
    ''' Uses 2D interpolation to average F over an arc defined by theta
    for every r value starting from x0,y0.

    History:
        2017-08-24 - HP : Modified to use stmpy.tools.interp2d().
    '''
    # Interpolator over pixel coordinates: columns are x, rows are y.
    f = interp2d(np.arange(F.shape[1]), np.arange(F.shape[0]), F, kind=kind)
    Z = np.zeros_like(r)
    arcVals = np.zeros_like(theta)
    for ix, radius in enumerate(r):
        # Sample the arc of this radius at every theta, then average.
        xs = radius * np.cos(theta) + x0
        ys = radius * np.sin(theta) + y0
        for iy, (xn, yn) in enumerate(zip(xs, ys)):
            arcVals[iy] = f(xn, yn)
        Z[ix] = np.mean(arcVals)
    return Z
def azimuthalAverageRaw(F, x0, y0, rmax):
    ''' Azimuthally average beginning at x0,y0 to a maximum distance rmax.

    Inputs:
        F - 2D array, indexed as F[y, x].
        x0, y0 - center of the average (in pixel coordinates).
        rmax - maximum radius (inclusive) to include.

    Returns:
        R - 1D array of distinct radii, sorted ascending.
        FAvg - 1D array of the mean of F over all pixels at each radius.
    '''
    # Group pixel values by their exact distance from (x0, y0) in a single
    # pass. The previous implementation rescanned every collected pixel once
    # per distinct radius, which was accidentally quadratic.
    groups = {}
    for x in range(F.shape[1]):
        for y in range(F.shape[0]):
            r = np.sqrt((x - x0)**2 + (y - y0)**2)
            if r <= rmax:
                groups.setdefault(r, []).append(F[y, x])
    radii = sorted(groups)
    R = np.array(radii)
    FAvg = np.array([np.mean(groups[r]) for r in radii])
    return R, FAvg
def arc_linecut(data, p0, length, angle, width=20, dl=0, dw=100, kind='linear',
                show=False, ax=None, **kwarg):
    '''A less cumbersome wrapper for stmpy.tools.azimuthalAverage. Computes an
    arc-averaged linecut on 2D data, or on each layer in 3D data.

    Inputs:
        data    - Required : A 2D or 3D numpy array.
        p0      - Required : Tuple (x0, y0) with the starting point of the cut.
        length  - Required : Float containing length of linecut to compute.
        angle   - Required : Angle (IN DEGREES) to take the linecut along.
        width   - Optional : Angle (IN DEGREES) to average over.
        dl      - Optional : Extra pixels for interpolation along the cut.
        dw      - Optional : Number of pixels for interpolation in the
                             azimuthal direction: default 100.
        kind    - Optional : String for interpolation scheme. Options are:
                             'nearest', 'linear', 'cubic', 'quintic'. Note
                             that 'linear', 'cubic', 'quintic' use spline.
        show    - Optional : Boolean determining whether to plot where the
                             linecut was taken.
        ax      - Optional : Matplotlib axes instance to plot where linecut is
                             taken.  If show=True you MUST provide an axes
                             instance as plotting is done using ax.plot().
        **kwarg - Optional : Additional keyword arguments passed to ax.plot().

    Returns:
        r   - 1D numpy array which goes from 0 to the length of the cut.
        cut - 1D or 2D numpy array containing the linecut.

    Usage:
        r, cut = arc_linecut(data, cen, length, angle, width=20, dl=0, dw=100,
                             show=False, ax=None, **kwarg):

    History:
        2017-07-20 - HP : Initial commit.
        2017-08-24 - HP : Modified to use stmpy.tools.interp2d() for
                          interpolation, which allows for 'nearest'.
    '''
    theta = np.radians(angle)
    dtheta = np.radians(width / 2.0)
    r = np.linspace(0, length, round(length + dl))
    t = np.linspace(theta - dtheta, theta + dtheta, round(dw))
    ndim = len(data.shape)
    if ndim == 2:
        cut = azimuthalAverage(data, p0[0], p0[1], r, t, kind=kind)
    elif ndim == 3:
        # Apply the same arc average to every layer independently.
        cut = np.zeros([data.shape[0], len(r)])
        for ix, layer in enumerate(data):
            cut[ix] = azimuthalAverage(layer, p0[0], p0[1], r, t, kind=kind)
    else:
        raise TypeError('Data must be 2D or 3D numpy array.')
    if show:
        # Draw the two edges bounding the averaged arc.
        for edge in (theta - dtheta, theta + dtheta):
            ax.plot([p0[0], p0[0] + length * np.cos(edge)],
                    [p0[1], p0[1] + length * np.sin(edge)],
                    'k--', lw=1, **kwarg)
    return r, cut
def binData(x, y, nBins):
    ''' Bin randomly sampled data (x, y) into a histogram with linear bin
    spacing.

    Inputs:
        x, y  - Required : Sequences of sample coordinates and values.
        nBins - Required : Number of equally spaced bins covering [0, max(x)).

    Returns:
        X - List of bin-center coordinates.
        Y - List of the mean of y over each bin (NaN for empty bins).

    Notes:
        Bins are half-open, so a point with x == max(x) falls outside the
        last bin and is ignored (matching the original implementation).
        Empty bins now yield NaN directly instead of triggering the
        RuntimeWarning from np.mean of an empty list.
    '''
    binSize = max(x) / nBins
    # Single pass over the samples instead of rescanning x for every bin.
    sums = [0.0] * nBins
    counts = [0] * nBins
    for R, yval in zip(x, y):
        n = int(R // binSize)
        if 0 <= n < nBins:
            sums[n] += yval
            counts[n] += 1
    X = []
    Y = []
    for n in range(nBins):
        X.append((n + 0.5) * binSize)
        Y.append(sums[n] / counts[n] if counts[n] else np.nan)
    return X, Y
def linecut_old(F, x1, y1, x2, y2, n):
    ''' Use linear interpolation on a 2D data set F, sample along a line from (x1,y1) to (x2,y2) in n points
    Usage: x_linecut, y_linecut = linecut(image, x1, y1, x2, y2, n)
    History:
        2017-06-19 - HP : Changed name to linecut_old (will be replaced by
                          linecut)
    '''
    # NOTE(review): x is built from axis 0 and y from axis 1; elsewhere in
    # this module F is indexed F[y, x], so for non-square F these look
    # swapped -- confirm before using on rectangular data.
    x = np.arange(F.shape[0])
    y = np.arange(F.shape[1])
    # Half the line length; r spans [-cen, cen] so 0 marks the midpoint.
    cen = np.sqrt((x1-x2)**2 + (y1-y2)**2) / 2.0
    r = np.linspace(-1*cen, cen, n)
    f = interp2d(x, y, F, kind = 'linear')
    xval = np.linspace(x1, x2, n)
    yval = np.linspace(y1, y2, n)
    # interp2d returns an array even for scalar input; [0] unwraps it.
    z = [f(xval[ix],yval[ix])[0] for ix in range(n)]
    return r, np.array(z)
def squareCrop(image, m=None):
    ''' Crop a 2D image to m x m, where m is an even integer.

    If m is omitted it defaults to the smaller image dimension; an odd m
    is rounded down to the next even integer.
    '''
    image = np.array(image)
    rows, cols = image.shape
    if m is None:
        m = min(rows, cols)
    if m % 2:
        m -= 1
    return image[:m, :m]
def lineCrop(x, y, cropRange):
    ''' Crop a 1D line using a list of start, stop values. Can delete
    sections of data.

    Usage: xCrop, yCrop = lineCrop(x, y, [start, stop, start, stop, ...])
    '''
    def first_at_or_above(value):
        # Index of the first x sample >= value; clamp to an end point when
        # value lies outside the sampled range.
        for ix, x0 in enumerate(x):
            if x0 >= value:
                return ix
        return 0 if value <= x[0] else len(x) - 1
    cropIndex = [first_at_or_above(v) for v in cropRange]
    xList, yList = x.tolist(), y.tolist()
    xCrop, yCrop = [], []
    # Consume (start, stop) index pairs; slices are inclusive of the stop.
    for ix in range(0, len(cropIndex), 2):
        lo, hi = cropIndex[ix], cropIndex[ix + 1] + 1
        xCrop.extend(xList[lo:hi])
        yCrop.extend(yList[lo:hi])
    return np.array(xCrop), np.array(yCrop)
def removePolynomial1d(y, n, x=None, fitRange=None):
    ''' Subtract a degree-n background polynomial from the line y(x),
    fitted over fitRange (optional).

    Usage: yCorrected = removePolynomial1d(y, n)
    '''
    if x is None:
        x = np.linspace(0, 1, len(y))
    if fitRange is None:
        fitRange = [x[0], x[-1]]
    # Fit only the selected region, then subtract over the full range.
    xFit, yFit = lineCrop(x, y, fitRange)
    background = np.poly1d(np.polyfit(xFit, yFit, n))
    return y - background(x)
def lineSubtract(data, n=1, maskon=False, thres=4, M=4, normalize=True, colSubtract=False):
'''
Remove a polynomial background from the data line-by-line, with
the option to skip pixels within certain distance away from
impurities. If the data is 3D (eg. 3ds) this does a 2D background
subtract on each layer independently. Input is a numpy array.
Inputs:
data - Required : A 1D, 2D or 3D numpy array.
n - Optional : Degree of polynomial to subtract from each line.
(default : 1).
maskon - Optional : Boolean flag to determine if the impurty areas are excluded.
thres - Optional : Float number specifying the threshold to determine
if a pixel is impurity or bad pixels. Any pixels with intensity greater
than thres*std will be identified as bad points.
M - Optional : Integer number specifying the box size where all pixels will be excluded
from poly fitting.
normalize - Optional : Boolean flag to determine if the mean of a layer
is set to zero (True) or preserved (False).
(default : True)
colSubtract - Optional : Boolean flag (False by default) to determine if polynomial background should also be subtracted column-wise
Returns:
subtractedData - Data after removing an n-degree polynomial
Usage:
dataObject.z = lineSubtract(dataObject.Z, n=1, normalize=True)
dataObject.z = lineSubtract(dataObject.Z, n=1, mask=True, thres=1.5, M=4, normalize=True)
History:
2017-07-19 - HP : Updated to work for 1D data.
2018-06-07 - MF : Updated to do a background subtract in the orthogonal direction (ie. column-wise)
2018-11-04 - RL : Updated to add mask to exclude impurity and | |
import sys
import os
import pickle
import pathlib
import argparse
from paddle import nn
import paddle
import yaml
from easydict import EasyDict
from sklearn.model_selection import train_test_split
import numpy as np
import seaborn as sns
from tqdm.auto import tqdm
from termcolor import colored
import time
from utils.dataloader import get_dataloader, PostTensorTransform
# Shared classification criterion used by both the clean and poisoned
# objectives throughout this script.
loss_fn = nn.CrossEntropyLoss()
def all2one_target_transform(x, attack_target=1):
    """Map every label in x to the single attack target label."""
    return attack_target * paddle.ones_like(x)
def all2all_target_transform(x, num_classes):
    """Shift each label to the next class, wrapping around num_classes."""
    shifted = x + 1
    return shifted % num_classes
def create_attack_model(dataset, attack_model=None):
    """Instantiate the trigger-generator (attack) model for a dataset.

    Parameters:
        dataset      : Dataset name ('cifar10', 'mnist', 'tiny-imagenet',
                       'tiny-imagenet32' or 'gtsrb').
        attack_model : Optional architecture override ('unet'); None selects
                       the default for the dataset.

    Returns:
        The attack model instance.

    Raises:
        Exception if the dataset or attack_model value is unknown.
    """
    if dataset == 'cifar10':
        from attack_models.unet import UNet
        atkmodel = UNet(3)
    elif dataset == 'mnist':
        from attack_models.autoencoders import MNISTAutoencoder as Autoencoder
        atkmodel = Autoencoder()
    elif dataset == 'tiny-imagenet' or dataset == 'tiny-imagenet32' or dataset == 'gtsrb':
        if attack_model is None:
            from attack_models.autoencoders import Autoencoder
            atkmodel = Autoencoder()
        elif attack_model == 'unet':
            from attack_models.unet import UNet
            atkmodel = UNet(3)
        else:
            # BUGFIX: an unknown attack_model previously fell through to the
            # return statement and crashed with UnboundLocalError; raise an
            # explicit error instead.
            raise Exception(f'Invalid attack model {attack_model}')
    else:
        raise Exception(f'Invalid atk model {dataset}')
    return atkmodel
def create_models(args):
    """Build the attack model, its training copy, that copy's optimizer,
    and the victim classifier.

    Parameters:
        args : Namespace with at least dataset, attack_model, clsmodel,
               input_channel, num_classes and lr_atk attributes.

    Returns:
        (atkmodel, tgtmodel, tgtoptimizer, clsmodel, create_net) where
        tgtmodel is a structural copy of atkmodel (the copy is the one
        actually optimized) and create_net is a zero-argument factory that
        produces fresh classifier instances.

    Raises:
        Exception for unsupported dataset or classifier names.
    """
    if args.dataset == 'cifar10':
        if args.attack_model is None or args.attack_model == 'autoencoder':
            from attack_models.autoencoders import Autoencoder
            atkmodel = Autoencoder(args.input_channel)
            # Copy of attack model
            tgtmodel = Autoencoder(args.input_channel)
        elif args.attack_model == 'unet':
            from attack_models.unet import UNet
            atkmodel = UNet(args.input_channel)
            # Copy of attack model
            tgtmodel = UNet(args.input_channel)
    elif args.dataset == 'mnist':
        from attack_models.autoencoders import Autoencoder
        atkmodel = Autoencoder(args.input_channel)
        # Copy of attack model
        tgtmodel = Autoencoder(args.input_channel)
    else:
        raise Exception(f'Invalid atk model {args.dataset}')
    # Classifier: each branch defines a factory so callers can re-create
    # fresh (re-initialized) classifiers later in training.
    if args.clsmodel == 'vgg11':
        from paddle.vision.models import vgg11
        def create_net():
            return vgg11(num_classes=args.num_classes)
    elif args.clsmodel == 'resnet18':
        from paddle.vision.models import resnet18
        def create_net():
            return resnet18(num_classes=args.num_classes)
    elif args.clsmodel == 'mnist_cnn':
        from classifier_models.cnn import NetC_MNIST
        def create_net():
            return NetC_MNIST()
    else:
        raise Exception(f'Invalid clsmodel {args.clsmodel}')
    clsmodel = create_net()
    # Optimizer: only the trainable copy (tgtmodel) gets an optimizer here;
    # the classifier optimizer is created by the caller.
    tgtoptimizer = paddle.optimizer.Adam(parameters=tgtmodel.parameters(), learning_rate=args.lr_atk)
    return atkmodel, tgtmodel, tgtoptimizer, clsmodel, create_net
def test(args, atkmodel, scratchmodel, target_transform,
         train_loader, test_loader, epoch, trainepoch, clip_image,
         testoptimizer=None, log_prefix='Internal', epochs_per_test=5):
    """Train a scratch classifier against the frozen attack model and report
    clean / poisoned accuracy.

    The scratch model is trained for `trainepoch` epochs; evaluation runs
    every `epochs_per_test` epochs and on the final epoch.

    Returns:
        (correct, correct_transform): clean and poisoned accuracy from the
        last evaluation pass.
    """
    # Default phase-2 (test) hyper-parameters to their phase-1 values.
    if args.test_alpha is None:
        args.test_alpha = args.alpha
    if args.test_eps is None:
        args.test_eps = args.eps
    # NOTE(review): these accumulators are not reset between evaluation
    # passes, so with trainepoch > epochs_per_test later passes include
    # already-normalized values from earlier ones -- confirm intent.
    test_loss = 0
    correct = 0
    correct_transform = 0
    test_transform_loss = 0
    atkmodel.eval()
    if testoptimizer is None:
        testoptimizer = paddle.optimizer.SGD(parameters=scratchmodel.parameters(), learning_rate=args.lr)
    for cepoch in range(trainepoch):
        scratchmodel.train()
        pbar = tqdm(enumerate(train_loader), total=len(train_loader), position=0, leave=True)
        for batch_idx, (data, target) in pbar:
            testoptimizer.clear_grad()
            with paddle.no_grad():
                # The attack model is frozen during testing.
                noise = atkmodel(data) * args.test_eps
                atkdata = clip_image(data + noise)
            atkoutput = scratchmodel(atkdata)
            output = scratchmodel(data)
            loss_clean = loss_fn(output, target)
            loss_poison = loss_fn(atkoutput, target_transform(target))
            # BUGFIX: weight the clean loss with test_alpha.  The original
            # used `args.alpha * loss_clean + (1 - args.test_alpha) * ...`,
            # so the weights did not sum to 1 whenever test_alpha differed
            # from alpha, inconsistent with the test_eps handling above.
            loss = args.test_alpha * loss_clean + (1 - args.test_alpha) * loss_poison
            loss.backward()
            testoptimizer.step()
            if batch_idx % 10 == 0 or batch_idx == (len(train_loader) - 1):
                pbar.set_description(
                    'Test [{}-{}] Loss: Clean {:.4f} Poison {:.4f} Total {:.5f}'.format(
                        epoch, cepoch,
                        loss_clean.item(),
                        loss_poison.item(),
                        loss.item()
                    ))
        if cepoch % epochs_per_test == 0 or cepoch == trainepoch - 1:
            scratchmodel.eval()
            with paddle.no_grad():
                for data, target in test_loader:
                    if len(target.shape) == 1:
                        target = target.reshape([data.shape[0], 1])
                    output = scratchmodel(data)
                    test_loss += paddle.nn.functional.cross_entropy(output, target, reduction='sum').item()  # sum up batch loss
                    correct += paddle.metric.accuracy(output, target).item() * len(target)
                    noise = atkmodel(data) * args.test_eps
                    atkdata = clip_image(data + noise)
                    atkoutput = scratchmodel(atkdata)
                    test_transform_loss += paddle.nn.functional.cross_entropy(
                        atkoutput, target_transform(target), reduction='sum').item()  # sum up batch loss
                    correct_transform += paddle.metric.accuracy(atkoutput, target_transform(target)).item() * len(target)
            test_loss /= len(test_loader.dataset)
            test_transform_loss /= len(test_loader.dataset)
            correct /= len(test_loader.dataset)
            correct_transform /= len(test_loader.dataset)
            print(
                '\n{}-Test set [{}]: Loss: clean {:.4f} poison {:.4f}, Accuracy: clean {:.2f} poison {:.2f}'.format(
                    log_prefix, cepoch,
                    test_loss, test_transform_loss,
                    correct, correct_transform
                ))
    return correct, correct_transform
def train(args, atkmodel, tgtmodel, clsmodel, tgtoptimizer, clsoptimizer, target_transform,
          train_loader, epoch, train_epoch, create_net, clip_image, post_transforms=None):
    """Run one alternating-update epoch: first update the trigger generator
    (tgtmodel), then the classifier (clsmodel).

    Returns:
        Mean attack (poison) loss over the epoch.
    """
    clsmodel.train()
    atkmodel.eval()
    tgtmodel.train()
    losslist = []
    pbar = tqdm(enumerate(train_loader), total=len(train_loader), position=0, leave=True)
    for batch_idx, (data, target) in pbar:
        if post_transforms is not None:
            data = post_transforms(data)
        ########################################
        #### Update Transformation Function ####
        ########################################
        noise = tgtmodel(data) * args.eps
        atkdata = clip_image(data + noise)
        # Attack objective: make the classifier predict the poisoned target.
        atkoutput = clsmodel(atkdata)
        loss_poison = loss_fn(atkoutput, target_transform(target))
        loss1 = loss_poison
        losslist.append(loss1.item())
        clsoptimizer.clear_grad()
        tgtoptimizer.clear_grad()
        loss1.backward()
        tgtoptimizer.step()  # this is the slowest step
        ###############################
        #### Update the classifier ####
        ###############################
        noise = atkmodel(data) * args.eps
        atkdata = clip_image(data + noise)
        output = clsmodel(data)
        atkoutput = clsmodel(atkdata)
        loss_clean = loss_fn(output, target)
        loss_poison = loss_fn(atkoutput, target_transform(target))
        loss2 = loss_clean * args.alpha + (1 - args.alpha) * loss_poison
        clsoptimizer.clear_grad()
        loss2.backward()
        clsoptimizer.step()
        if batch_idx % 10 == 0 or batch_idx == (len(train_loader) - 1):
            # BUGFIX: loss2 is the classifier (CLS) loss and loss1 the attack
            # (ATK) loss; the original printed them under swapped labels.
            pbar.set_description('Train [{}] Loss: clean {:.4f} poison {:.4f} CLS {:.4f} ATK:{:.4f}'.format(
                epoch, loss_clean.item(), loss_poison.item(), loss2.item(), loss1.item()))
    pbar.close()
    atkloss = sum(losslist) / len(losslist)
    return atkloss
def create_paths(args):
    """Build (and create on disk) the output directory for this run.

    The directory name encodes the attack mode, dataset, classifier and the
    main hyper-parameters so distinct configurations never collide.

    Returns:
        (basepath, checkpoint_path, bestmodel_path)
    """
    if args.mode == 'all2one':
        basepath = os.path.join(args.path, f'{args.mode}_{args.target_label}', args.dataset, args.clsmodel)
    else:
        basepath = os.path.join(args.path, args.mode, args.dataset, args.clsmodel)
    basepath = os.path.join(basepath, f'lr{args.lr}-lratk{args.lr_atk}-eps{args.eps}-alpha{args.alpha}-clsepoch{args.train_epoch}-atkmodel{args.attack_model}')
    if not os.path.exists(basepath):
        print(f'Creating new model training in {basepath}')
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(basepath, exist_ok=True)
    checkpoint_path = os.path.join(basepath, 'checkpoint.ckpt')
    bestmodel_path = os.path.join(basepath, 'bestmodel.ckpt')
    return basepath, checkpoint_path, bestmodel_path
def get_target_transform(args):
    """Return the label-poisoning transform selected by args.mode."""
    mode = args.mode
    if mode == 'all2one':
        return lambda x: all2one_target_transform(x, args.target_label)
    if mode == 'all2all':
        return lambda x: all2all_target_transform(x, args.num_classes)
    raise Exception(f'Invalid mode {mode}')
def get_train_test_loaders(args):
    """Configure dataset geometry on args and build the data loaders.

    Side effects: sets args.input_height, args.input_width,
    args.input_channel and args.num_classes for the chosen dataset.

    Returns:
        (train_loader, test_loader, clip_image) where clip_image keeps
        perturbed images inside the dataset's valid value range.
    """
    # (height, width, channels, classes) per supported dataset.
    dims = {
        'cifar10': (32, 32, 3, 10),
        'gtsrb': (32, 32, 3, 43),
        'mnist': (32, 32, 1, 10),
        'tiny-imagenet32': (32, 32, 3, 200),
    }
    if args.dataset not in dims:
        raise Exception("Invalid Dataset")
    (args.input_height, args.input_width,
     args.input_channel, args.num_classes) = dims[args.dataset]
    train_loader = get_dataloader(args, True, args.pretensor_transform)
    test_loader = get_dataloader(args, False, args.pretensor_transform)
    if args.dataset in ['tiny-imagenet', 'tiny-imagenet32']:
        # Normalized-image bounds for the tiny-imagenet preprocessing.
        xmin, xmax = -2.1179039478302, 2.640000104904175
        def clip_image(x):
            return paddle.clip(x, xmin, xmax)
    elif args.dataset == 'cifar10':
        def clip_image(x):
            # No clipping for CIFAR-10.
            return x
    elif args.dataset == 'mnist':
        def clip_image(x):
            return paddle.clip(x, -1.0, 1.0)
    elif args.dataset == 'gtsrb':
        def clip_image(x):
            return paddle.clip(x, 0.0, 1.0)
    else:
        raise Exception(f'Invalid dataset: {args.dataset}')
    return train_loader, test_loader, clip_image
def main(args):
    """End-to-end LIRA phase-1 training loop.

    Seeds the RNGs, builds loaders and models, optionally resumes from a
    checkpoint, then alternates trigger-generator and classifier updates.
    A checkpoint is written every epoch and the best clean/poison accuracy
    pair is tracked separately.
    """
    paddle.seed(args.seed)
    np.random.seed(args.seed)
    args.device = paddle.set_device("gpu" if paddle.device.is_compiled_with_cuda() else "cpu")
    if args.verbose >= 1:
        print('========== ARGS ==========')
        print(args)
    train_loader, test_loader, clip_image = get_train_test_loaders(args)
    post_transforms = PostTensorTransform(args)
    print('========== DATA ==========')
    print('Loaders: Train {} examples/{} iters, Test {} examples/{} iters'.format(
        len(train_loader.dataset), len(train_loader), len(test_loader.dataset), len(test_loader)))
    atkmodel, tgtmodel, tgtoptimizer, clsmodel, create_net = create_models(args)
    if args.verbose >= 2:
        print('========== MODELS ==========')
        print(atkmodel)
        print(clsmodel)
    target_transform = get_target_transform(args)
    basepath, checkpoint_path, bestmodel_path = create_paths(args)
    print('========== PATHS ==========')
    print(f'Basepath: {basepath}')
    print(f'Checkpoint Model: {checkpoint_path}')
    print(f'Best Model: {bestmodel_path}')
    if os.path.exists(checkpoint_path):
        #Load previously saved models
        checkpoint = paddle.load(checkpoint_path)
        print(colored('Load existing attack model from path {}'.format(checkpoint_path), 'red'))
        atkmodel.load_dict(checkpoint['atkmodel'], use_structured_name=True)
        clsmodel.load_dict(checkpoint['clsmodel'], use_structured_name=True)
        trainlosses = checkpoint['trainlosses']
        best_acc_clean = checkpoint['best_acc_clean']
        best_acc_poison = checkpoint['best_acc_poison']
        start_epoch = checkpoint['epoch']
        tgtoptimizer.load_dict(checkpoint['tgtoptimizer'])
    else:
        #Create new model
        print(colored('Create new model from {}'.format(checkpoint_path), 'blue'))
        best_acc_clean = 0
        best_acc_poison = 0
        trainlosses = []
        start_epoch = 1
    #Initialize the tgtmodel
    # tgtmodel always starts as an exact copy of atkmodel.
    tgtmodel.load_dict(atkmodel.state_dict(), use_structured_name=True)
    print('============================')
    print('============================')
    print('BEGIN TRAINING >>>>>>')
    clsoptimizer = paddle.optimizer.Momentum(parameters=clsmodel.parameters(), learning_rate=args.lr, momentum=0.9)
    for epoch in range(start_epoch, args.epochs + 1):
        for i in range(args.train_epoch):
            print(f'===== EPOCH: {epoch}/{args.epochs + 1} CLS {i+1}/{args.train_epoch} =====')
            # By default a fresh SGD optimizer is created for every inner
            # classifier epoch.
            if not args.avoid_clsmodel_reinitialization:
                clsoptimizer = paddle.optimizer.SGD(parameters=clsmodel.parameters(), learning_rate=args.lr)
            trainloss = train(args, atkmodel, tgtmodel, clsmodel, tgtoptimizer, clsoptimizer, target_transform, train_loader,
                              epoch, i, create_net, clip_image,
                              post_transforms=post_transforms)
            trainlosses.append(trainloss)
        # Promote the freshly trained trigger generator to be the attack
        # model for evaluation and for the next epoch.
        atkmodel.load_dict(tgtmodel.state_dict())
        if args.avoid_clsmodel_reinitialization:
            scratchmodel = create_net()
            scratchmodel.load_dict(clsmodel.state_dict()) #transfer from cls to scratch for testing
        else:
            # Re-initialize both; testing always starts from scratch.
            clsmodel = create_net()
            scratchmodel = create_net()
        if epoch % args.epochs_per_external_eval == 0 or epoch == args.epochs:
            acc_clean, acc_poison = test(args, atkmodel, scratchmodel, target_transform,
                                         train_loader, test_loader, epoch, args.cls_test_epochs, clip_image,
                                         log_prefix='External')
        else:
            acc_clean, acc_poison = test(args, atkmodel, scratchmodel, target_transform,
                                         train_loader, test_loader, epoch, args.train_epoch, clip_image,
                                         log_prefix='Internal')
        # Keep the best model: strictly better clean accuracy, or a poison
        # improvement while clean accuracy stays within best_threshold.
        if acc_clean > best_acc_clean or (acc_clean > (best_acc_clean-args.best_threshold) and best_acc_poison < acc_poison):
            best_acc_poison = acc_poison
            best_acc_clean = acc_clean
            paddle.save({'atkmodel': atkmodel.state_dict(), 'clsmodel': clsmodel.state_dict()}, bestmodel_path)
        paddle.save({
            'atkmodel': atkmodel.state_dict(),
            'clsmodel': clsmodel.state_dict(),
            'tgtoptimizer': tgtoptimizer.state_dict(),
            'best_acc_clean': best_acc_clean,
            'best_acc_poison': best_acc_poison,
            'trainlosses': trainlosses,
            'epoch': epoch
        }, checkpoint_path)
def create_config_parser():
parser = argparse.ArgumentParser(description='PaddlePaddle LIRA Phase 1')
parser.add_argument('--dataset', type=str, default='cifar10')
parser.add_argument('--data_root', type=str, default='data/')
parser.add_argument("--random_rotation", type=int, default=10)
parser.add_argument("--random_crop", type=int, default=5)
parser.add_argument("--pretensor_transform", action='store_true', default=False)
parser.add_argument('--num-workers', type=int, default=2, help='dataloader workers')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=1000, help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, help='learning rate (default: 0.01)')
parser.add_argument('--lr-atk', type=float, default=0.0001, help='learning rate for attack model')
parser.add_argument('--seed', type=int, default=999, help='random seed (default: 999)')
parser.add_argument('--save-model', action='store_true', default=False, help='For Saving the current Model')
parser.add_argument('--train-epoch', type=int, default=1, help='training epochs for victim | |
#!/usr/bin/python
#
# This script generates summary statistics and raw plots for the data note
# associated with the annotations of portrayed emotions in the movie
# Forrest Gump. It is intended to serve as a more detailed description
# of the employed analysis and aggregation procedures than what is possible
# to convey in a manuscript.
#
# In order to reproduce the results, the script needs to be executed in the
# root of the extracted dataset. Summary statistics are printed with LaTeX
# markup and were directly included into the LaTeX sources of the associated
# Data note publication.
#
# Required arguments:
# 1. path to store the generated inter-observer agreement times series
# 2. path to store the generated figures
#
# The following python packages are required:
# - NumPy
# - SciPy
# - seaborn
#
# Example:
# $ python descr_stats.py /tmp /tmp
#
# This source code is (C) by <NAME> <<EMAIL>> and
# made available under the terms of the Creative Common Attribution-ShareAlike
# 4.0 International (CC BY-SA 4.0) license.
#
import numpy as np
from scipy.stats import spearmanr
# hard code the max duration of the movie stimulus
maxmovietime = 7085.28
#
# Helpers from PyMVPA
#
def plot_bars(data, labels=None, title=None, ylim=None, ylabel=None,
              width=0.2, offset=0.2, color='0.6', distance=1.0,
              yerr='ste', xloc=None, **kwargs):
    """Make bar plots with automatically computed error bars.

    Candlestick plot (multiple interleaved barplots) can be done,
    by calling this function multiple time with appropriatly modified
    `offset` argument.

    Parameters
    ----------
    data : array (nbars x nobservations) or other sequence type
      Source data for the barplot. Error measure is computed along the
      second axis.
    labels : list or None
      If not None, a label from this list is placed on each bar.
    title : str
      An optional title of the barplot.
    ylim : 2-tuple
      Y-axis range.
    ylabel : str
      An optional label for the y-axis.
    width : float
      Width of a bar. The value should be in a reasonable relation to
      `distance`.
    offset : float
      Constant offset of all bar along the x-axis. Can be used to create
      candlestick plots.
    color : matplotlib color spec
      Color of the bars.
    distance : float
      Distance of two adjacent bars.
    yerr : {'ste', 'std', None}
      Type of error for the errorbars. If `None` no errorbars are plotted.
    xloc : sequence
      Locations of the bars on the x axis.
    **kwargs
      Any additional arguments are passed to matplotlib's `bar()` function.
    """
    import pylab as pl

    # determine location of bars
    xloc = (np.arange(len(data)) * distance) + offset
    if yerr == 'ste':
        # Standard error of the mean per bar.
        yerr = [np.std(d) / np.sqrt(len(d)) for d in data]
    elif yerr == 'std':
        yerr = [np.std(d) for d in data]
    else:
        # if something that we do not know just pass on
        pass

    # plot bars
    plot = pl.bar(xloc,
                  [np.mean(d) for d in data],
                  yerr=yerr,
                  width=width,
                  color=color,
                  ecolor='black',
                  **kwargs)

    if ylim:
        pl.ylim(*(ylim))
    if title:
        pl.title(title)

    if labels:
        # Center the tick labels under each bar.
        pl.xticks(xloc + width / 2, labels)

    if ylabel:
        pl.ylabel(ylabel)

    # leave some space after last bar
    pl.xlim(0, xloc[-1] + width + offset)

    return plot
def unique_combinations(L, n, sort=False):
    """Return all unique combinations of the items of L taken n at a time.

    Parameters
    ----------
    L : list
      list of unique ids
    n : int
      length of the subsets to return
    sort : bool, optional
      if True the result is sorted before returning

    Notes
    -----
    If only a small subset of the possible combinations is needed, prefer
    the `xunique_combinations` generator.
    """
    combos = [combo for combo in xunique_combinations(L, n)]
    return sorted(combos) if sort else combos
def xunique_combinations(L, n):
    """Generate unique combinations of the items of L taken n at a time.

    Parameters
    ----------
    L : list
      list of unique ids
    n : int
      grouping size

    Adopted from a recipe at
    http://code.activestate.com/recipes/190465/
    (MIT license, according to activestate.com's policy)

    Also good discussions on combinations/variations/permutations
    with various implementations are available at
    http://mail.python.org/pipermail/python-list/2004-October/286054.html
    """
    if n == 0:
        yield []
        return
    # Each element can head a combination only while enough items remain
    # after it to complete a group of size n.
    for i in range(len(L) - n + 1):
        head = L[i]
        for tail in xunique_combinations(L[i + 1:], n - 1):
            yield [head] + tail
#
# Load data
#
def get_shots():
    # Shot onsets come from file; each shot ends at the next onset, with
    # the movie end time closing the final shot.
    starts = np.loadtxt('movie_shots.csv')
    ends = np.concatenate((starts[1:], (maxmovietime,)))
    return np.array((starts, ends)).T
def get_scenes():
    # Scene onsets come from the CSV's 'start' column; each scene ends at
    # the next onset, with the movie end time closing the final scene.
    rec = np.recfromcsv('movie_scenes.csv',
                        names=('start', 'title', 'tod', 'set'))
    starts = rec['start']
    ends = np.concatenate((starts[1:], (maxmovietime,)))
    return np.array((starts, ends)).T
def get_nsecond_segments(n=1):
    # Tile the movie into consecutive n-second (onset, offset) windows up
    # to the end of the last scene.
    end = get_scenes()[-1, 1]
    onsets = np.arange(0, end - n, n)
    offsets = np.arange(n, end, n)
    return np.array((onsets, offsets)).T
def get_av_ratings():
    # Load all audio-visual observer annotation files as record arrays.
    import glob
    return [np.recfromcsv(f) for f in glob.glob('raw/av*.csv')]
def get_ao_ratings():
    # Load all audio-only observer annotation files as record arrays.
    import glob
    return [np.recfromcsv(f) for f in glob.glob('raw/ao*.csv')]
def get_all_ratings():
    # All observers: audio-visual first, then audio-only.
    return get_av_ratings() + get_ao_ratings()
#
# Stats
#
def get_labeled_fraction(rat, col):
    # Fraction of all annotations that carry a value in column `col`.
    total = np.sum([len(r) for r in rat])
    labeled = np.sum([len(r) for r in get_labeled_ratings(rat, col)])
    return float(labeled) / total
def get_agreed_labels(ratings, col, segments, athresh=0.5, nseg_thresh=0):
    # determine values for a particular column that show a minimum
    # inter-observer agreement for a minimum number of time segments
    # anywhere in the movie
    from scipy.ndimage.measurements import label
    # All unique whitespace-separated values ever used in `col`, pooled
    # across all observers.
    labels = \
        np.unique(
            np.concatenate(
                [np.unique(
                    np.concatenate(
                        [d.split() for d in r[col]]))
                for r in ratings]))
    found = []
    for l in labels:
        # Boolean time series: segments where agreement exceeds athresh.
        match = slice2segments(ratings, {col: l}, segments) > athresh
        nseg = np.sum(match)
        # Number of contiguous runs of above-threshold agreement.
        nblobs = label(match)[1]
        if nblobs > nseg_thresh:
            found.append((l, nseg, nblobs))
    return found
def calc_bootstrapped_intercorrelation(ratings, cond1, cond2, segments):
    # split the set of observers into all possible ~halves and
    # compute the time series correlations of inter-observer
    # agreement wrt annotation matching particular criteria across
    # both groups
    # NOTE(review): Python 2 only -- relies on `xrange` and on integer
    # division in `N / 2`; under Python 3 `N / 2` is a float and this
    # breaks. Confirm before porting.
    from mvpa2.misc.support import unique_combinations
    N = len(ratings)
    corr = []
    for combo in unique_combinations(range(N), N / 2):
        half1 = [ratings[i] for i in combo]
        half2 = [ratings[i] for i in xrange(N) if not i in combo]
        # Agreement-difference time series (cond1 minus cond2) per half.
        c1 = slice2segments(half1, cond1, segments) \
            - slice2segments(half1, cond2, segments)
        c2 = slice2segments(half2, cond1, segments) \
            - slice2segments(half2, cond2, segments)
        corr.append(spearmanr(c1, c2)[0])
    return corr
def get_ci_stats(arr):
    # Format the mean of an array of correlation scores, plus a 95% CI
    # (1.96 standard errors), as LaTeX markup.  Means >= 0.5 are bolded;
    # a NaN mean yields 'n/a'.
    m = np.mean(arr)
    sem = np.std(arr) / np.sqrt(len(arr))
    if np.isnan(m):
        return 'n/a'
    template = '\\textbf{%.3f} $\\pm$%.3f' if m >= 0.5 else '%.3f $\\pm$%.3f'
    return template % (m, 1.96 * sem)
def get_corr_ci(v1, v2):
    # Spearman correlation of two time series as LaTeX markup with a 95%
    # CI obtained via the Fisher z-transformation.  Correlations >= 0.5
    # are bolded; NaN yields 'n/a'.
    c = spearmanr(v1, v2)[0]
    se = 1. / np.sqrt(len(v1) - 3)
    # Fisher z-transform, shift by 1.96 standard errors, transform back.
    upper = np.tanh(np.arctanh(c) + 1.96 * se)
    if np.isnan(c):
        return 'n/a'
    template = '\\textbf{%.3f} $\\pm$%.3f' if c >= 0.5 else '%.3f $\\pm$%.3f'
    return template % (c, upper - c)
def print_stats(rat, rat_label, all_rat):
# compute various annotation statistics
print '\\newcommand{\\%sTotalRaters}{%i}' % (rat_label, len(rat))
athresh = 0.5
print '\\newcommand{\\%sAggThresh}{%i\\%%}' % (rat_label, athresh * 100)
segments = get_nsecond_segments()
print '\\newcommand{\\%sFracWithLabeledChar}{%.1f\\%%}' \
% (rat_label, get_labeled_fraction(rat, 'character') * 100)
e = get_agreed_labels(rat, 'character', segments, athresh=-1)
print '%% %s total character labels' % (rat_label,)
print '%% %s' % [v[0] for v in e]
print '\\newcommand{\\%sTotalCharLabels}{%i}' % (rat_label, len(e))
e = get_agreed_labels(rat, 'character', segments, athresh=athresh, nseg_thresh=5)
print '%% %s character labels AGG > %.2f' % (rat_label, athresh)
print '%% %s' % e
print '\\newcommand{\\%sThreshCharLabels}{%i}' % (rat_label, len(e))
print '\\newcommand{\\%sFracWithLabeledEmotions}{%.1f\\%%}' \
% (rat_label, get_labeled_fraction(rat, 'emotion') * 100)
e = get_agreed_labels(rat, 'emotion', segments, athresh=athresh)
print '%% %s emotion labels AGG > %.2f' % (rat_label, athresh)
print '%% %s' % e
print '\\newcommand{\\%sThreshEmoLabels}{%i}' % (rat_label, len(e))
print '\\newcommand{\\%sFracWithLabeledOncue}{%.1f\\%%}' \
% (rat_label, get_labeled_fraction(rat, 'oncue') * 100)
e = get_agreed_labels(rat, 'oncue', segments, athresh=athresh)
print '%% %s oncue labels AGG > %.2f' % (rat_label, athresh)
print '%% %s' % e
print '\\newcommand{\\%sThreshOncueLabels}{%i}' % (rat_label, len(e))
print '\\newcommand{\\%sFracWithLabeledOffcue}{%.1f\\%%}' \
% (rat_label, get_labeled_fraction(rat, 'offcue') * 100)
e = get_agreed_labels(rat, 'offcue', segments, athresh=athresh)
print '%% %s offcue labels AGG > %.2f' % (rat_label, athresh)
print '%% %s' % e
print '\\newcommand{\\%sThreshOffcueLabels}{%i}' % (rat_label, len(e))
# per character stats
for char, clabel in (('*', 'AllChar'),
('FORREST', 'Forrest'),
('JENNY', 'Jenny')):
print '\\newcommand{\\%sCorrArousalValence%s}{%s}' \
% (rat_label, clabel,
get_corr_ci(get_arousal_modulation(rat, segments, char=char),
get_valence_modulation(rat, segments, char=char)))
print '\\newcommand{\\%sCorrValenceDirection%s}{%s}' \
% (rat_label, clabel,
get_corr_ci(get_valence_modulation(rat, segments, char=char),
get_direction_modulation(rat, segments, char=char)))
print '\\newcommand{\\%sCorrArousalDirection%s}{%s}' \
| |
# -*- coding: utf-8 -*-
"""
Authors: <NAME>, <NAME>, <NAME>, and
<NAME>
IHE Delft 2017
Contact: <EMAIL>
Repository: https://github.com/gespinoza/hants
Module: hants
"""
from __future__ import division
import netCDF4
import pandas as pd
import math
from .davgis.functions import (Spatial_Reference, List_Datasets, Clip,
Resample, Raster_to_Array, NetCDF_to_Raster)
import os
import tempfile
from copy import deepcopy
import matplotlib.pyplot as plt
import warnings
def run_HANTS(rasters_path_inp, name_format,
              start_date, end_date, latlim, lonlim, cellsize, nc_path,
              nb, nf, HiLo, low, high, fet, dod, delta,
              epsg=4326, fill_val=-9999.0,
              rasters_path_out=None, export_hants_only=False):
    '''
    Run the python implementation of the HANTS algorithm.

    A folder of geotiff rasters is first aggregated into a netcdf file at
    nc_path, which is then processed in place by HANTS.  Returns the path
    of the netcdf file.
    '''
    create_netcdf(rasters_path_inp, name_format, start_date, end_date,
                  latlim, lonlim, cellsize, nc_path,
                  epsg, fill_val)
    HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
                 fill_val)
    # Export back to geotiffs is currently disabled:
    # if rasters_path_out:
    #     export_tiffs(rasters_path_out, nc_path, name_format, export_hants_only)
    return nc_path
def create_netcdf(rasters_path, name_format, start_date, end_date,
                  latlim, lonlim, cellsize, nc_path,
                  epsg=4326, fill_val=-9999.0):
    '''
    This function creates a netcdf file from a folder with geotiffs rasters to
    be used to run HANTS.

    Parameters
    ----------
    rasters_path : str
        Folder containing the input geotiff rasters.
    name_format : str
        Raster filename template with one positional placeholder for the
        date string, e.g. 'ndvi_{0}.tif' (filled with 'YYYYMMDD').
    start_date, end_date : str
        First and last date of the daily series (pandas-parsable).
    latlim, lonlim : sequence of float
        [min, max] latitude and longitude of the output extent.
    cellsize : float
        Output cell size, in the units of the spatial reference.
    nc_path : str
        Path of the netCDF file to create.
    epsg : int, optional
        EPSG code of the spatial reference system.
    fill_val : float, optional
        Value stored for missing rasters/cells.

    Returns
    -------
    str
        `nc_path` of the created file.
    '''
    # Latitude and longitude vectors at cell centers
    lat_ls = pd.np.arange(latlim[0] + 0.5*cellsize, latlim[1] + 0.5*cellsize,
                          cellsize)
    lat_ls = lat_ls[::-1]  # ArcGIS numpy (north-up row order)
    lon_ls = pd.np.arange(lonlim[0] + 0.5*cellsize, lonlim[1] + 0.5*cellsize,
                          cellsize)
    lat_n = len(lat_ls)
    lon_n = len(lon_ls)
    spa_ref = Spatial_Reference(epsg)
    ll_corner = [lonlim[0], latlim[0]]
    # Rasters: one file expected per day of the requested period
    dates_dt = pd.date_range(start_date, end_date, freq='D')
    dates_ls = [d.strftime('%Y%m%d') for d in dates_dt]
    ras_ls = List_Datasets(rasters_path, 'tif')
    # Cell code: unique sequential id per cell, row-major from the top row
    temp_ll_ls = [pd.np.arange(x, x + lon_n)
                  for x in range(1, lat_n*lon_n, lon_n)]
    code_ls = pd.np.array(temp_ll_ls)
    empty_vec = pd.np.empty((lat_n, lon_n))
    empty_vec[:] = fill_val
    # Create netcdf file
    print('Creating netCDF file...')
    nc_file = netCDF4.Dataset(nc_path, 'w', format="NETCDF4")
    # Create Dimensions
    lat_dim = nc_file.createDimension('latitude', lat_n)
    lon_dim = nc_file.createDimension('longitude', lon_n)
    time_dim = nc_file.createDimension('time', len(dates_ls))
    # Create Variables
    crs_var = nc_file.createVariable('crs', 'i4')
    crs_var.grid_mapping_name = 'latitude_longitude'
    crs_var.crs_wkt = spa_ref
    lat_var = nc_file.createVariable('latitude', 'f8', ('latitude'),
                                     fill_value=fill_val)
    lat_var.units = 'degrees_north'
    lat_var.standard_name = 'latitude'
    lon_var = nc_file.createVariable('longitude', 'f8', ('longitude'),
                                     fill_value=fill_val)
    lon_var.units = 'degrees_east'
    lon_var.standard_name = 'longitude'
    time_var = nc_file.createVariable('time', 'l', ('time'),
                                      fill_value=fill_val)
    time_var.standard_name = 'time'
    time_var.calendar = 'gregorian'
    code_var = nc_file.createVariable('code', 'i4', ('latitude', 'longitude'),
                                      fill_value=fill_val)
    outliers_var = nc_file.createVariable('outliers', 'i4',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    outliers_var.long_name = 'outliers'
    original_var = nc_file.createVariable('original_values', 'f8',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    original_var.long_name = 'original values'
    hants_var = nc_file.createVariable('hants_values', 'f8',
                                       ('latitude', 'longitude', 'time'),
                                       fill_value=fill_val)
    hants_var.long_name = 'hants values'
    combined_var = nc_file.createVariable('combined_values', 'f8',
                                          ('latitude', 'longitude', 'time'),
                                          fill_value=fill_val)
    combined_var.long_name = 'combined values'
    print('\tVariables created')
    # Load coordinate/metadata data
    lat_var[:] = lat_ls
    lon_var[:] = lon_ls
    time_var[:] = dates_ls
    code_var[:] = code_ls
    # temp folder for the resampled/clipped intermediate rasters
    temp_dir = tempfile.mkdtemp()
    bbox = [lonlim[0], latlim[0], lonlim[1], latlim[1]]
    # Raster loop: fill 'original_values' one date slice at a time
    print('\tExtracting data from rasters...')
    for tt in range(len(dates_ls)):
        # Raster expected for this date
        ras = name_format.format(dates_ls[tt])
        if ras in ras_ls:
            # Resample to the requested cellsize
            ras_resampled = os.path.join(temp_dir, 'r_' + ras)
            Resample(os.path.join(rasters_path, ras), ras_resampled, cellsize)
            # Clip to the requested extent
            ras_clipped = os.path.join(temp_dir, 'c_' + ras)
            Clip(ras_resampled, ras_clipped, bbox)
            # Raster to Array
            # NOTE(review): the array is read from the *resampled* raster;
            # `ras_clipped` is created but never read — confirm intended.
            array = Raster_to_Array(ras_resampled,
                                    ll_corner, lon_n, lat_n,
                                    values_type='float32')
            # Store values
            original_var[:, :, tt] = array
        else:
            # No raster for this date: store the fill-value grid
            original_var[:, :, tt] = empty_vec
    # Close file
    nc_file.close()
    print('NetCDF file created')
    # Return
    return nc_path
def HANTS_netcdf(nc_path, nb, nf, HiLo, low, high, fet, dod, delta,
                 fill_val=-9999.0):
    '''
    This function runs the python implementation of the HANTS algorithm. It
    takes the input netcdf file and fills the 'hants_values',
    'combined_values', and 'outliers' variables.

    `nc_path` must have been produced by `create_netcdf`; see `HANTS` for
    the meaning of nb, nf, HiLo, low, high, fet, dod and delta.
    '''
    # Read netcdfs (opened read/write: results are written back in place)
    nc_file = netCDF4.Dataset(nc_path, 'r+')
    time_var = nc_file.variables['time'][:]
    original_values = nc_file.variables['original_values'][:]
    [rows, cols, ztime] = original_values.shape
    size_st = cols*rows
    values_hants = pd.np.empty((rows, cols, ztime))
    outliers_hants = pd.np.empty((rows, cols, ztime))
    values_hants[:] = pd.np.nan
    outliers_hants[:] = pd.np.nan
    # Additional parameters
    ni = len(time_var)
    ts = range(ni)
    # Loop: run HANTS on the full time series of every pixel
    counter = 1
    print('Running HANTS...')
    for m in range(rows):
        for n in range(cols):
            print('\t{0}/{1}'.format(counter, size_st))
            y = pd.np.array(original_values[m, n, :])
            # HANTS expects missing data encoded as fill_val, not NaN
            y[pd.np.isnan(y)] = fill_val
            [yr, outliers] = HANTS(ni, nb, nf, y, ts, HiLo,
                                   low, high, fet, dod, delta, fill_val)
            values_hants[m, n, :] = yr
            outliers_hants[m, n, :] = outliers
            counter = counter + 1
    nc_file.variables['hants_values'][:] = values_hants
    nc_file.variables['outliers'][:] = outliers_hants
    # 'combined' takes the HANTS value where a point was flagged as an
    # outlier and the original value everywhere else
    nc_file.variables['combined_values'][:] = pd.np.where(outliers_hants,
                                                          values_hants,
                                                          original_values)
    # Close netcdf file
    nc_file.close()
def HANTS_singlepoint(nc_path, point, nb, nf, HiLo, low, high, fet, dod,
                      delta, fill_val=-9999.0):
    '''
    This function runs the python implementation of the HANTS algorithm for a
    single point (lat, lon). It plots the fit and returns a data frame with
    the 'original' and the 'hants' time series.

    `point` is a (lon, lat) pair; `nc_path` must come from `create_netcdf`;
    the remaining parameters are those of `HANTS`.
    '''
    # Location (note the order: index 0 = longitude, index 1 = latitude)
    lonx = point[0]
    latx = point[1]
    nc_file = netCDF4.Dataset(nc_path, 'r')
    time = [pd.to_datetime(i, format='%Y%m%d')
            for i in nc_file.variables['time'][:]]
    lat = nc_file.variables['latitude'][:]
    lon = nc_file.variables['longitude'][:]
    # Check that the point falls within the extent of the netcdf file;
    # if not, clamp it to the nearest edge and warn
    lon_max = max(lon)
    lon_min = min(lon)
    lat_max = max(lat)
    lat_min = min(lat)
    if not (lon_min < lonx < lon_max) or not (lat_min < latx < lat_max):
        warnings.warn('The point lies outside the extent of the netcd file. '
                      'The closest cell is plotted.')
        if lonx > lon_max:
            lonx = lon_max
        elif lonx < lon_min:
            lonx = lon_min
        if latx > lat_max:
            latx = lat_max
        elif latx < lat_min:
            latx = lat_min
    # Get lat-lon index of the nearest cell center in the netcdf file
    lat_closest = lat.flat[pd.np.abs(lat - latx).argmin()]
    lon_closest = lon.flat[pd.np.abs(lon - lonx).argmin()]
    lat_i = pd.np.where(lat == lat_closest)[0][0]
    lon_i = pd.np.where(lon == lon_closest)[0][0]
    # Read values
    original_values = nc_file.variables['original_values'][lat_i, lon_i, :]
    # Additional parameters
    ni = len(time)
    ts = range(ni)
    # HANTS expects missing data encoded as fill_val, not NaN
    y = pd.np.array(original_values)
    y[pd.np.isnan(y)] = fill_val
    [hants_values, outliers] = HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet,
                                     dod, delta, fill_val)
    # Plot the original data and the fitted curve, with a 15% margin
    top = 1.15*max(pd.np.nanmax(original_values),
                   pd.np.nanmax(hants_values))
    bottom = 1.15*min(pd.np.nanmin(original_values),
                      pd.np.nanmin(hants_values))
    ylim = [bottom, top]
    plt.plot(time, hants_values, 'r-', label='HANTS')
    plt.plot(time, original_values, 'b.', label='Original data')
    plt.ylim(ylim[0], ylim[1])
    plt.legend(loc=4)
    plt.xlabel('time')
    plt.ylabel('values')
    plt.gcf().autofmt_xdate()
    # NOTE(review): argument-less plt.axes() is deprecated in newer
    # matplotlib (plt.gca() is the modern equivalent) — confirm before
    # upgrading matplotlib.
    plt.axes().set_title('Point: lon {0:.2f}, lat {1:.2f}'.format(lon_closest,
                                                                  lat_closest))
    plt.axes().set_aspect(0.5*(time[-1] - time[0]).days/(ylim[1] - ylim[0]))
    plt.show()
    # Close netcdf file
    nc_file.close()
    # Data frame holding both time series
    df = pd.DataFrame({'time': time,
                       'original': original_values,
                       'hants': hants_values})
    # Return
    return df
def HANTS(ni, nb, nf, y, ts, HiLo, low, high, fet, dod, delta, fill_val):
    '''
    This function applies the Harmonic ANalysis of Time Series (HANTS)
    algorithm originally developed by the Netherlands Aerospace Centre (NLR)
    (http://www.nlr.org/space/earth-observation/).

    This python implementation was based on two previous implementations
    available at the following links:
    https://codereview.stackexchange.com/questions/71489/harmonic-analysis-of-time-series-applied-to-arrays
    http://nl.mathworks.com/matlabcentral/fileexchange/38841-matlab-implementation-of-harmonic-analysis-of-time-series--hants-

    Uses `numpy` directly instead of the `pd.np` alias, which was deprecated
    and then removed in pandas 2.0.

    Parameters
    ----------
    ni : int
        Number of samples in the time series.
    nb : int
        Length of the base period, in virtual samples.
    nf : int
        Number of frequencies above the zero frequency.
    y : numpy.ndarray
        Input sample values; may be modified in place (outlier weights).
    ts : sequence of int
        Sample times relative to the base period.
    HiLo : str
        'Hi' rejects high outliers, 'Lo' rejects low outliers; any other
        value disables one-sided rejection.
    low, high : float
        Valid data range; values outside are pre-flagged as outliers.
    fet : float
        Fit error tolerance: points deviating more than this from the fitted
        curve are candidates for rejection.
    dod : int
        Degree of overdeterminedness (excess of data points over unknowns).
    delta : float
        Small regularization term damping high amplitudes.
    fill_val : float
        No-data value.

    Returns
    -------
    list
        [yr, outliers]: the reconstructed series and the outlier flags.

    Raises
    ------
    Exception
        When too many points are out of range and the series does not
        contain fill values.
    '''
    nr = min(2*nf+1, ni)            # number of unknowns: 2 per frequency + mean
    # Design matrix: row 0 is the constant term, rows 2i-1 / 2i are cos / sin.
    mat = np.zeros((nr, ni))
    yr = np.zeros((ni, 1))
    outliers = np.zeros((1, len(y)))
    # One-sided rejection sign: -1 rejects high outliers, +1 rejects low ones.
    sHiLo = 0
    if HiLo == 'Hi':
        sHiLo = -1
    elif HiLo == 'Lo':
        sHiLo = 1
    noutmax = ni - nr - dod         # maximum number of rejectable points
    mat[0, :] = 1.0
    ang = 2*math.pi*np.arange(nb)/nb
    cs = np.cos(ang)
    sn = np.sin(ang)
    i = np.arange(1, nf+1)
    for j in np.arange(ni):
        index = np.mod(i*ts[j], nb)
        mat[2 * i-1, j] = cs.take(index)
        mat[2 * i, j] = sn.take(index)
    # p is the weight vector: 0 marks rejected samples.
    p = np.ones_like(y)
    bool_out = (y < low) | (y > high)
    p[bool_out] = 0
    outliers[bool_out.reshape(1, y.shape[0])] = 1
    nout = np.sum(p == 0)
    if nout > noutmax:
        if np.isclose(y, fill_val).any():
            # Series is (partly) fill values: pass it through untouched.
            ready = np.array([True])
            yr = y
            outliers = np.zeros((y.shape[0]), dtype=int)
            outliers[:] = fill_val
        else:
            raise Exception('Not enough data points.')
    else:
        ready = np.zeros((y.shape[0]), dtype=bool)
    nloop = 0
    nloopmax = ni
    while ((not ready.all()) & (nloop < nloopmax)):
        nloop += 1
        # Weighted least-squares fit of the harmonic model.
        za = np.matmul(mat, p*y)
        A = np.matmul(np.matmul(mat, np.diag(p)),
                      np.transpose(mat))
        A = A + np.identity(nr)*delta
        A[0, 0] = A[0, 0] - delta   # do not regularize the mean term
        zr = np.linalg.solve(A, za)
        yr = np.matmul(np.transpose(mat), zr)
        # Rank points by signed deviation from the fitted curve.
        diffVec = sHiLo*(yr-y)
        err = p*diffVec
        err_ls = list(err)
        err_sort = deepcopy(err)
        err_sort.sort()
        rankVec = [err_ls.index(f) for f in err_sort]
        maxerr = diffVec[rankVec[-1]]
        ready = (maxerr <= fet) | (nout == noutmax)
        if (not ready):
            i = ni - 1
            j = rankVec[i]
            # Reject the worst offenders, then refit on the next loop.
            # NOTE(review): the reference implementations re-derive j from
            # rankVec here; this port resets j to 0/1 instead. Kept as-is to
            # preserve behavior — verify against the MATLAB source.
            while ((p[j]*diffVec[j] > 0.5*maxerr) & (nout < noutmax)):
                p[j] = 0
                outliers[0, j] = 1
                nout += 1
                i -= 1
                if i == 0:
                    j = 0
                else:
                    j = 1
    return [yr, outliers]
def export_tiffs(rasters_path_out, nc_path, name_format,
export_hants_only=False):
'''
This function exports the output of the HANTS analysis.
If 'export_hants_only' is False (default), the output rasters have the best
value available. Therefore, the cells in the output rasters will have the
original | |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import re
from django.db.models import Lookup
from django.contrib.postgres.fields import JSONField
class FilterTree(object):
"""
This class exposes methods for parsing a JSONB query from the Django ORM
and building a corresponding SQL query.
Manual filtering by way of Django's ORM might look like:
Something.objects.filter(<jsonb_field>__jsonb=<filter_specification>)
Check out the jsonb_field_testing test module for some real examples.
"""
def __init__(self, tree, field):
self.field = field # The JSONField to filter on.
self.tree = tree # The nested dictionary representing the query.
# Map the available filter types to their corresponding classmethod.
self.sql_generators = {
"intrange": self.intrange_filter,
"containment": self.containment_filter,
"containment_multiple": self.multiple_containment_filter
}
self.rules = self.get_rules(self.tree) # Parse and save the query directive.
def is_rule(self, obj):
"""
Check to see if a dictionary is formatted as a query "rule". This method
is useful for checking to see whether the recursion has bottomed
out in `get_rules`.
Args:
obj (dict): The dictionary that should be checked for ruleness.
Returns:
bool: True if the dict is a rule, False otherwise.
"""
if '_rule_type' in obj and obj['_rule_type'] in self.sql_generators:
return True
return False
def get_rules(self, obj, current_path=[]):
"""
Recursively crawl a dictionary to look for filtering rules.
Args:
obj (dict): The dictionary to be crawled.
current_path (list): The branch of the tree leading up to this point.
Returns:
list: A list of two-tuples representing query rules. The first element
will be the path to the value in question, while the second
element will be the rule to apply for the filter.
"""
# If node isn't a rule or dictionary
if type(obj) != dict:
return []
# If node is a rule return its location and its details
if self.is_rule(obj):
return [([self.field] + current_path, obj)]
rules = []
for path, val in obj.items():
rules = rules + self.get_rules(val, current_path + [path])
return rules
@staticmethod
def split_search_pattern(pattern):
# Split pattern word by word, but make sure to keep quoted words together
# Regex to match quoted substrings from https://stackoverflow.com/a/5696141
word_regex = r'({})'.format(
'|'.join([
# Match groups of words in double quotes (Allowing for escaping)
r'"[^"\\]*(?:\\.[^"\\]*)*"',
# Match groups of words in single quotes (Allowing for escaping)
r"'[^'\\]*(?:\\.[^'\\]*)*'",
# Match all unquoted words individually
'[\S]+'
])
)
matches = re.findall(word_regex, pattern)
# The regex matches the bounding quotes in each result, so we want to trim them off
return [match.strip('\'"') for match in matches]
def sql(self):
"""
Produce output that can be compiled into SQL by Django and psycopg2.
Returns:
A tuple of a (template) string followed by a list
of parameters for compiling that template. (This is the output that
Django expects for compiling a SQL query.)
"""
rule_specs = []
patterns = {}
pattern_specs = []
# It's safe to unpack `self.get_rules` because it can only
# return A) an empty list or B) a list of two-tuples with two elements in
# them (the path and the rule for each query directive).
for path, rule in self.rules:
# Don't parse if this is not a properly registered rule type.
if not self.is_rule(rule):
pass
rule_type = rule['_rule_type']
sql_tuple = self.sql_generators[rule_type](path, rule)
if sql_tuple is not None:
rule_specs.append(sql_tuple)
# The check on 'pattern' here allows us to apply a pattern filter on top of others
if 'pattern' in rule:
match_multiple = (rule['_rule_type'] == 'containment_multiple')
for pattern in self.split_search_pattern(rule['pattern']):
sql_tuple = FilterTree.text_similarity_filter(path, pattern, match_multiple)
# add to the list of rules generated for this pattern (one per field)
patterns.setdefault(pattern, []).append(sql_tuple)
rule_string = ' AND '.join([rule[0] for rule in rule_specs])
pattern_rules = patterns.values()
pattern_strings = []
# check if any of the fields for this string pattern match
for rule_list in pattern_rules:
pattern_strings.append(' OR '.join([rule[0] for rule in rule_list]))
pattern_specs += rule_list
# check that record has a match for all of the string patterns in some field
pattern_string = '(' + ') AND ('.join(pattern_strings) + ')' if pattern_strings else ''
if rule_string != '' and pattern_string != '':
filter_string = '(' + (' AND ('.join([rule_string, pattern_string])) + ')' + ')'
elif rule_string != '' or pattern_string != '':
filter_string = '(' + ''.join([rule_string, pattern_string]) + ')'
else:
filter_string = ''
# flatten the rule_paths
rule_paths_first = ([rule[1] for rule in rule_specs] +
[rule[1] for rule in pattern_specs])
rule_paths = [item for sublist in rule_paths_first
for item in sublist]
outcome = (filter_string, tuple(rule_paths))
return outcome
# Filters
@classmethod
def containment_filter(cls, path, rule):
"""
Filter for objects that match the `rule` at some location `path` in
a Record object.
Registered on the 'contains' rule type.
Args:
path (list): A list of keys representing the path to the field in question,
with keys stored from deepest to shallowest.
rule (dict): A dictionary representing the rule to apply.
Returns:
tuple: Information for building a SQL query from this filter rule,
with the containment query in the first position and the
parameters in the second.
"""
# The `path` dict stores the full branch that leads to the value in
# question, from leaf to root.
leaf, branch = path[0], path[1:]
template = reconstruct_object(branch)
has_containment = 'contains' in rule
abstract_contains_str = leaf + " @> %s"
if has_containment:
all_contained = rule.get('contains')
else:
return None
contains_params = []
json_path = [json.dumps(x) for x in branch]
for contained in all_contained:
interpolants = tuple(json_path + [json.dumps(contained)])
contains_params.append(template % interpolants)
contains_str = ' OR '.join([abstract_contains_str] * len(all_contained))
if contains_str != '':
return ('(' + contains_str + ')', contains_params)
else:
return None
@classmethod
def multiple_containment_filter(cls, path, rule):
"""
Filter for objects that match the specified `rule` in any of the objects in a
given list.
Registered on the 'containment_multiple' rule type.
Args:
path (list): A list of keys representing the path to the field in question,
with keys stored from deepest to shallowest.
rule (dict): A dictionary representing the rule to apply.
Returns:
tuple: Information for building a SQL query from this filter rule,
with the containment query in the first position and the
parameters in the second.
"""
# The `path` dict stores the full branch that leads to the value in
# question, from leaf to root.
leaf, branch = path[0], path[1:]
template = reconstruct_object_multiple(branch)
has_containment = 'contains' in rule
abstract_contains_str = leaf + " @> %s"
if has_containment:
all_contained = rule.get('contains')
else:
return None
contains_params = []
json_path = [json.dumps(x) for x in branch]
for contained in all_contained:
interpolants = tuple(json_path + [json.dumps(contained)])
contains_params.append(template % interpolants)
contains_str = ' OR '.join([abstract_contains_str] * len(all_contained))
if contains_str != '':
return ('(' + contains_str + ')', contains_params)
else:
return None
@classmethod
def intrange_filter(cls, path, rule):
"""
Filter for numbers that match boundaries provided by a rule.
Registered on the 'intrange' rule type.
Args:
path (list): A list of keys representing the path to the field in question,
with keys stored from deepest to shallowest.
rule (dict): A dictionary representing the rule to apply.
Returns:
tuple: Information for building a SQL query from this filter rule,
with the containment query in the first position and the
parameters in the second.
"""
traversed_int = "(" + extract_value_at_path(path) + ")::int"
has_min = 'min' in rule and rule['min'] is not None
has_max = 'max' in rule and rule['max'] is not None
if has_min:
minimum = rule['min']
more_than = ("{traversal_int} >= %s"
.format(traversal_int=traversed_int))
if has_max:
maximum = rule['max']
less_than = ("{traversal_int} <= %s"
.format(traversal_int=traversed_int))
# The `path` dict stores the full branch that leads to the value in
# question, from leaf to root.
branch = path[1:]
if has_min and not has_max:
sql_template = '(' + more_than + ')'
return (sql_template, branch + [minimum])
elif has_max and not has_min:
sql_template = '(' + less_than + ')'
return (sql_template, branch + [maximum])
elif has_max and has_min:
sql_template = '(' + less_than + ' AND ' + more_than + ')'
return (sql_template, branch + [maximum] + branch + [minimum])
else:
return None
@classmethod
| |
'Status',
'NumberOfRemainingSuboperations',
'NumberOfCompletedSuboperations',
'NumberOfFailedSuboperations',
'NumberOfWarningSuboperations']
num_of_remaining_sub_ops = dimse_property((0x0000, 0x1020))
"""
The number of remaining C-STORE sub-operations to be invoked for this C-GET operation.
"""
num_of_completed_sub_ops = dimse_property((0x0000, 0x1021))
"""
The number of C-STORE sub-operations invoked by this C-GET operation that have completed
successfully.
"""
num_of_failed_sub_ops = dimse_property((0x0000, 0x1022))
"""
The number of C-STORE sub-operations invoked by this C-GET operation that have failed.
"""
num_of_warning_sub_ops = dimse_property((0x0000, 0x1023))
"""
The number of C-STORE sub-operations invoked by this C-GET operation that generated warning
responses.
"""
class CMoveRQMessage(DIMSERequestMessage, PriorityMixin):
    """C-MOVE-RQ Message.

    Complete definition can be found in DICOM PS3.7, 9.3.4.1 C-MOVE-RQ
    """
    command_field = 0x0021
    """
    This field distinguishes the DIMSE-C operation conveyed by this Message. The value of this
    field shall be set to 0021H for the C-MOVE-RQ Message.
    """
    # Keywords of the command-set elements carried by this message.
    # 'Priority' is presumably exposed via PriorityMixin — confirm.
    command_fields = ['CommandGroupLength', 'AffectedSOPClassUID',
                      'MessageID', 'Priority', 'MoveDestination']
    move_destination = dimse_property((0x0000, 0x0600))
    """
    Shall be set to the DICOM AE Title of the destination DICOM AE to which the C-STORE
    sub-operations are being performed.
    """
class CMoveRSPMessage(DIMSEResponseMessage, StatusMixin):
    """C-MOVE-RSP Message.

    Complete definition can be found in DICOM PS3.7, 9.3.4.2 C-MOVE-RSP
    """
    command_field = 0x8021
    """
    This field distinguishes the DIMSE-C operation conveyed by this Message. The value of this
    field shall be set to 8021H for the C-MOVE-RSP Message.
    """
    # Keywords of the command-set elements carried by this message.
    # 'Status' is presumably exposed via StatusMixin — confirm.
    command_fields = ['CommandGroupLength', 'AffectedSOPClassUID',
                      'MessageIDBeingRespondedTo', 'Status',
                      'NumberOfRemainingSuboperations',
                      'NumberOfCompletedSuboperations',
                      'NumberOfFailedSuboperations',
                      'NumberOfWarningSuboperations']
    num_of_remaining_sub_ops = dimse_property((0x0000, 0x1020))
    """
    The number of remaining sub-operations to be invoked for this C-MOVE operation.
    """
    num_of_completed_sub_ops = dimse_property((0x0000, 0x1021))
    """
    The number of C-STORE sub-operations invoked by this C-MOVE operation that have
    completed successfully.
    """
    num_of_failed_sub_ops = dimse_property((0x0000, 0x1022))
    """
    The number of C-STORE sub-operations invoked by this C-MOVE operation that have failed.
    """
    num_of_warning_sub_ops = dimse_property((0x0000, 0x1023))
    """
    The number of C-STORE sub-operations invoked by this C-MOVE operation that generated
    warning responses.
    """
class CCancelRQMessage(DIMSEResponseMessage):
    """C-CANCEL-FIND-RQ, C-CANCEL-GET-RQ, C-CANCEL-MOVE-RQ Messages.

    Complete definition can be found in:

    * DICOM PS3.7, 9.3.2.3 C-CANCEL-FIND-RQ
    * DICOM PS3.7, 9.3.3.3 C-CANCEL-GET-RQ
    * DICOM PS3.7, 9.3.4.3 C-CANCEL-MOVE-RQ

    Although a request, it derives from DIMSEResponseMessage — presumably
    because its only payload is MessageIDBeingRespondedTo; confirm against
    the base-class definitions.
    """
    command_field = 0x0FFF
    """
    This field distinguishes the DIMSE-C operation conveyed by this Message. The value of this
    field shall be set to 0FFFH for the C-CANCEL-MOVE-RQ Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'MessageIDBeingRespondedTo']
class NEventReportRQMessage(DIMSERequestMessage):
    """N-EVENT-REPORT-RQ Message.

    Complete definition can be found in DICOM PS3.7, 10.3.1.1 N-EVENT-REPORT-RQ
    """
    command_field = 0x0100
    """
    This field distinguishes the DIMSE-N notification conveyed by this Message. The value of this
    field shall be set to 0100H for the N-EVENT-REPORT-RQ Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'AffectedSOPClassUID', 'MessageID',
                      'AffectedSOPInstanceUID', 'EventTypeID']
    event_type_id = dimse_property((0x0000, 0x1002))
    """
    Values for this field are application-specific.
    """
    affected_sop_instance_uid = dimse_property((0x0000, 0x1000))
    """
    Contains the UID of the SOP Instance for which this event occurred.
    """
class NEventReportRSPMessage(DIMSEResponseMessage, StatusMixin):
    """N-EVENT-REPORT-RSP Message.

    Complete definition can be found in DICOM PS3.7, 10.3.1.2 N-EVENT-REPORT-RSP
    """
    command_field = 0x8100
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 8100H for the N-EVENT-REPORT-RSP Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'AffectedSOPClassUID',
                      'MessageIDBeingRespondedTo',
                      'Status', 'AffectedSOPInstanceUID', 'EventTypeID']
    event_type_id = dimse_property((0x0000, 0x1002))
    """
    Values for this field are application-specific.
    """
    affected_sop_instance_uid = dimse_property((0x0000, 0x1000))
    """
    Contains the UID of the SOP Instance for which this event occurred.
    """
class NGetRQMessage(DIMSERequestMessage):
    """N-GET-RQ Message.

    Complete definition can be found in DICOM PS3.7, 10.3.2.1 N-GET-RQ
    """
    command_field = 0x0110
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 0110H for the N-GET-RQ Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'RequestedSOPClassUID', 'MessageID',
                      'RequestedSOPInstanceUID', 'AttributeIdentifierList']
    # Tag (0000,0003): matches 'RequestedSOPClassUID' in command_fields above.
    sop_class_uid = dimse_property((0x0000, 0x0003))
    requested_sop_instance_uid = dimse_property((0x0000, 0x1001))
    """
    Contains the UID of the SOP Instance for which Attribute Values are to be retrieved.
    """
    attribute_identifier_list = dimse_property((0x0000, 0x1005))
    """
    This field contains an Attribute Tag for each of the n Attributes applicable to the
    N-GET operation.
    """
class NGetRSPMessage(DIMSEResponseMessage, StatusMixin):
    """N-GET-RSP Message.

    Complete definition can be found in DICOM PS3.7, 10.3.2.2 N-GET-RSP
    """
    command_field = 0x8110
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 8110H for the N-GET-RSP Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'MessageIDBeingRespondedTo',
                      'Status', 'AffectedSOPInstanceUID']
    affected_sop_instance_uid = dimse_property((0x0000, 0x1000))
    """
    Contains the UID of the SOP Instance for which Attribute Values are returned.
    """
class NSetRQMessage(DIMSERequestMessage):
    """N-SET-RQ Message.

    Complete definition can be found in DICOM PS3.7, 10.3.3.1 N-SET-RQ
    """
    command_field = 0x0120
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 0120H for the N-SET-RQ Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'RequestedSOPClassUID',
                      'MessageID', 'RequestedSOPInstanceUID']
    # Tag (0000,0003): matches 'RequestedSOPClassUID' in command_fields above.
    sop_class_uid = dimse_property((0x0000, 0x0003))
    requested_sop_instance_uid = dimse_property((0x0000, 0x1001))
    """
    Contains the UID of the SOP Instance for which Attribute values are to be modified.
    """
class NSetRSPMessage(DIMSEResponseMessage, StatusMixin):
    """N-SET-RSP Message.

    Complete definition can be found in DICOM PS3.7, 10.3.3.2 N-SET-RSP
    """
    command_field = 0x8120
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 8120H for the N-SET-RSP Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'AffectedSOPClassUID',
                      'MessageIDBeingRespondedTo', 'Status',
                      'AffectedSOPInstanceUID']
    # Tag (0000,0002): matches 'AffectedSOPClassUID' in command_fields above.
    # (The docstring describing command_field 8120H was previously attached
    # to this attribute by mistake; it has been moved to command_field.)
    sop_class_uid = dimse_property((0x0000, 0x0002))
    affected_sop_instance_uid = dimse_property((0x0000, 0x1000))
    """
    Contains the UID of the SOP Instance for which Attribute Values were modified.
    """
class NActionRQMessage(DIMSERequestMessage):
    """N-ACTION-RQ Message.

    Complete definition can be found in DICOM PS3.7, 10.3.4.1 N-ACTION-RQ
    """
    command_field = 0x0130
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 0130H for the N-ACTION-RQ Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'RequestedSOPClassUID', 'MessageID',
                      'RequestedSOPInstanceUID', 'ActionTypeID']
    # Tag (0000,0003): matches 'RequestedSOPClassUID' in command_fields above.
    sop_class_uid = dimse_property((0x0000, 0x0003))
    requested_sop_instance_uid = dimse_property((0x0000, 0x1001))
    """
    Contains the UID of the SOP Instance for which the action is to be performed.
    """
    action_type_id = dimse_property((0x0000, 0x1008))
    """
    Values for this field are application-specific.
    """
class NActionRSPMessage(DIMSEResponseMessage, StatusMixin):
    """N-ACTION-RSP Message.

    Complete definition can be found in DICOM PS3.7, 10.3.4.2 N-ACTION-RSP
    """
    command_field = 0x8130
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 8130H for the N-ACTION-RSP Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'AffectedSOPClassUID',
                      'MessageIDBeingRespondedTo', 'Status',
                      'AffectedSOPInstanceUID', 'ActionTypeID']
    # Tag (0000,0002): matches 'AffectedSOPClassUID' in command_fields above.
    sop_class_uid = dimse_property((0x0000, 0x0002))
    affected_sop_instance_uid = dimse_property((0x0000, 0x1000))
    """
    Contains the UID of the SOP Instance for which the action was performed.
    """
    action_type_id = dimse_property((0x0000, 0x1008))
    """
    Values for this field are application-specific.
    """
class NCreateRQMessage(DIMSERequestMessage):
    """N-CREATE-RQ Message.

    Complete definition can be found in DICOM PS3.7, 10.3.5.1 N-CREATE-RQ
    """
    command_field = 0x0140
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 0140H for the N-CREATE-RQ Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'AffectedSOPClassUID', 'MessageID',
                      'AffectedSOPInstanceUID']
    # Tag (0000,0002): matches 'AffectedSOPClassUID' in command_fields above.
    sop_class_uid = dimse_property((0x0000, 0x0002))
    affected_sop_instance_uid = dimse_property((0x0000, 0x1000))
    """
    Contains the UID of the SOP Instance to be created.
    """
class NCreateRSPMessage(DIMSEResponseMessage, StatusMixin):
    """N-CREATE-RSP Message.

    Complete definition can be found in DICOM PS3.7, 10.3.5.2 N-CREATE-RSP
    """
    command_field = 0x8140
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 8140H for the N-CREATE-RSP Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'AffectedSOPClassUID',
                      'MessageIDBeingRespondedTo', 'Status',
                      'AffectedSOPInstanceUID']
    # Tag (0000,0002): matches 'AffectedSOPClassUID' in command_fields above.
    sop_class_uid = dimse_property((0x0000, 0x0002))
    affected_sop_instance_uid = dimse_property((0x0000, 0x1000))
    """
    Contains the UID of the SOP Instance that was created.
    """
class NDeleteRQMessage(DIMSERequestMessage):
    """N-DELETE-RQ Message.

    Complete definition can be found in DICOM PS3.7, 10.3.6.1 N-DELETE-RQ
    """
    command_field = 0x0150
    """
    This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
    field shall be set to 0150H for the N-DELETE-RQ Message.
    """
    # Keywords of the command-set elements carried by this message.
    command_fields = ['CommandGroupLength', 'RequestedSOPClassUID', 'MessageID',
                      'RequestedSOPInstanceUID']
    # Tag (0000,0003): matches 'RequestedSOPClassUID' in command_fields above.
    sop_class_uid = dimse_property((0x0000, 0x0003))
    requested_sop_instance_uid = dimse_property((0x0000, 0x1001))
    """
    Contains the UID of the SOP Instance to be deleted.
    """
class NDeleteRSPMessage(DIMSEResponseMessage, StatusMixin):
"""N-DELETE-RSP Message.
Complete definition can be found in DICOM PS3.7, 10.3.6.2 N-DELETE-RSP
"""
command_field = 0x8150
"""
This field distinguishes the DIMSE-N operation conveyed by this Message. The value of this
field shall be set to 8150H for the N-DELETE-RSP Message.
"""
command_fields = ['CommandGroupLength', 'AffectedSOPClassUID',
'MessageIDBeingRespondedTo', 'Status',
'AffectedSOPInstanceUID']
sop_class_uid = dimse_property((0x0000, 0x0002))
affected_sop_instance_uid = dimse_property((0x0000, 0x1000))
"""
Contains the UID of the SOP Instance that | |
atts[0]
return self
    # Display template and comparison operator; subclasses override `op`
    # (e.g. "<>", "<", "<=", ">=").
    fmt = "(%s %s ANY %s)"
    op = "="
    def __repr__(self):
        # E.g. "(<expr> = ANY <subquery>)".
        return self.fmt % (self.expr, self.op, self.subq)
def __call__(self, assigns, toplevel=0):
cached_column = self.cached_column
cachable = self.cachable
expr = self.expr
subq = self.subq
att = self.att
if cachable:
if cached_column is None:
subqr = subq.eval().rows()
cc = self.cached_column = dump_single_column(subqr, att)
#print self, "cached", self.cached_column
exprvals = expr.value(assigns)
kjDict = kjbuckets.kjDict
compare = self.compare
tt = type
from types import IntType
result = assigns[:]
for i in xrange(len(assigns)):
assignsi = assigns[i]
if tt(assignsi) is IntType: continue
thisval = exprvals[i]
testbtup = BoundTuple()
testbtup.assns = kjDict(assignsi)
if not cachable:
subqr = subq.eval(outerboundtuple=testbtup).rows()
cc = dump_single_column(subqr, att)
#print self, "uncached", cc, thisval
if not compare(thisval, cc):
#print "eliminated", assignsi
result[i] = 0
return result
    def compare(self, value, column):
        # '= ANY': true iff value appears in the subquery column.
        return value in column
    def __hash__(self):
        # Mix the subquery hash with the bit-inverted expression hash.
        return hash(self.subq) ^ ~hash(self.expr)
    def __cmp__(self, other):
        # Python 2 total ordering: compare by class, then expression, then
        # subquery.
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.expr, other.expr)
        if test: return test
        return cmp(self.subq, other.subq)
# "expr IN (subq)" is equivalent to "expr = ANY (subq)", so reuse QuantEQ.
InPredicate = QuantEQ
class InLits(NontrivialEqPred):
    """expr IN literals, support dynamic bindings."""
    def __init__(self, expr, lits):
        # Left-hand expression and the list of literal expressions to test
        # against; literal values are computed lazily and cached.
        self.expr = expr
        self.lits = lits
        self.cached_lits = None
    def initargs(self):
        # Arguments needed to reconstruct this node.
        return (self.expr, self.lits)
    def uncache(self):
        # Drop cached literal values (e.g. when dynamic bindings change).
        self.cached_lits = None
    def domain(self):
        # Union of the attribute domains of the literals and the expression.
        d = []
        for l in self.lits:
            d0 = l.domain()
            if d0:
                d = d + d0.items()
        d0 = self.expr.domain()
        if d:
            kjSet = kjbuckets.kjSet
            return d0 + kjSet(d)
        else:
            return d0
    def relbind(self, dict, db):
        # Bind the expression and every literal to relations; mutates and
        # returns self.
        newlits = []
        for l in self.lits:
            newlits.append(l.relbind(dict, db))
        self.lits = newlits
        self.expr = self.expr.relbind(dict, db)
        return self
    fmt = "(%s IN %s)"
    def __repr__(self):
        return self.fmt % (self.expr, self.lits)
    def __call__(self, assigns, toplevel=0):
        """Zero out entries of `assigns` whose expression value is not among
        the literal values.  Integer entries mark already-eliminated rows."""
        # LITERALS ARE CONSTANT! NEED ONLY LOOK FOR ONE ASSIGN.
        tt = type
        from types import IntType
        litvals = self.cached_lits
        if litvals is None:
            # Evaluate the literals once, against the first real assignment.
            assigns0 = []
            for asn in assigns:
                if tt(asn) is not IntType:
                    assigns0.append(asn)
                    break
            if not assigns0:
                # all false/unknown
                return assigns
            litvals = []
            for lit in self.lits:
                value = lit.value(assigns0)
                litvals.append(value[0])
            self.cached_lits = litvals
        expr = self.expr
        exprvals = expr.value(assigns)
        result = assigns[:]
        for i in xrange(len(assigns)):
            assignsi = assigns[i]
            if tt(assignsi) is IntType: continue
            thisval = exprvals[i]
            if thisval not in litvals:
                #print "eliminated", assignsi
                result[i] = 0
        return result
    def compare(self, value, column):
        # Membership helper kept for interface parity with QuantEQ; not used
        # by __call__ above.
        return value in column
    def __hash__(self):
        # Constant salt distinguishes IN-literals from other predicate hashes.
        return 10 ^ hash(self.expr)
    def __cmp__(self, other):
        # Python 2 total ordering: by class, then expression, then literals.
        test = cmp(self.__class__, other.__class__)
        if test: return test
        test = cmp(self.expr, other.expr)
        if test: return test
        return cmp(self.lits, other.lits)
class QuantNE(QuantEQ):
    """Quantified "<> ANY": true when some column entry differs from value."""
    op = "<>"
    def compare(self, value, column):
        if any(entry != value for entry in column):
            return 1
        return 0
### note: faster NOT IN using QuantNE?
class QuantLT(QuantEQ):
    """Quantified less than any predicate"""
    op = "<"
    def uncache(self):
        # also drop the memoized extremum used by compare()
        self.testval = self.cached = self.cached_column = None
    def compare(self, value, column):
        """value < ANY(column) is equivalent to value < max(column).

        When the subquery is uncorrelated (cachable) the extremum is
        computed once and memoized in self.testval.
        """
        if self.cachable:
            if self.cached:
                testval = self.testval
            else:
                testval = self.testval = max(column)
                self.cached = 1
        else:
            testval = max(column)
        return value < testval
class QuantLE(QuantLT):
    """Quantified "<= ANY": value <= max(column), with the extremum
    memoized when the subquery column is constant."""
    op = "<="
    def compare(self, value, column):
        if not self.cachable:
            return value <= max(column)
        if not self.cached:
            self.testval = max(column)
            self.cached = 1
        return value <= self.testval
class QuantGE(QuantLT):
    """Quantified ">= ANY": value >= min(column), with the extremum
    memoized when the subquery column is constant."""
    op = ">="
    def compare(self, value, column):
        if not self.cachable:
            return value >= min(column)
        if not self.cached:
            self.testval = min(column)
            self.cached = 1
        return value >= self.testval
class QuantGT(QuantLT):
    """Quantified "> ANY": value > min(column), with the extremum
    memoized when the subquery column is constant."""
    op = ">"
    def compare(self, value, column):
        if not self.cachable:
            return value > min(column)
        if not self.cached:
            self.testval = min(column)
            self.cached = 1
        return value > self.testval
def dump_single_column(assigns, att):
    """Return the list of values of attribute ``att``, one per assignment.

    ``assigns`` is a sequence of mapping-like row bindings.  A new list
    is returned; the input sequence is not modified.  (Replaces the
    manual xrange index loop with the equivalent comprehension.)
    """
    return [row[att] for row in assigns]
class LessPred(NontrivialEqPred):
    """Comparison predicate left < right."""
    op = "<"
    def __call__(self, assigns, toplevel=0):
        """Zero out every live binding where left >= right.

        Integer entries in assigns mark rows already eliminated and
        are left untouched.
        """
        from types import IntType
        #print '***********************************************'
        #print self.left, self.right
        #print assigns
        lv = self.left.value(assigns)
        rv = self.right.value(assigns)
        result = assigns[:]
        for i in xrange(len(assigns)):
            t = assigns[i]
            if not isinstance(t, IntType) and lv[i] >= rv[i]:
                result[i] = 0
        return result
    def __inv__(self):
        # logical negation: NOT (a < b) == (b <= a)
        return LessEqPred(self.right, self.left)
    def __hash__(self):
        return hash(self.left)^hash(self.right)
class LessEqPred(LessPred):
    """Comparison predicate left <= right."""
    op = "<="
    def __call__(self, assigns, toplevel=0):
        """Zero out every live binding where left > right."""
        from types import IntType
        leftvals = self.left.value(assigns)
        rightvals = self.right.value(assigns)
        out = assigns[:]
        for idx, binding in enumerate(assigns):
            if isinstance(binding, IntType):
                continue  # row already eliminated upstream
            if leftvals[idx] > rightvals[idx]:
                out[idx] = 0
        return out
    def __inv__(self):
        # logical negation: NOT (a <= b) == (b < a)
        return LessPred(self.right, self.left)
class SubQueryExpression(BoundMinus, SimpleRecursive):
    """sub query expression (subq), must eval to single column, single value"""
    def __init__(self, subq):
        self.subq = subq
        # att: the single selected attribute; cachable/cached/cached_value
        # memoize the result when the subquery has no outer references
        self.att = self.cachable = self.cached = self.cached_value = None
    def initargs(self):
        return (self.subq,)
    def uncache(self):
        """Drop the memoized value (e.g. after rebinding)."""
        self.cached = self.cached_value = None
    def domain(self):
        """Unbound attributes; an empty domain marks the node cachable."""
        result = self.subq.unbound()
        if not result:
            self.cachable = 1
        #print "expr subq domain", result
        return result
    def relbind(self, dict, db):
        # NOTE(review): argument order (db, dict) differs from the
        # (dict, db) convention used by the predicate nodes above --
        # presumably Selector.relbind takes (db, outer) in this order;
        # confirm against the full file.
        subq = self.subq = self.subq.relbind(db, dict)
        # test that subquery is single column and determine att
        sl = subq.select_list
        atts = sl.attorder
        if len(atts)<>1:
            raise ValueError, \
              "Quantified predicate requires unit select list: %s" % atts
        self.att = atts[0]
        return self
    def __repr__(self):
        return "(%s)" % self.subq
    def value(self, contexts):
        """Return one subquery value per context (integers are skipped).

        Constant subqueries are evaluated once and memoized; correlated
        ones are re-evaluated per outer binding.  In both cases the
        subquery must yield exactly one row.
        """
        subq = self.subq
        att = self.att
        if self.cachable:
            if self.cached:
                cached_value = self.cached_value
            else:
                self.cached = 1
                seval = subq.eval().rows()
                lse = len(seval)
                if lse<>1:
                    raise ValueError, \
                      "const subquery expression must return 1 result: got %s" % lse
                self.cached_value = cached_value = seval[0][att]
                #print "const subq cached", cached_value
            return [cached_value] * len(contexts)
        from types import IntType
        tt = type
        result = contexts[:]
        kjDict = kjbuckets.kjDict
        for i in xrange(len(contexts)):
            contextsi = contexts[i]
            if tt(contextsi) is not IntType:
                testbtup = BoundTuple()
                testbtup.assns = kjDict(contextsi)
                #print "subq exp", testbtup
                seval = subq.eval(outerboundtuple=testbtup).rows()
                lse = len(seval)
                if lse<>1:
                    raise ValueError, \
                      "dynamic subquery expression must return 1 result: got %s" % lse
                result[i] = seval[0][att]
                #print "nonconst subq uncached", result[i], contextsi
        return result
SELECT_TEMPLATE = """\
SELECT %s %s
FROM %s
WHERE %s
GROUP BY %s
HAVING %s %s
ORDER BY %s %s
"""
def dynamic_binding(ndynamic, dynamic):
    """create bindings from dynamic tuple for ndynamic parameters
    if a tuple is given create one
    if a list is given create many

    Returns a list of kjDict bindings, one per parameter tuple.
    (Python 2 code: map(None, ...) zips with padding, backquotes
    are repr, string exceptions syntax.)
    """
    from types import ListType, TupleType
    if not dynamic:
        # no dynamic values supplied: only valid if none were expected
        if ndynamic>0:
            raise ValueError, `ndynamic`+" dynamic parameters unbound"
        return [kjbuckets.kjDict()]
    ldyn = len(dynamic)
    # undumper pairs each parameter slot 0..n-1 with its position
    undumper = map(None, [0]*ndynamic, range(ndynamic))
    undumper = tuple(undumper)
    tdyn = type(dynamic)
    if tdyn is TupleType:
        # a single tuple of values: produce exactly one binding
        ldyn = len(dynamic)
        if len(dynamic)!=ndynamic:
            raise ValueError, "%s,%s: wrong number of dynamics" % (ldyn,ndynamic)
        dynamic = [dynamic]
    elif tdyn is not ListType:
        raise TypeError, "dynamic parameters must be list or tuple"
    else:
        # a list of tuples: one binding per element; infer ndynamic from
        # the longest entry and require all entries to agree
        lens = map(len, dynamic)
        ndynamic = max(lens)
        if ndynamic!=min(lens):
            raise ValueError, "dynamic parameters of inconsistent lengths"
        undumper = map(None, [0]*ndynamic, range(ndynamic))
        undumper = tuple(undumper)
    result = list(dynamic)
    kjUndump = kjbuckets.kjUndump
    for i in xrange(len(dynamic)):
        dyn = dynamic[i]
        ldyn = len(dyn)
        #print undumper, dyn
        if ldyn==1:
            # kjUndump expects the bare value for a 1-tuple
            dynresult = kjUndump(undumper, dyn[0])
        else:
            dynresult = kjUndump(undumper, dyn)
        result[i] = dynresult
    return result
class Selector:
"""For implementing, eg the SQL SELECT statement."""
    def __init__(self, alldistinct,
                 select_list,
                 table_reference_list,
                 where_pred,
                 group_list,
                 having_cond,
                 union_select =None,
                 order_by_spec =None,
                 ndynamic=0, # number of dyn params expected
                 ):
        """Capture the parsed components of one SELECT statement."""
        self.ndynamic = ndynamic
        self.alldistinct = alldistinct
        self.select_list = select_list
        self.table_list = table_reference_list
        self.where_pred = where_pred
        self.group_list = group_list
        self.having_cond = having_cond
        self.union_select = union_select
        self.order_by = order_by_spec
        #self.union_spec = "DISTINCT" # default union mode
        self.relbindings = None # binding of relations
        self.unbound_set = None # unbound attributes
        self.rel_atts = None # graph of alias>attname bound in self
        self.all_aggregate = 0
        # with no GROUP BY, an aggregate in the select list means the
        # whole query collapses to a single aggregate row
        if select_list!="*" and not group_list:
            if select_list.contains_aggregate:
                ### should restore this check somewhere else!
                #if select_list.contains_nonaggregate:
                #raise ValueError, "aggregates/nonaggregates don't mix without grouping"
                self.all_aggregate = 1
        if where_pred and where_pred.contains_aggregate:
            raise ValueError, "aggregate in WHERE"
        self.query_plan = None
    def initargs(self):
        """Constructor arguments for marshalling.

        group_list and order_by_spec are passed as None here because
        they require special handling during (de)marshalling.
        """
        # note: order by requires special handling
        return (self.alldistinct, self.select_list, self.table_list, self.where_pred,
                None, self.having_cond, self.union_select, None,
                self.ndynamic)
def marshaldata(self):
order_by | |
<reponame>sirrice/dbwipes<filename>dbwipes/server.py
import os
import re
import time
import json
import md5
import pdb
import random
import psycopg2
import traceback
import numpy as np
from functools import wraps
from collections import *
from datetime import datetime
from sqlalchemy import *
from sqlalchemy.pool import NullPool
from flask import Flask, request, render_template, g, redirect, Response, jsonify
from flask_compress import Compress
from flask_cors import CORS, cross_origin
from summary import Summary
from util import *
from db import *
# resolve the templates directory relative to this file so the app can
# be started from any working directory
tmpl_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
print tmpl_dir
app = Flask(__name__, template_folder=tmpl_dir)
#CORS(Compress(app), supports_credentials=True)
def build_preflight_response():
    """Build an empty CORS preflight (OPTIONS) response.

    BUG FIX: the original called make_response(), which is not among the
    names imported from flask above (it could only come from one of the
    star imports -- none of which is documented to provide it), so the
    call would raise NameError.  Construct a Response directly instead.
    """
    response = Response()
    response.headers.add("Access-Control-Allow-Origin", "*")
    response.headers.add('Access-Control-Allow-Headers', "*")
    response.headers.add('Access-Control-Allow-Methods', "*")
    return response
def build_actual_response(response):
    """Attach the CORS allow-origin header to a real (non-preflight)
    response and return it."""
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response
def returns_json(f):
    """Decorator: serialize the view's return value as JSON and attach
    permissive CORS headers.

    Strings are assumed to be pre-serialized JSON and passed through
    unchanged (Python 2: basestring covers str and unicode).
    """
    @wraps(f)
    def json_returner(*args, **kwargs):
        r = f(*args, **kwargs)
        if not isinstance(r, basestring):
            r = json.dumps(r, cls=SummaryEncoder)
        resp = Response(r, content_type='application/json')
        resp.headers["Access-Control-Allow-Origin"] = "*"
        resp.headers["Access-Control-Allow-Headers"] = "*"
        resp.headers["Access-Control-Allow-Methods"] = "*"
        print(resp.headers["Access-Control-Allow-Origin"])
        #import pdb; pdb.set_trace()
        return resp
    return json_returner
def cache_result(key, value):
    """Persist a (key, value) pair into the 'cache' database.

    The connection and engine are now released even when the INSERT
    raises (the original leaked both on error).
    """
    engine = db_connect('cache')
    try:
        db = engine.connect()
        try:
            q = "insert into requests values(%s, %s)"
            db.execute(q, key, value)
        finally:
            db.close()
    finally:
        engine.dispose()
@app.before_request
def before_request():
    """Open a per-request DB connection named by the 'db' form/query arg.

    Best-effort by design: on any failure the request still proceeds,
    just with g.db / g.engine set to None (hence the broad except).
    """
    try:
        dbname = None
        if 'db' in request.form:
            dbname = request.form['db']
        elif 'db' in request.args:
            dbname = request.args['db']
        g.dbname = dbname
        g.engine = None
        g.db = None
        if dbname:
            g.engine = db_connect(dbname)
            g.db = g.engine.connect()
    except:
        traceback.print_exc()
        g.engine = None
        g.db = None
@app.teardown_request
def teardown_request(exception):
    """Release the per-request DB connection and engine, if any.

    before_request may leave g.db / g.engine set to None, so guard for
    None explicitly instead of relying on the resulting AttributeError
    being swallowed (as the original did with hasattr alone).
    """
    try:
        if getattr(g, 'db', None) is not None:
            g.db.close()
    except Exception as e:
        pass  # closing a dead connection is not fatal to the response
    try:
        if getattr(g, 'engine', None) is not None:
            g.engine.dispose()
    except Exception as e:
        pass
@app.route('/', methods=["POST", "GET"])
def index():
    """Landing page.

    Scorpion features are feature-detected: they are enabled only when
    the scorpion package is importable in this environment.
    """
    try:
        import scorpion
        enable_scorpion = 1
        title = 'DBWipes + Scorpion!'
    except:
        enable_scorpion = 0
        title = 'DBWipes'
    context = {
        'enableScorpion': enable_scorpion,
        'js': 'summary',
        'study': 0,
        'title': title,
        'debug': True
    }
    return render_template("index_base.html", **context)
@app.route('/drspott/', methods=["POST", "GET"])
def drspott():
    """Render the Dr. Spott variant of the UI (Scorpion enabled)."""
    return render_template("index_base.html", **{
        'enableScorpion': 1,
        'js': 'summarydrspott',
        'study': 0,
        'title': 'DBWipes + Scorpion!'
    })
@app.route('/hidden/', methods=["POST", "GET"])
def hidden():
    """Render the hidden debug variant of the UI (Scorpion enabled)."""
    opts = dict(
        enableScorpion=1,
        js='summary',
        title='DBWipes + Scorpion!',
        study=0,
        debug=True,
    )
    return render_template("index_base.html", **opts)
@app.route('/study/name/', methods=["POST", "GET"])
def study_name():
    """Ask the study participant for their name."""
    return render_template("study/name.html")
@app.route('/study/', methods=["POST", "GET"])
@app.route('/study/dir/', methods=["POST", "GET"])
def study_dir():
    """Study task directory; taskids are (position, task-id) pairs."""
    return render_template("study/dir.html", **{
        'taskids': enumerate([4,5, 8, 10])
    })
@app.route('/study/<int:idx>/', methods=["POST", "GET"])
def index_idx(idx):
    """Render study task page <idx>.

    Even indices are the Scorpion-disabled variants; the following odd
    index reuses the same template/js with Scorpion enabled.  Branch
    order matters: later assignments deliberately override the earlier
    defaults, so keep the if-chain sequential.
    """
    # per-task template first, generic base template as fallback
    templates = []
    templates.append("study/index%d.html" % idx)
    templates.append("index_base.html")
    js = 'study/summary%d' % idx
    title = "DBWipes Tutorial"
    subtitle = ""
    enable_scorpion = 1
    if idx in [0,1,2,4]:
        enable_scorpion = 0
    if idx == 2:
        title = 'DBWipes Verification Test'
    if idx == 3:
        title = "DBWipes + Scorpion Tutorial"
    if idx >= 4:
        title = "DBWipes User Study"
        subtitle = "without Scorpion"
    # hard1 sum
    if idx == 4:
        enable_scorpion = 0
    if idx == 5:
        subtitle = "with Scorpion"
        js = 'study/summary4'
        enable_scorpion = 1
        templates[0] = "study/index4.html"
    # intel
    if idx == 6:
        subtitle = "without Scorpion"
        js = 'study/summary6'
        enable_scorpion = 0
    if idx == 7:
        subtitle = "with Scorpion"
        js = 'study/summary6'
        enable_scorpion = 1
        templates[0] = "study/index6.html"
    # hard1 avg
    if idx == 8:
        enable_scorpion = 0
        js = 'study/summary8'
        templates[0] = "study/index8.html"
    if idx == 9:
        subtitle = "with Scorpion"
        js = 'study/summary8'
        enable_scorpion = 1
        templates[0] = "study/index8.html"
    # hard2 sum
    if idx == 10:
        js = 'study/summary10'
        enable_scorpion = 0
        templates[0] = "study/index10.html"
    if idx == 11:
        subtitle = "with Scorpion"
        js = 'study/summary10'
        enable_scorpion = 1
        templates[0] = "study/index10.html"
    # hard2 avg
    if idx == 12:
        js = 'study/summary12'
        enable_scorpion = 0
        templates[0] = "study/index12.html"
    if idx == 13:
        subtitle = "with Scorpion"
        js = 'study/summary12'
        enable_scorpion = 1
        templates[0] = "study/index12.html"
    context = {
        'enableScorpion': enable_scorpion,
        'idx': idx,
        'js': js,
        'study': 1,
        'title': title,
        'subtitle': subtitle,
        'debug': False
    }
    print context
    return render_template(templates, **context)
@app.route('/tasks/get/', methods=["POST", "GET"])
@returns_json
def task_get():
    """Return (creating on first request) the task list for a user.

    A new user gets 3 random tasks drawn from [4, 8, 10, 12]; each has
    a 50% chance of being bumped to its Scorpion-enabled "+1" variant.

    Fixes over the original:
    * ``db`` is pre-initialized so the ``finally`` clause no longer
      raises NameError when db_connect() itself fails;
    * numpy integers from np.random.choice are converted to int before
      json.dumps (np.int64 is not JSON-serializable);
    * a failure now returns {'status': False} instead of None.
    """
    name = request.form['name']
    if not name:
        return { 'status': False }
    db = None
    try:
        db = db_connect("tasks")
        try:
            q = """create table tasks(
                name varchar,
                tstamp timestamp default current_timestamp,
                tasks text
            )"""
            db.execute(q)
        except:
            pass  # table already exists
        q = """select * from tasks where name = %s"""
        rows = db.execute(q, name).fetchall()
        if rows:
            tasks = json.loads(rows[0][2])
            return {
                'status': True,
                'tasks': tasks
            }
        alltasks = [4, 8, 10, 12]
        options = np.random.choice(alltasks, 3, replace=False)
        tasks = []
        for task in options:
            task = int(task)  # np.int64 -> int for json.dumps below
            enable_scorpion = random.random() > .5
            if enable_scorpion:
                task += 1
            tasks.append(task)
        q = """insert into tasks values(%s, default, %s)"""
        db.execute(q, (name, json.dumps(tasks)))
        return {
            'status': True,
            'tasks': tasks
        }
    except Exception as e:
        traceback.print_exc()
        return { 'status': False }
    finally:
        if db:
            db.dispose()
@app.route('/tasks/submit/', methods=["POST", "GET"])
@returns_json
def task_submit():
    """Store a participant's answer for one study task.

    The engine is now disposed in a finally clause (the original leaked
    it whenever the insert raised).
    """
    name = request.form['name']
    taskid = request.form['taskid']
    data = request.form['data']
    db = db_connect("tasks")
    try:
        try:
            q = """create table responses(
                name varchar,
                tstamp timestamp default current_timestamp,
                taskid varchar,
                data text
            )"""
            db.execute(q)
        except:
            pass  # table already exists
        q = "insert into responses values(%s, default, %s, %s)"
        db.execute(q, (name, taskid, data))
    finally:
        db.dispose()
    return {'status': True}
@app.route('/api/databases/', methods=['POST', 'GET'])
@returns_json
def dbs():
    """List all non-template PostgreSQL databases visible to g.db."""
    q = "SELECT datname FROM pg_database where datistemplate = false;"
    dbnames = [str(row[0]) for row in g.db.execute(q).fetchall()]
    return {'databases': dbnames}
@app.route('/api/tables/', methods=['POST', 'GET'])
@returns_json
def tables():
    """List the public tables of the currently selected database.

    BUG FIX: g.db is a SQLAlchemy connection (see before_request) and
    has no .cursor() method, so the original raised AttributeError.
    Query via .execute(), the same way dbs() does.
    """
    q = "SELECT tablename from pg_tables WHERE schemaname = 'public'"
    tables = [str(row[0]) for row in g.db.execute(q).fetchall()]
    return {'tables': tables}
def get_schema(db_or_name, table):
    """Return a {column: type} dict for *table*, or {} on failure.

    db_or_name may be a live connection or a database name (whatever
    Summary accepts).  The Summary is closed in all cases.
    """
    summary = Summary(db_or_name, table)
    try:
        cols_and_types = summary.get_columns_and_types()
        schema = dict(cols_and_types)
        return schema
    except Exception as e:
        traceback.print_exc()
    finally:
        summary.close()
    # only reached when get_columns_and_types() raised
    return {}
@app.route('/api/schema/', methods=['POST', 'GET'])
@returns_json
def schema():
    """Return {'schema': {column: type}} for the requested table,
    or an empty dict when no table was supplied."""
    table = request.args.get('table')
    if not table:
        return {}
    return {'schema': get_schema(g.db, table)}
@app.route('/api/requestid/', methods=['OPTIONS','POST', 'GET'])
#@cross_origin(origins="*")
@returns_json
def requestid():
    """Allocate a fresh scorpion request id (used to poll /api/status/).

    Returns {'error': ...} when scorpion is unavailable or Status fails.
    """
    if request.method == "OPTIONS":
        print("GOT OPTIONS ALLOWING CORS")
        return build_preflight_response()
    try:
        from scorpion.util import Status
        status = Status()
        requestid = status.reqid
        status.close()
        print("returning reqid")
        return {'requestid': requestid}
    except Exception as e:
        return {'error': str(e)}
@app.route('/api/status/')#, methods=['OPTIONS', 'POST', 'GET'])
#@cross_origin(origins="*")
@returns_json
def api_status():
    """Poll a scorpion run: latest status plus any partial rules.

    The hash of the stringified rules lets the client detect changes
    cheaply between polls.  On failure the error text is returned in
    the 'status' field with an empty result list.
    """
    try:
        from scorpion.util import Status
        rid = int(request.args.get('requestid'))
        status = Status(rid)
        ret = status.latest_status()
        label_rules = status.get_rules()
        status.close()
        # flatten [(label, rules), ...] into one list of rules
        partial_rules = []
        for label, rules in label_rules:
            partial_rules.extend(rules)
        rules_hash = hash(str(partial_rules))
        return {
            'status': ret,
            'results': partial_rules,
            'hash': rules_hash
        }
    except Exception as e:
        return {
            'status': str(e),
            'results': []
        }
@app.route('/api/tuples/', methods=['POST', 'GET'])
@returns_json
def api_tuples():
    """Return roughly 50 randomly sampled rows matching an optional filter.

    Expects a 'json' query arg of the form {db, table, where}.
    NOTE(review): the table name and rendered where-clause are
    interpolated straight into the SQL string, so this endpoint trusts
    its callers (SQL injection is possible); only the filter values go
    through bind parameters.
    """
    ret = { }
    jsonstr = request.args.get('json')
    if not jsonstr:
        print "query: no json string. giving up"
        return ret
    args = json.loads(jsonstr)
    dbname = args.get('db')
    table = args.get('table')
    where = args.get('where', []) or []
    where, params = where_to_sql(where)
    if where:
        where = 'AND %s' % where
    print where
    print params
    # the filter appears twice (count subquery + sampling query),
    # hence params+params below
    query = """WITH XXXX as (select count(*) from %s WHERE 1 = 1 %s)
    SELECT * FROM %s
    WHERE random() <= 50.0 / (select * from XXXX) %s
    LIMIT 50"""
    query = query % (table, where, table, where)
    try:
        conn = g.db
        cur = conn.execute(query, [params+params])
        rows = cur.fetchall()
        cur.close()
        data = [dict(zip(cur.keys(), vals)) for vals in rows]
        ret['data'] = data
        ret['schema'] = get_schema(g.db, table)
    except Exception as e:
        traceback.print_exc()
        ret = {}
        raise
    print "%d points returned" % len(ret.get('data', []))
    return ret
@app.route('/api/query/', methods=['POST', 'GET'])
@returns_json
def api_query():
    """Execute the aggregation query described by the 'json' arg and
    return up to 10000 rows plus the table schema.

    On failure an empty dict is returned (best effort, unlike
    api_tuples which re-raises).
    """
    ret = { }
    jsonstr = request.args.get('json')
    if not jsonstr:
        print "query: no json string. giving up"
        return ret
    args = json.loads(jsonstr)
    dbname = args.get('db')
    table = args.get('table')
    o, params = create_sql_obj(g.db, args)
    o.limit = 10000;
    query = str(o)
    print query
    print params
    if not dbname or not table or not query:
        print "query: no db/table/query. giving up"
        return ret
    try:
        conn = g.db
        cur = conn.execute(query, [params])
        rows = cur.fetchall()
        cur.close()
        data = [dict(zip(cur.keys(), vals)) for vals in rows]
        ret['data'] = data
        ret['schema'] = get_schema(g.db, table)
    except Exception as e:
        traceback.print_exc()
        ret = {}
    print "%d points returned" % len(ret.get('data', []))
    return ret
@app.route('/api/column_distribution/', methods=['POST', 'GET'])
@returns_json
def column_distribution():
    """Compute summary statistics for a single column.

    typ and stats are pre-initialized to None: the original left them
    unbound when Summary raised, so the real error was masked by a
    NameError when building the response below.
    """
    dbname = request.args.get('db', 'intel')
    tablename = request.args.get('table', 'readings')
    where = request.args.get('where', '')
    col = request.args.get('col')
    try:
        nbuckets = int(request.args.get('nbuckets', 100))
    except Exception as e:
        nbuckets = 100  # malformed value: fall back to the default
    typ = None
    stats = None
    summary = Summary(g.db, tablename, nbuckets=nbuckets, where=where)
    try:
        typ = summary.get_type(col)
        stats = summary.get_col_stats(col, typ)
    except Exception as e:
        traceback.print_exc()
    finally:
        summary.close()
    data = {
        'col': col,
        'type': typ,
        'stats': stats
    }
    context = { "data": data }
    return context
@app.route('/api/column_distributions/', methods=['POST', 'GET'])
@returns_json
def column_distributions():
dbname = request.args.get('db', 'intel')
tablename = request.args.get('table', 'readings')
where = request.args.get('where', '')
try:
nbuckets = int(request.args.get('nbuckets', 100))
except Exception as e:
print e
nbuckets = 100
# #from monetdb import sql as msql
# #db = msql.connect(user='monetdb', password='<PASSWORD>', database=dbname)
summary = Summary(g.db, tablename, nbuckets=nbuckets, where=where)
print 'where: %s' % where
try:
| |
<filename>books/api/ContactsApi.py
#$Id$#
from os.path import basename
from json import dumps
from books.util.ZohoHttpClient import ZohoHttpClient
from books.parser.ContactParser import ContactParser
from .Api import Api
# module-level singletons shared by every ContactsApi instance
base_url = Api().base_url + 'contacts/'
parser = ContactParser()
zoho_http_client = ZohoHttpClient()
class ContactsApi:
"""ContactsApi class is used:
1.To get the list of contacts for a particular organization.
2.To get details of particular contact.
3.To create a new contact for an organization.
4.To update a contact.
5.To delete a contact.
6.To mark a contact as active.
7.To mark a contact as inactive.
8.To enable payment reminders for a contact
9.To disable payment reminders for a contact.
10.To send email statement to a contact.
11.To get the statement mail content
12.To send email to a contact.
13.To list recent activities of a contact.
14.To list the refund history of a contact.
15.To track a contact for 1099 reporting.
16.To untrack a contact for 1099 reporting.
"""
def __init__(self, authtoken, organization_id):
"""Initialize Contacts Api using user's authtoken and organization id.
Args:
authtoken(str): User's authtoken.
organization_id(str): User's organization id.
"""
self.details={
'authtoken': authtoken,
'organization_id': organization_id
}
def get_contacts(self, parameter=None):
"""List of contacts with pagination for a particular organization.
Args:
parameter(dict, optional): Filter with which the list has to be
displayed. Defaults to None.
Returns:
instance: List of contact objects with pagination.
"""
response = zoho_http_client.get(base_url, self.details, parameter)
return parser.get_contacts(response)
def get(self, contact_id):
"""Get details of a contact.
Args:
contact_id(str): Contact_id of a particular contact.
Returns:
instance: Contact object.
"""
url = base_url + contact_id
response = zoho_http_client.get(url, self.details, None)
return parser.get_contact(response)
def create(self, contact):
"""Create a contact.
Args:
contact(instance): Contact object.
Returns:
instance: Contact object.
"""
data = contact.to_json()
field = {
'JSONString':dumps(data)
}
response = zoho_http_client.post(base_url, self.details, field, None)
return parser.get_contact(response)
def update(self, contact_id, contact):
"""Update a contact with the given information.
Args:
contact_id(str): Contact_id of the contact that has to be updated.
contact(instance): Contact object which has the information that
has to be updated.
Returns:
instance: Contact object.
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id
data = contact.to_json()
field = {
'JSONString':dumps(data)
}
response = zoho_http_client.put(url, self.details, field, None)
return parser.get_contact(response)
def delete(self, contact_id):
"""Delete a particular contact.
Args:
contact_id(str): Contact id of the contact to be deleted.
Returns:
str: Success message('The contact has been deleted').
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id
response = zoho_http_client.delete(url, self.details)
return parser.get_message(response)
def mark_active(self, contact_id):
"""Mark a contact as active.
Args:
contact_id(str): Contact id of the contact.
Returns:
str: Success message('The contact has been marked as active').
Raises:
Books exception: If status is not '200' or '201'.
"""
url= base_url + contact_id + '/active'
response = zoho_http_client.post(url, self.details, '')
return parser.get_message(response)
def mark_inactive(self, contact_id):
"""Mark a contact as inactive.
Args:
contact_id(str): Contact id of the contact.
Returns:
str: Success message('The contact has been marked as inactive').
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/inactive'
response = zoho_http_client.post(url, self.details, '')
return parser.get_message(response)
def enable_payment_reminder(self, contact_id):
"""Enable automated payment reminders for a contact.
Args:
contact_id(str): Contact id of the contact.
Returns:
str: Success message('All reminders associated with this contact
have been enabled').
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/paymentreminder/enable'
response = zoho_http_client.post(url, self.details, '')
return parser.get_message(response)
def disable_payment_reminder(self, contact_id):
"""Disable automated payment reminders for a contact.
Args:
contact_id(str): Contact id of the contact.
Returns:
str: Success message('All reminders associated with this contact
have been disabled').
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/paymentreminder/disable'
response = zoho_http_client.post(url, self.details, '')
return parser.get_message(response)
def email_statement(self, contact_id, email,start_date=None, end_date=None,
attachments=None):
"""Email statement to the contact. If JSONString is not inputted, mail
will be sent with the default mail content.
Args:
contact_id(str): Contact id of the contact.
email(instance): Email.
start_date(str, optional): Starting date of the statement.
Default to None.
end_date(str, optional): Ending date of the statement.
Default to None.
If starting date and ending date is not given current month's
statement will be sent to the contact.
attachments(list): List of files to be attached.
Returns:
str: Success message('Statement has to been sent to the customer').
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/statements/email'
data = {}
data = email.to_json()
if start_date is not None and end_date is not None:
data['start_date'] = start_date
data['end_date'] = end_date
fields = {
'JSONString': dumps(data)
}
if attachments is None:
response = zoho_http_client.post(url, self.details, fields)
else:
file_list = []
for value in attachments:
attachment = {
'attachments': {
'filename': basename(value),
'content': open(value).read()
}
}
file_list.append(attachment)
response = zoho_http_client.post(url, self.details, fields,
None, file_list)
return parser.get_message(response)
def get_statement_mail_content(self, contact_id, start_date, end_date):
"""Get the statement mail content.
Args:
start_date(str): Start date for statement.
end_date(str): End date for statement.
Returns:
instance: Email object.
Raises:
Books exception:if status is not '200' or '201'.
"""
url = base_url + contact_id + '/statements/email'
query_string = {
'start_date': start_date,
'end_date': end_date
}
response = zoho_http_client.get(url, self.details, query_string)
return parser.get_mail_content(response)
def email_contact(self, contact_id, email, attachments=None,
send_customer_statement=None):
"""Send email to contact.
Args:
contact_id(str): Contact id of the contact.
email(instance): Email object.
attachments(list, optional): List of files to be attached.
Default to None.
send_customer_statement(bool, optional): Send customer statement
pdf with email. Default to None.
Returns:
str: Success message('Email has been sent').
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/email'
json_object = dumps(email.to_json())
data = {
'JSONString': json_object
}
if attachments is not None and send_customer_statement is not None:
file_list = []
for value in attachments:
attachment = {
'attachments': {
'filename': basename(value),
'content': open(value).read()
}
}
file_list.append(attachment)
parameter = {
'send_customer_statement': send_customer_statement
}
response = zoho_http_client.post(url, self.details, data,
parameter, file_list)
elif attachments is not None:
file_list = []
for value in attachments:
attachment = {
'attachments': {
'filename': basename(value),
'content': open(value).read()
}
}
file_list.append(attachment)
response = zoho_http_client.post(url, self.details,
data, None, file_list)
elif send_customer_statement is not None:
parameter = {
'send_customer_statement': send_customer_statement
}
response = zoho_http_client.post(url, self.details, data,
parameter)
else:
response = zoho_http_client.post(url, self.details, data)
return parser.get_message(response)
def list_comments(self, contact_id):
"""List recent activities of a contact with pagination.
Args:
contact_id(str): Contact id of the contact.
Returns:
instance: Comments list object.
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/comments'
response = zoho_http_client.get(url, self.details)
return parser.get_comment_list(response)
def get_comments(self, contact_id):
"""List recent activities of a contact.
Args:
contact_id(str): Contact id of the contact.
Returns:
list: List of comments object.
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/comments'
response = zoho_http_client.get(url, self.details)
return parser.get_comment_list(response).get_comments()
def list_refunds(self, contact_id):
"""List the refund history of a contact with pagination.
Args:
contact_id(str): Contact id of the contact.
Returns:
instance: Refunds list object.
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/refunds'
response = zoho_http_client.get(url, self.details)
return parser.get_refund_list(response)
def get_refunds(self, contact_id):
"""List the refund history of a contact.
Args:
contact_id(str): Contact id of the contact.
Returns:
list: List of refunds object.
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id + '/refunds'
response = zoho_http_client.get(url, self.details)
return parser.get_refund_list(response).get_creditnote_refunds()
def track_1099(self, contact_id):
"""Track a contact for 1099 reporting.
Args:
contact_id(str): Contact id of the contact.
Returns:
str: Success message('1099 tracking is enabled').
Raises:
Books exception: If status is not '200' or '201'.
"""
url = base_url + contact_id | |
<gh_stars>0
#============================================================================
"""
a library for generating OpenSCAD_ 3D models
https://www.github.com/wovo/psml
\(c) <NAME> (<EMAIL>)
Distributed under the Boost Software License, Version 1.0.
.. _OpenSCAD: https://www.openscad.org/
.. _typeguard: https://pypi.org/project/typeguard/
-----------------------------------------------------------------------------
This is a Python library (Python 3 required) for writing
3D model code that can be rendered and processed by OpenSCAD_.
The library has a vector class.
A vector holds x, y and (optional) z numeric values.
A vector is used to specify (in 2D or 3D) a size,
a location, or a displacement, or sometimes just to hold 2 or 3 values.
A vector can be created from two or three values.
Two vectors can be added or subtracted.
A vector can be multiplied with or divided by a numeric value.
When a function requires a vector, it can in most
cases be specified either by a vector value, or as 2 or 3
individual numeric parameters.
This library creates and manipulates 3D solid objects.
The image below shows the basic solid objects:
box, cylinder, cone, and sphere.
.. figure:: ../examples/images/intro_bccs_128.png
Basic flat (2D) objects are rectangle, circle, polygon, and text,
as shown in the next image.
.. figure:: ../examples/images/intro_rcpt_128.png
:target: ../examples/images/intro_rcpt_512.png
A flat object can be extended into a solid object
by extruding it (in the z direction), while
optionally twisting it around the z axis in the process.
The cross shown below was extruded and twisted while
it was at the origin. The right circle was extruded and twisted
while it was just to the right of the origin.
.. figure:: ../examples/images/intro_extrude_128.png
:target: ../examples/images/intro_extrude_512.png
Objects can be added, subtracted and intersected
with the operators +, - and \*.
The image below shows two separate cylinders, the addition of
these two cylinders, the second cylinder subtracted from the
first one, and the intersection of the two.
.. figure:: ../examples/images/intro_asi_128.png
:target: ../examples/images/intro_asi_512.png
Manipulators can be applied to an object with the ** (power) operator.
Basic manipulators are vector, rotate, mirror, scale and resize.
The examples below show an object unchanged, shifted up,
rotated along the x axis, mirrored in the y-z plane (note the eyes),
scaled (by 2 in the y and z directions), and resized
(to fit in a cube).
.. figure:: ../examples/images/intro_vrmsr_128.png
:target: ../examples/images/intro_vrmsr_512.png
The repeat2, repeat4 and repeat8 manipulators repeat their subject.
Repeat2 does this at the original location,
and shifted by the specified vector.
Repeat4 does this at the 4 corners of the rectangle
specified by the vector.
Repeat8 does this at the 8 corners of the box
specified by the vector.
.. figure:: ../examples/images/intro_repeat1_128.png
:target: ../examples/images/intro_repeat1_512.png
The negative manipulator creates a dominant emptiness from its subject.
The image below shows at the left the addition of two normal pipes.
The result is not a usable pipe crossing because the walls of each
pipe will block the other pipe.
In the middle it shows the addition of two pipes of which the empty
interior is dominant. This produces a 'successful' pipe crossing.
But this also removes part of the vertical axle.
At the right the crossing was first made positive, which
reduces the dominant emptiness to a normal solid, and then the
vertical axle was added.
.. figure:: ../examples/images/intro_negatives1_128.png
:target: ../examples/images/intro_negatives1_512.png
In the code examples solid objects are created.
What is not show is that such an object must be written to a
file to be processed by OpenSCAD, using a write() call.
This documentation is meant to be usable on its own,
without having to read the OpenSCAD documentation,
hence it is worded as if the library provides all the
functionality, while in fact it is in most cases just a thin layer
on top of the OpenSCAD language.
Some OpenSCAD features are not directly available in the library.
To compensate, a solid can be created from a string, which
is passed directly to OpenSCAD.
Likewise, a manipulator can be constructed from a lambda, which
gets the string representation of its subject as parameter.
The library has type hints.
The examples use typeguard_ to check these hints.
-----------------------------------------------------------------------------
"""
#============================================================================
from __future__ import annotations
from typing import Union, Tuple, Iterable
# specifiers used in the type annotations
# (class names appear as string forward references because the classes
# are defined further down in this file)
_shape_or_shape_list = Union[ "shape", "_shape_list" ]
_shape_or_none = Union[ "shape", None ]
_str_or_none = Union[ str, None ]
_float_or_vector = Union[ float, "vector" ]
_float_or_none = Union[ float, None ]
_vector_or_pair = Union[ "vector", Tuple[float,float]]
#============================================================================
#
# miscellaneous
#
#============================================================================
# the default number of facets (rendering accuracy) used for circles,
# spheres, text and extrusions; all four can be changed at once by
# calling facets()
number_of_circle_facets = 32
number_of_sphere_facets = 32
number_of_text_facets = 32
number_of_extrude_facets = 32
def facets( numer_of_facets: int ) -> None:
    """accuracy (number of facets) of circles, spheres and fonts

    Sets the global facet counts used for circles, spheres, text and
    extrusions to the same value.

    The default setting (32) is a compromise between speed and accuracy.
    For quick rendering of complex designs
    a lower value (10, or even 5) might be appropriate.
    This function has effect on shapes that are created
    after its call, so better call it before you create any elements.

    .. figure:: ../examples/images/example_facets1_128.png
       :target: ../examples/images/example_facets1_512.png
    .. literalinclude:: ../examples/example_facets1.py
       :lines: 10
    .. figure:: ../examples/images/example_facets2_128.png
       :target: ../examples/images/example_facets2_512.png
    .. literalinclude:: ../examples/example_facets2.py
       :lines: 9,11
    """
    # NOTE(review): the parameter name contains a typo (numer_of_facets);
    # kept unchanged so keyword-argument callers are not broken.
    global number_of_circle_facets, number_of_sphere_facets
    global number_of_text_facets, number_of_extrude_facets
    number_of_circle_facets = numer_of_facets
    number_of_sphere_facets = numer_of_facets
    number_of_text_facets = numer_of_facets
    number_of_extrude_facets = numer_of_facets
def _indent( txt: str ) -> str:
"""return the text with all lines indented one indent step
"""
return "".join( map(
lambda s: "" if s.strip() == "" else " " + s + "\n",
txt.split( "\n" )
))
def _apply2(
    s1 : str,
    s2 : str,
    a : _shape_or_none,
    b : _shape_or_none,
) -> shape:
    """
    apply an OpenSCAD operation to two shapes

    :param s1: operation to apply to the positive parts
    :param s2: operation to apply to the (dominant) negative parts
    :param a: first shape, may be None
    :param b: second shape, may be None

    A None second shape is replaced by an empty shape; a None first
    shape is replaced by the (possibly empty) second shape, so calling
    with two Nones yields an empty result.
    """
    # fix: compare with `is None` instead of `== None` — identity is the
    # correct test and avoids invoking any equality overload on shape
    if b is None:
        b = shape( "", "" )
    if a is None:
        a = b
    return shape(
        s1 + "{\n" + _indent(
            a._positive() + "\n" +
            b._positive() + "\n" ) +
        "}",
        s2 + "{\n" + _indent(
            a._negative() + "\n" +
            b._negative() + "\n" ) +
        "}",
    )
def apply(
    text : str,
    subject : _shape_or_none
) -> _shape_or_none:
    """apply an OpenSCAD operation to a shape

    :param text: the text of the OpenSCAD operation
    :param subject: the shape to which the operation is applied
        (None propagates to a None result)

    This function applies an OpenSCAD operation to a shape.
    This can be useful for OpenSCAD operations that are not
    otherwise available in the library.

    For convenience, single quotes in the text are replaced by
    double quotes (OpenSCAD uses double quotes).

    Coloring and translation are available in the library,
    but could be done with OpenSCAD functions as shown in the example.

    .. figure:: ../examples/images/example_apply1_128.png
       :target: ../examples/images/example_apply1_512.png
    .. literalinclude:: ../examples/example_apply1.py
       :lines: 10
    """
    text = text.replace( "'", '"' )
    # fix: `is None` instead of `== None` for the identity test
    if subject is None:
        return None
    return shape(
        text + "{\n" + _indent(
            subject._positive() + "\n" ) +
        "}",
        text + "{\n" + _indent(
            subject._negative() + "\n" ) +
        "}",
    )
#============================================================================
#
# shape
#
#============================================================================
class shape:
"""2D or 3D shape
Shapes can be added, subtracted or intersected by using the
+, - or * operators.
"""
    def __init__( self,
        positive : str,
        negative : str = ""
    ):
        """a simple shape

        :param positive: the OpenSCAD text representation
            of the positive parts
        :param negative: (optional) the OpenSCAD text representation
            of the dominant negative parts

        This constructor creates a shape that has a
        fixed textual representation.
        """
        # store the raw OpenSCAD source text; _positive()/_negative()
        # read these back (via the _merge() hook)
        self._positive_text = positive
        self._negative_text = negative
    def _merge( self ) -> shape:
        """hook for shape_list

        The base implementation is the identity; presumably the
        shape-list class overrides this to combine its elements into a
        single shape — confirm against that class's definition.
        """
        return self
    def _positive( self ) -> str:
        """the OpenSCAD text representation of the positive parts

        This method returns the OpenSCAD representation of the
        positive parts of the shape (after merging via _merge()).
        """
        return self._merge()._positive_text
    def _negative( self ) -> str:
        """the OpenSCAD text representation of the dominant negative parts

        This method returns the OpenSCAD representation of the
        dominant negative parts of the shape (after merging via _merge()).
        """
        return self._merge()._negative_text
def __str__( self ) -> str:
"""the OpenSCAD text representation of the shape
This method returns the OpenSCAD representation of the
shape.
"""
return ( self - shape( self._negative(), "" | |
#!/usr/bin/env python
import argparse
import copy
import math
import pickle
import time
from collections import namedtuple
import os
import numpy as np
import pandas as pd
import random
import ray
from ray import tune
from eval_model import Query, GenerateQuery, ReportEsts
import datasets
import torch
import torch.nn as nn
import torch.nn.functional as F
from text_infer import TrainedModel, infer_naive, infer_skip, q_error
from common import Column, CsvTable, Table, TableDataset
from estimators import *
from made import MADE, MaskedLinear
from torch.utils.data import DataLoader, Dataset
from transformer import Transformer
# Pass SILENT=1 to make query evaluation less verbose.
SILENT = "SILENT" in os.environ

# Command-line interface. NOTE(review): parse_args() runs at import time,
# so importing this module from another script will consume sys.argv.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--run",
    nargs="+",
    default=["test_simple", "test_url"],
    type=str,
    required=False,
    help="List of experiments to run")
args = parser.parse_args()
def gen_dryad_query_set():
    """Generate 100 random substring queries from the dryad URL dataset.

    Uses a fixed seed (0) so the query set is reproducible. Each query
    is a substring of length 2-5 taken at a random offset of the file
    contents.

    :return: list of 100 query strings
    """
    print("Generating query set")
    rng = np.random.RandomState(0)
    # read the whole file once; `with` ensures the handle is closed
    # (the original left it open)
    with open("datasets/article-urls.trim") as f:
        data = f.read()
    queries = []
    for _ in range(100):
        pos = rng.randint(0, len(data) - 10)
        k = rng.choice([2, 3, 4, 5])
        queries.append(data[pos:pos + k])
    print(queries)
    return queries
# Common config. Each key is auto set as an attribute (i.e. NaruTrainer.<attr>)
# so try to avoid any name conflicts with members of that class.
BASE_CONFIG = {
    "cwd": os.getcwd(),
    # Training schedule.
    "epochs_per_iteration": 1,
    "num_eval_queries_per_iteration": 100,
    "num_eval_queries_at_end": 1000,
    "epochs": 10,
    "seed": None,
    "order_seed": None,
    # Batch size.
    "bs": 2048,
    "order": None,
    # Model architecture.
    "layers": 2,
    "fc_hiddens": 128,
    "warmups": 1000,
    "residual": True,
    "direct_io": True,
    "query_filters": [5, 12],
    "force_query_cols": None,
    "embs_tied": False,
    "embed_size": 32,
    "input_no_emb_if_leq": True,
    # If set, load this checkpoint and run eval immediately. No training.
    "checkpoint_to_load": None,
    # Dropout for wildcard skipping.
    "disable_learnable_unk": False,
    "per_row_dropout": True,
    "dropout": 0,
    "fixed_dropout_ratio": False,
    "asserts": None,
    # Multi-order training.
    "special_orders": 0,
    "special_order_seed": 0,
    "shuffle_at_data_level": False,
    # Eval.
    "eval_heuristic": True,
    "eval_psamples": [100, 1000, 10000],
    # Text modeling options.
    "use_transformer": False,
    "prefix_dropout": False,
    "transformer_args": {},
    "compute_test_loss": False,
    "text_eval_corpus": [],
    "text_eval_fraction": 1,
    # TODO do the below options actually work?
    "entropy_order": False,
    "reverse_entropy": False,
    "num_orderings": 1,
}
EXPERIMENT_CONFIGS = {
    ### TEST CONFIGS ###
    # These are run by default if you don't specify --run.
    "test_simple": dict(
        BASE_CONFIG, **{
            "dataset": "census",
            "order_seed": None,
            "epochs": 50,
            "epochs_per_iteration": 10,
            "num_eval_queries_per_iteration": 2,
            "num_eval_queries_at_end": 20,
            "special_orders": 10,  # <-- comment out to disable MO
            "fc_hiddens": 256,  # <-- 256 vs 180
            "layers": 4,
            "bs": 128,
        }),
    "test_url": dict(
        BASE_CONFIG, **{
            "dataset": "url-tiny",
            "order_seed": None,
            "use_transformer": True,
            "prefix_dropout": True,
            "per_row_dropout": False,
            "compute_test_loss": True,
            "layers": 4,
            "fc_hiddens": 256,
            "epochs": 1000,
            "epochs_per_iteration": 100,
            "num_eval_queries_per_iteration": 0,
            "num_eval_queries_at_end": 0,
            "bs": 128,
            "text_eval_fraction": 0.1,
            "eval_psamples": [100, 1000],
            "transformer_args": {
                "num_blocks": 4,
                "d_model": 16,
                "d_ff": 64,
                "num_heads": 4,
            },
            "text_eval_corpus": [
                "hoo",
            ],
        }),
    # dataset from https://datadryad.org/stash/dataset/doi:10.5061/dryad.p8s0j
    # postprocessed via awk '{print $2}' to strip the line numbers
    "dryad": dict(
        BASE_CONFIG,
        **{
            "dataset": "dryad-urls",
            "order_seed": None,
            "use_transformer": True,
            "prefix_dropout": True,
            "compute_test_loss": True,
            "bs": 512,
            "epochs": 20,
            # BUG FIX: this key was misspelled "epochs_per_iterations",
            # so the override silently never applied and BASE_CONFIG's
            # value of 1 was used instead.
            "epochs_per_iteration": 20,
            "layers": 4,
            "eval_psamples": [100, 1000],
            "fc_hiddens": 256,
            "transformer_args": {
                "num_blocks": 8,
                "d_model": 32,
                "d_ff": 256,
                "num_heads": 4,
            },
            "embed_size": 4,
            "num_eval_queries_per_iteration": 0,
            "num_eval_queries_at_end": 0,
            "text_eval_corpus": gen_dryad_query_set,
            "text_eval_fraction": 1,
        }),

    ### EXPERIMENT CONFIGS ###
    # Run multiple experiments concurrently by using the --run flag, ex:
    # $ ./train.py --run kdd census
    "kdd": dict(
        BASE_CONFIG, **{
            "dataset": tune.grid_search(["kdd"]),
            "order_seed": tune.grid_search([None]),
            "epochs": 200,
            "epochs_per_iteration": 50,
            "warmups": 1000,
            "layers": 4,
            "fc_hiddens": 256,
            "per_row_dropout": True,
            "input_no_emb_if_leq": False,
        }),
    "census": dict(
        BASE_CONFIG, **{
            "dataset": tune.grid_search(["census"]),
            "order_seed": tune.grid_search([None]),
            "epochs": 20,
            "epochs_per_iteration": 5,
            "warmups": 2000,
            "layers": 4,
            "fc_hiddens": 256,
            "per_row_dropout": True,
            "input_no_emb_if_leq": False,
        }),
    "dmv-full": dict(
        BASE_CONFIG, **{
            "dataset": tune.grid_search(["dmv-full"]),
            "order_seed": tune.grid_search([None]),
            "warmups": 6000,
            "epochs": 20,
            "epochs_per_iteration": 5,
            "layers": 4,
            "fc_hiddens": 256,
            "per_row_dropout": True,
            "input_no_emb_if_leq": False,
        }),
}
# "dryad-small": the "dryad" transformer setup applied to the smaller URL
# dataset, evaluated on a few substring queries (inline comments give
# each query's approximate true count in the data).
EXPERIMENT_CONFIGS["dryad-small"] = dict(
    EXPERIMENT_CONFIGS["dryad"],
    **{
        "dataset": "dryad-urls-small",
        "prefix_dropout": True,
        "embed_size": 8,
        "bs": 512,
        "warmups": 100,
        "epochs": 1000,
        "epochs_per_iteration": 5,
        "text_eval_corpus": [
            ".com", # 1.8m
            # "x", # 591742
            # "rea", # 150133
            "bbc", # 21000
            # "zz", # 9241
            "query", # 58
        ],
        "eval_psamples": [100, 1000],
    })
# Derive ablation/final variants for each tabular base config.
# BUG FIX: the list contained the anonymization placeholder "<KEY>",
# which is not a key of EXPERIMENT_CONFIGS and raised KeyError at import.
# "dmv-full" is the only other base config defined above and is otherwise
# never given these variants, so it is the intended entry.
for key in ["kdd", "dmv-full", "census"]:
    config = EXPERIMENT_CONFIGS[key]
    # Ablation study for different architectures.
    EXPERIMENT_CONFIGS[key + "-arch"] = dict(
        config, **{
            "order_seed": None,
            "layers": tune.grid_search([2, 4, 6]),
            "fc_hiddens": tune.grid_search([64, 128, 512]),
        })
    # See if disabling embed learning matters
    EXPERIMENT_CONFIGS[key + "-nolearnunk"] = dict(
        config, **{
            "disable_learnable_unk": True,
        })
    # See if disabling non embed
    EXPERIMENT_CONFIGS[key + "-forceembed"] = dict(
        config, **{
            "input_no_emb_if_leq": False,
        })
    # FINAL icml
    EXPERIMENT_CONFIGS[key + "-final"] = dict(
        config, **{
            "per_row_dropout": tune.grid_search([False, 2]),
            "num_eval_queries_per_iteration": 0,
            "num_eval_queries_at_end": 1000,
            "order_seed": tune.grid_search([0, 1, 2, 3, 4, 5, 6, 7]),
        })
    # FINAL icml mo
    EXPERIMENT_CONFIGS[key + "-final-mo"] = dict(
        config, **{
            "per_row_dropout": tune.grid_search([False, 2]),
            "num_eval_queries_per_iteration": 0,
            "num_eval_queries_at_end": 1000,
            "special_orders": 10,
            "special_order_seed": tune.grid_search([0, 1, 2, 3, 4, 5, 6, 7]),
            "order_seed": None,
        })
def get_device():
    """Return 'cuda' when a CUDA device is available, otherwise 'cpu'."""
    if torch.cuda.is_available():
        return 'cuda'
    return 'cpu'
# Training.
# For multi-order experiments, we want to have all randomly sampled orders.
# NOTE(review): maps dataset name -> list of hand-picked column orders;
# empty lists presumably mean "use only randomly sampled orders" — confirm
# against the code that consumes this table.
_SPECIAL_ORDERS = {
    'dmv': [],
    'dmv-full': [],
    'census': [],
    'kdd': [],
}
def Entropy(name, data, bases=None):
    """Compute, print and return the entropy of `data` in several bases.

    :param name: label used in the printed summary line
    :param data: sequence of probabilities or counts
        (scipy.stats.entropy normalizes counts)
    :param bases: iterable of bases; each entry must be 2, 'e' or None
        ('e' and None both mean nats). Defaults to [None].
    :return: list of entropy values, one per requested base
    """
    import scipy.stats
    # BUG FIX: the default bases=None was iterated directly, raising
    # TypeError; treat it as a single None entry (nats).
    if bases is None:
        bases = [None]
    s = 'Entropy of {}:'.format(name)
    ret = []
    for base in bases:
        assert base == 2 or base == 'e' or base is None
        e = scipy.stats.entropy(data, base=base if base != 'e' else None)
        ret.append(e)
        unit = 'nats' if (base == 'e' or base is None) else 'bits'
        s += ' {:.4f} {}'.format(e, unit)
    print(s)
    return ret
def run_epoch(split,
model,
opt,
train_data,
val_data=None,
batch_size=100,
upto=None,
epoch_num=None,
verbose=False,
log_every=10,
return_losses=False,
child=None,
table_bits=None,
warmups=1000):
torch.set_grad_enabled(split == 'train')
model.train() if split == 'train' else model.eval()
if child:
child.train() if split == 'train' else child.eval()
dataset = train_data if split == 'train' else val_data
losses = []
loader = DataLoader(
dataset, batch_size=batch_size, shuffle=(split == 'train'))
# How many orderings to run for the same batch?
nsamples = 1
if hasattr(model, 'orderings'):
nsamples = len(model.orderings)
if not SILENT:
print('setting nsamples to', nsamples)
for step, xb in enumerate(loader):
if split == 'train':
base_lr = 8e-4
for param_group in opt.param_groups:
t = warmups
d_model = model.embed_size
global_steps = len(loader) * epoch_num + step + 1
lr = (d_model**-0.5) * min(
(global_steps**-.5), global_steps * (t**-1.5))
# lr = 5e-4
param_group['lr'] = lr
if upto and step >= upto:
break
xb = xb.to(get_device()).to(torch.float32)
# Forward pass, potentially through several orderings.
xbhat = None
model_logits = []
num_orders_to_forward = 1
if split == 'test' and nsamples > 1:
# At test, we want to test the "true" nll under all orderings.
num_orders_to_forward = nsamples
for i in range(num_orders_to_forward):
if hasattr(model, 'update_masks'):
# We want to update_masks even for first ever batch.
model.update_masks()
model_out = model(xb)
model_logits.append(model_out)
if xbhat is None:
xbhat = torch.zeros_like(model_out)
xbhat += model_out
if xbhat.shape == xb.shape:
if mean:
xb = (xb * std) + mean
loss = F.binary_cross_entropy_with_logits(
xbhat, xb, size_average=False) / xbhat.size()[0]
else:
if model.input_bins is None:
# NOTE: we have to view() it in this order due to the mask
# construction within MADE. The masks there on the output unit
# determine which unit sees what input vars.
xbhat = xbhat.view(-1, model.nout // model.nin, model.nin)
# Equivalent to:
loss = F.cross_entropy(xbhat, xb.long(), reduction='none') \
.sum(-1).mean()
# NOTE: do NOT use reduction='mean' (default behavior)!
# loss = F.cross_entropy(xbhat, xb.long(), reduction='sum') / xbhat.size()[0]
else:
if num_orders_to_forward == 1:
loss = model.nll(xbhat, xb).mean()
if child:
# Distillation loss
child_loss = model.kl_div(model_out.detach(), child,
child_out)
child_loss = child_loss.mean()
child_ref_loss = child.nll(child_out, xb).mean()
else:
# Average across orderings & then across minibatch.
#
# p(x) = 1/N sum_i p_i(x)
# log(p(x)) = log(1/N) + log(sum_i p_i(x))
# = log(1/N) + logsumexp ( log p_i(x) )
# = log(1/N) + logsumexp ( - nll_i (x) )
#
# Used only at test time.
logps = [] # [batch size, num orders]
assert len(model_logits) == num_orders_to_forward, len(
model_logits)
for logits in model_logits:
# Note the minus.
logps.append(-model.nll(logits, xb))
logps = torch.stack(logps, dim=1)
logps = logps.logsumexp(dim=1) + torch.log(
torch.tensor(1.0 / nsamples, device=logps.device))
loss = (-logps).mean()
losses.append(loss.item())
if step % log_every == 0 and not SILENT:
if split == 'train':
print(
'Epoch {} Iter {}, {} entropy gap {:.4f} bits (loss {:.3f}, data {:.3f}) {:.5f} lr'
.format(epoch_num, step, split,
loss.item() / np.log(2) - table_bits,
loss.item() / np.log(2), table_bits, lr))
if child:
print(
'Epoch {} Iter {}, {} child entropy gap {:.4f} bits {:.5f} lr'
.format(epoch_num, step, split,
child_ref_loss.item() / np.log(2) - table_bits,
lr))
print('Distillation loss | |
files from find_multi_mappers and outputs the uniquely mapping reads
files is a list of filenames containing the output of find_multi_mappers
output is a prefix you'd like prepended to the file containing the uniquely mapping reads
This file will be named as <output>+"_no_multimap_"+<index_num>
"""
output_sam_file = prefix+"_processed_reads.sam"
output_bam_file = prefix+"_processed_reads.bam"
output_handle = open(output_sam_file,'w')
#output_pipe = subprocess.Popen(
# shlex.split(path_to_samtools+"samtools view -S -b -"),
# stdin=subprocess.PIPE,stdout=output_handle)
try:
f = open(reference_fasta+".fai",'r')
except:
print("Reference fasta not indexed. Indexing.")
try:
subprocess.check_call(shlex.split(path_to_samtools+"samtools faidx "+reference_fasta))
f = open(reference_fasta+".fai",'r')
except:
sys.exit("Reference fasta wasn't indexed, and couldn't be indexed. "
+"Please try indexing it manually and running methylpy again.")
#Create sam header based on reference genome
output_handle.write("@HD\tVN:1.0\tSO:unsorted\n")
for line in f:
fields = line.split("\t")
output_handle.write("@SQ\tSN:"+fields[0]+"\tLN:"+fields[1]+"\n")
f.close()
## Merging alignment results of both strands
lines = {}
fields = {}
file_handles = {}
total_unique = 0
for index,filen in enumerate(files):
file_handles[filen]=open(filen,'r')
lines[filen]=file_handles[filen].readline()
fields[filen] = lines[filen].split("\t")[0]#Read ID
while True:
all_fields = [field for field in list(fields.values()) if field != ""]
if len(all_fields) == 0:
break
min_field = min(all_fields)
count_1, count_2 = 0, 0
current_line_1, current_line_2 = "", ""
count= 0
max_mapq, min_mapq = -1000,1000
for key in fields:
while fields[key] == min_field:
count += 1
if int(lines[key].split("\t")[4]) >= max_mapq:
max_mapq = int(lines[key].split("\t")[4])
if(int(lines[key].split("\t")[1]) & 64 == 64): #First in pair
#count_1 += 1
current_line_1 = lines[key]
else:
#count_2 += 1
current_line_2 = lines[key]
if int(lines[key].split("\t")[4]) < min_mapq:
min_mapq = int(lines[key].split("\t")[4])
lines[key]=file_handles[key].readline()
fields[key]=lines[key].split("\t")[0]
#Check if there is only one valid alignment
#if count_1 == 1:
if count == 2 or max_mapq > min_mapq:
output_handle.write(current_line_1)
output_handle.write(current_line_2)
total_unique += 1
#output_pipe.stdin.close()
output_handle.close()
for index,filen in enumerate(files):
file_handles[filen].close()
f = open(output_bam_file,'w')
subprocess.check_call(shlex.split(path_to_samtools+"samtools view -S -b -h "+output_sam_file),stdout=f)
f.close()
subprocess.check_call(shlex.split("rm "+output_sam_file))
return total_unique
def convert_reads_pe(inputf,output,is_read2=False,buffer_line_number=100000):
    """
    This function takes a fastq file as input and converts all the cytosines in reads to thymines for
    mapping to bisulfite converted genomes. This function also stores an encoding of where the cytosines
    were located in the header of each fastq read. See encode_c_positions for more detail.

    inputf is a fastq file for conversion
    output is the name of the file you'd like to put the converted reads in
    is_read2: if True, convert G->A (read 2 aligns to the opposite strand),
        otherwise convert C->T
    buffer_line_number: flush the output buffer after this many lines
    """
    # The two strands differ only in which base gets converted, so use a
    # single loop instead of the previous duplicated branches.
    old_base, new_base = ("G", "A") if is_read2 else ("C", "T")
    # `with` closes both handles even on error (the originals leaked on
    # exceptions).
    with open(inputf, 'r') as f, open(output, 'w') as g:
        # read-ahead of one 4-line fastq record
        header = f.readline().rstrip().replace(" ", "!")
        seq = f.readline()
        header2 = f.readline()
        qual = f.readline()
        encoding = encode_c_positions(seq, is_read2=is_read2)
        line_counts = 0
        out = ""
        while header:
            # original base positions are encoded into the read header
            out += header + "!" + encoding + "\n"
            out += seq.replace(old_base, new_base)
            out += header2
            out += qual
            line_counts += 4
            # flush the buffer periodically to bound memory use
            if line_counts > buffer_line_number:
                g.write(out)
                line_counts = 0
                out = ""
            # advance to the next record
            header = f.readline().rstrip().replace(" ", "!")
            seq = f.readline()
            header2 = f.readline()
            qual = f.readline()
            encoding = encode_c_positions(seq, is_read2=is_read2)
        # flush whatever remains
        if line_counts > 0:
            g.write(out)
def quality_trim_pe(inputf_read1, outputf_read1,inputf_read2, outputf_read2,quality_base = None, min_qual_score = 10,
                    min_read_len = 30,adapter_seq_read1 = "AGATCGGAAGAGCACACGTCTGAAC",
                    adapter_seq_read2 = "AGATCGGAAGAGCGTCGTGTAGGGA",num_procs = 1, input_format = None,
                    error_rate = None, max_adapter_removal = None,overlap_length = None, zero_cap = False,
                    path_to_cutadapt = ""):
    """
    Adapter- and quality-trim paired-end reads by invoking cutadapt.

    Information from cutadapt documentation:
    input_format:
        Input file format; can be either 'fasta', 'fastq' or 'sra-fastq'. Ignored when reading csfasta/qual files
        (default: auto-detect from file name extension).
    inputf_read1,inputf_read2:
        list of filenames (or a single filename) for read 1 and read 2 respectively
    outputf_read1,outputf_read2:
        Write the modified sequences to these files instead of standard output and send the summary report to
        standard output. The format is FASTQ if qualities are available, FASTA otherwise. outputf_read1 and outputf_read2
        specify the output filenames of read 1 and read 2 respectively.
    adapter_seq_read1:
        Sequence of an adapter that was ligated to the 3' end of read 1. The adapter itself and anything that follows is
        trimmed.
    adapter_seq_read2:
        Sequence of an adapter that was ligated to the 3' end of read 2. The adapter itself and anything that follows is
        trimmed.
    error_rate:
        Maximum allowed error rate (no. of errors divided by the length of the matching region) (default: 0.1)
    max_adapter_removal:
        Try to remove adapters at most COUNT times. Useful when an adapter gets appended multiple times.
    overlap_length:
        Minimum overlap length. If the overlap between the read and the adapter is shorter than LENGTH, the read
        is not modified. This reduces the no. of bases trimmed purely due to short random adapter matches.
    min_read_len:
        Discard trimmed reads that are shorter than LENGTH. Reads that are too short even before adapter removal
        are also discarded. In colorspace, an initial primer is not counted.
    min_qual_score:
        Trim low-quality ends from reads before adapter removal, BWA style.
    quality_base:
        Assume that quality values are encoded as ascii(quality + QUALITY_BASE). The default (33) is
        usually correct, except for reads produced by some versions of the Illumina pipeline, where this should
        be set to 64.
    zero_cap:
        Change negative quality values to zero (workaround to avoid segmentation faults in BWA).
    num_procs:
        number of cutadapt processes to run concurrently
    path_to_cutadapt:
        Path to the folder where cutadapt executable exists. If none, assumes it can be run from current directory
    """
    if path_to_cutadapt:  # normalize to a directory prefix
        if path_to_cutadapt[-1] != "/":
            path_to_cutadapt += "/"
    path_to_cutadapt += "cutadapt"
    # Probe that cutadapt is runnable. cutadapt invoked without arguments
    # exits non-zero, so a CalledProcessError here still means it is
    # installed; only OSError means the executable is missing.
    try:
        devnull = open('/dev/null', 'w')
        subprocess.check_call([path_to_cutadapt], stdout=devnull, stderr=devnull)
    except OSError:
        sys.exit("Cutadapt must be installed to run quality_trim")
    except:
        devnull.close()
    # BUG FIX: the normalized single-string arguments were previously
    # assigned to unused variables `inputf`/`output`, so passing plain
    # strings never actually worked. Rebind the parameters themselves.
    if not isinstance(inputf_read1, list):
        if isinstance(inputf_read1, str):
            inputf_read1 = [inputf_read1]
        else:
            sys.exit("inputf_read1 must be a list of strings")
    if not isinstance(inputf_read2, list):
        if isinstance(inputf_read2, str):
            inputf_read2 = [inputf_read2]
        else:
            sys.exit("inputf_read2 must be a list of strings")
    if not isinstance(outputf_read1, list):
        if isinstance(outputf_read1, str):
            outputf_read1 = [outputf_read1]
        else:
            sys.exit("outputf_read1 must be a list of strings")
    if not isinstance(outputf_read2, list):
        if isinstance(outputf_read2, str):
            outputf_read2 = [outputf_read2]
        else:
            sys.exit("outputf_read2 must be a list of strings")
    # BUG FIX: the original compared len(outputf_read1) with itself and
    # never checked inputf_read1; require all four lists to match.
    if not (len(inputf_read1) == len(inputf_read2)
            == len(outputf_read1) == len(outputf_read2)):
        sys.exit("Must provide an equal number of input and output files")
    # Shared cutadapt options (identical for every file pair).
    options = " --quiet "
    if input_format:
        options += " -f " + input_format
    if error_rate:
        options += " -e " + str(error_rate)
    if max_adapter_removal:
        options += " -n " + str(max_adapter_removal)
    if overlap_length:
        options += " -O " + str(overlap_length)
    if min_read_len:
        options += " -m " + str(min_read_len)
    if min_qual_score:
        options += " -q " + str(min_qual_score)
    if quality_base:
        options += " --quality-base=" + str(quality_base)
    options += " -a " + adapter_seq_read1
    options += " -A " + adapter_seq_read2
    if zero_cap:
        options += " -z "
    # BUG FIX: the original appended "-o/-p" to `options` INSIDE the loop,
    # so every iteration after the first carried stale output flags for
    # all previous pairs. Build one complete command per pair instead.
    commands = []
    for in1, out1, in2, out2 in zip(inputf_read1, outputf_read1,
                                    inputf_read2, outputf_read2):
        commands.append(path_to_cutadapt + options +
                        " -o " + out1 + " -p " + out2 +
                        " " + in1 + " " + in2)
    if num_procs > 1:
        pool = multiprocessing.Pool(num_procs)
        for cmd in commands:
            pool.apply_async(subprocess.check_call, (cmd,), {"shell": True})
        pool.close()
        pool.join()
    else:
        for cmd in commands:
            subprocess.check_call(cmd, shell=True)
def flip_read2_strand(input_file,output_file,path_to_samtools=""):
"""
This function flips the strand of all read2's of mapped paired-end
reads in input bam file
input_file:
Input bam file storing the mapped paired-end reads
output_file:
Output bam file storing the paired-end reads with strand of read 2 flipped
path_to_samtools:
A string of the directory where samtools executive is available. By default,
samtools is assumed to be included in your path (PATH | |
<gh_stars>0
# Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/)
# Copyright (c) 2020-2021 <NAME> <<EMAIL>>
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
try: clipData
except NameError: clipData = []
prompt_txt = eduMovie.prompt(clear=False)
prompt_len_str = str( len( eduMovie.prompt(color=False) ) + 1 )
code_udp_client = r"""
import socket, select, sys
if len(sys.argv) != 3:
print("USAGE: " + sys.argv[0] + " dstHost dstPort", file=sys.stderr)
exit(1)
dstAddrInfo = socket.getaddrinfo(sys.argv[1], sys.argv[2], type=socket.SOCK_DGRAM)
for x in dstAddrInfo:
try:
sfd = socket.socket(x[0], socket.SOCK_DGRAM)
sfd.sendto("Ala ma kota".encode(), x[4])
except:
if sfd:
sfd.close()
continue
break
while True:
rdfd, _, _ = select.select([sfd], [], [], 33.0)
if sfd in rdfd:
data, sAddr, = sfd.recvfrom(4096)
print("odebrano od", sAddr, ":", data.decode());
else:
print("Timeout!")
break
"""
code_udp_server = r"""
import socket, select, sys
if len(sys.argv) != 2:
print("USAGE: " + sys.argv[0] + " listenPort", file=sys.stderr)
exit(1)
sfd = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
sfd.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
sfd.bind(('::', int(sys.argv[1])))
while True:
rdfd, _, _ = select.select([sfd], [], [], 33.0)
if sfd in rdfd:
data, sAddr, = sfd.recvfrom(4096)
print("odebrano od", sAddr, ":", data.decode());
else:
print("Timeout!")
break
"""
code_udp_server_fmt = eduMovie.code2console(code_udp_server, "py", limit=24)
clipData += [
{ 'comment': 'UDP' },
{
'console': [
[0.0, eduMovie.code2console(code_udp_client, "py", limit=24)],
],
'text' : [
'Pisanie serwera zaczniemy jednak od prostszego przypadku jakim jest UDP. <m>'
'Na ekranie widzimy przykład kodu wysyłającego coś po UDP, <m> można powiedzieć że klienta jakiejś usługi UDP. <m>'
'Jest on dość podobny do klienta TCP, <m> tyle że zamiast tworzenia połączenia wysyłamy od razu dane. <m>'
'Wysyłanie zrealizowane jest wewnątrz bloku try, tak jak connect w TCP. <m>'
'Wynika to z tego, iż w przypadku braku dostępu do danego hosta w TCP <m> zawiedzie funkcja connect, a w UDP zawiedzie dopiero wysyłanie <m> – bo nie mamy nawiązywania połączenia i funkcji connect. <m>'
'Później również oczekujemy na jakąś odpowiedź. <m>'
'Warto jednak zauważyć że funkcja użyta do odebrania odpowiedzi zwraca <m> nie tylko jej treść, jak było to w TCP, ale także adres nadawcy. <m>'
'Nie mamy tutaj połączenia i odpowiedź może przyjść od kogokolwiek. <m>' # zatem często dobrym zwyczajem byłoby sprawdzenie <m> czy taka odpowiedź przyszła od hosta do którego wysłaliśmy zapytanie. <m>'
'Dodatkowo (w odróżnieniu od poprzedniego programu) tym razem <m> adres i port z którym mamy się połączyć przyjmujemy w linii poleceń, <m>'
'a gdy nie został podany to wpisujemy odpowiednią, <m> krótką informację na temat użycia tego programu. <m>'
'Ogólnie polecam w tworzonych programach, które przyjmują jakieś argumenty <m> w linii poleceń umieszczać taki warunek, <m>'
'ponieważ przydaje się on do łatwego przypomnienia sobie po kilku latach <m> jakie to argumenty miał dostać nasz program, <m> bez czytania całego kodu tego programu. <m>'
]
},
{
'console': [
[0.0, "o", "\u001b[?25l\u001b[32m\u001b[8;1H────────────────────────────────────────────────────────────────────────────────\u001b[39m\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b[1;1H\u001b(B\u001b[m" + prompt_txt + "\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\u001b[2B" + prompt_txt + "\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\u001b[2B" + prompt_txt + "\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\u001b[1;" + prompt_len_str + "H\u001b[?12l\u001b[?25h"],
[0.424809, "o", "nc -lup 4444"],
[1.180725, "o", "\r\n"],
[2.069185, "o", "\u001b[?25l\u001b[32m\u001b[6B────────────────────────────────────────────────────────────────────────────────\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b(B\u001b[m\u001b[9;" + prompt_len_str + "H\u001b[?12l\u001b[?25h"],
[3.552154, "o", "python3 przykład.py 127.0.0.1 4444"],
[3.956795, "o", "\r\n"],
[3.989271, "o", "\u001b[2dAla ma kota\u001b[10;1H"],
[4.973274, "o", "\u001b[?25l\u001b[32m\u001b[2A────────────────────────────────────────────────────────────────────────────────\u001b[39m\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b(B\u001b[m\u001b[2;12H\u001b[?12l\u001b[?25h"],
[5.727499, "o", "k"],
[5.897499, "o", "o"],
[5.97499, "o", "t"],
[6.072499, "o", "a"],
[6.2499, "o", " "],
[6.40299, "o", "a"],
[6.527499, "o", " "],
[6.707499, "o", "n"],
[6.927499, "o", "i"],
[7.127499, "o", "e"],
[7.27499, "o", " "],
[7.427499, "o", "b"],
[7.567499, "o", "o"],
[7.727499, "o", "t"],
[7.847499, "o", "a"],
[7.97499, "o", "?"],
[8.160792, "o", "\r\n"],
[8.161203, "o", "\u001b[7Bodebrano od ('127.0.0.1', 4444) : kota a nie bota?\u001b[3;1H"],
[9.40528, "o", "\u001b[?25l\u001b[32m\u001b[5B────────────────────────────────────────────────────────────────────────────────\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b(B\u001b[m\u001b[12;1H\u001b[?12l\u001b[?25h"],
[9.87706, "o", "\u001b[?25l\u001b[4A────────────────────────────────────────────────────────────────────────────────\u001b[32m\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b(B\u001b[m\u001b[17;" + prompt_len_str + "H\u001b[?12l\u001b[?25h"],
["findPort + 0.098743", "o", "netstat -a | grep udp"],
["findPort + 0.636533", "o", "\r\n"],
["findPort + 0.666986", "o", "\u001b[31m\u001b[1mudp\u001b(B\u001b[m\u001b[K 0 0 0.0.0.0:bootpc 0.0.0.0:* \r\n\u001b[31m\u001b[1mudp\u001b(B\u001b[m\u001b[K 0 0 localhost:4444 localhost:39106 ESTABLISHED\r\n\u001b[31m\u001b[1mudp\u001b(B\u001b[m\u001b[K 0 0 0.0.0.0:39106 0.0.0.0:* \r\n"],
["findPort + 0.670475", "o", prompt_txt],
["send2client + 2.7", "o", "nc -u 127.0.0.1 "],
["send2client + 2.9", "o", "39106"],
["send2client + 4.080881", "o", "\r\n"],
["send2client + 4.17289", "o", "Ala nic nie ma"],
["send2client + 4.716984", "o", "\r\n"],
["send2client + 4.717369", "o", "\u001b[11Aodebrano od ('127.0.0.1', 60294) : Ala nic nie ma\u001b[23;1H"],
],
'text' : [
'Widzimy że dane wysłane przez naszego klienta UDP zostały odebrane <m> przez netcata, jak również odpowiedzi od netcata dotarły do naszego programu. <m>'
'Wspomnieliśmy że w związku z bezpołączeniowością UDP nasz klient może <m> otrzymać odpowiedź z innych adresów niż ten z którym się łączył. <m>'
'Spróbujmy to sprawdzić. <m>'
'Adres na którym słucha nasz klient można ustalić na kilka sposobów <m> - na przykład podsłuchując wysyłany przez niego pakiet (jest tam adres i port źródłowy) <mark name="findPort" />'
'lub korzystając z polecenia <netstat>[net stat] potrafiącego wypisać <m> wszystkie otwarte połączenia na danym hoście, procesy je obsługujące i tak dalej. <m>'
'Widzimy że najprawdopodobniej jest to 39106, <mark name="send2client" /> zatem spróbujmy coś wysłać na ten numer portu. <m>'
'Jak widzimy nasz klient odebrał te dane, zna też adres ich nadawcy, <m> czyli mógłby wysłać odpowiedź. <m>'
'Zasadniczo gdybyśmy nie musieli "na około" ustalać adresu na którym słucha <m> nasz klient to mógłby on pełnić funkcję serwera usługi UDP. <m>'
]
},
{
'console': [
[0.0, eduMovie.clear + code_udp_server_fmt],
["gniazdo", eduMovie.clear + eduMovie.markLines(code_udp_server_fmt, [6], False, "")],
["gniazdo2", eduMovie.clear + eduMovie.markLines(code_udp_server_fmt, [7], False, "")],
["gniazdo3", eduMovie.clear + eduMovie.markLines(code_udp_server_fmt, [], False, "")],
["bind", eduMovie.clear + eduMovie.markLines(code_udp_server_fmt, [9], False, "")],
["bind2", eduMovie.clear + eduMovie.markLines(code_udp_server_fmt, [], False, "")],
],
'text' : [
'Serwer UDP, którego przykładowy kod widzimy na ekranie, <m> w istocie nie różni się znacząco od klienta. <m>'
'Tym razem nie mamy adresu z którym się łączymy, zatem sami musimy zdecydować <m> czy używamy IPv4 czy IPv6 do słuchania, tworząc odpowiednie gniazdo. <mark name="gniazdo" />'
'W widocznym przykładzie tworzymy gniazdo IPv6 <mark name="gniazdo2" /> i wyłączamy na nim opcje ipv6 only, <m> w efekcie czego będzie odbierało również połączenia IPv4. <mark name="gniazdo3" />'
'Jako że chcemy po prostu słuchać, <m> a nie słuchać w oczekiwaniu na odpowiedź na coś co wysłaliśmy to musimy <m> samodzielnie wskazać adres ip i numer portu na którym słuchamy. <mark name="bind" />'
'W tym celu wywołujemy funkcję bind, której przekazujemy te dane <m> i rozpoczyna ona odbiór danych na wskazanym adresie i porcie. <m>'
'Oczywiście pod warunkiem że wskazany port na wskazanym adresie <m> nie jest zajęty przez inny słuchający na nim program <m> oraz że mamy prawo słuchać na danym porcie. <m>'
'(typowo portów o numerach niższych niż 1024 może używać tylko root). <m>'
'W widocznym przykładzie podajemy adres IP złożony z samych zer, <m> co oznacza że słuchamy na wszystkich adresach hosta, <m> na którym został uruchomiony ten program. <m>'
'Jawnie wskazujemy też numer portu używanego do odbioru połączeń, <m> gdyż w przypadku serwera chcemy żeby była to dobrze określona wartość, <m> a nie coś co musimy jakoś ustalić po jego uruchomieniu. <mark name="bind2" />'
'Pętla odbierająca wygląda tak samo jak wcześniej w kliencie. <m>'
]
},
{
'console': [
[0.0, "o", "\u001b[?25l\u001b[32m\u001b[8;1H────────────────────────────────────────────────────────────────────────────────\u001b[39m\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b[1;1H\u001b(B\u001b[m" + prompt_txt + "\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\u001b[2B" + prompt_txt + "\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\u001b[2B" + prompt_txt + "\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\r\n\u001b[K\u001b[1;" + prompt_len_str + "H\u001b[?12l\u001b[?25h"],
[0.569514, "o", "python3 przykład.py 4444"],
[1.586711, "o", "\r\n"],
[1.990973, "o", "\u001b[?25l\u001b[32m\u001b[6B────────────────────────────────────────────────────────────────────────────────\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b(B\u001b[m\u001b[9;" + prompt_len_str + "H\u001b[?12l\u001b[?25h"],
[2.355447, "o", "nc -u ::1 4444"],
[3.138404, "o", "\r\n"],
[4.522273, "o", "test 1"],
[5.202252, "o", "\r\n"],
[5.202648, "o", "\u001b[2dodebrano od ('::1', 34195, 0, 0) : test 1\u001b[11;1H"],
[6.595093, "o", "\u001b[?25l\u001b[3A────────────────────────────────────────────────────────────────────────────────\u001b[32m\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b(B\u001b[m\u001b[17;" + prompt_len_str + "H\u001b[?12l\u001b[?25h"],
[7.156072, "o", "nc -u 127.0.0.1 4444"],
[7.8546406, "o", "\r\n"],
[9.766638, "o", "test 2"],
[10.938556, "o", "\r\n"],
[10.938924, "o", "\u001b[4dodebrano od ('::ffff:127.0.0.1', 56277, 0, 0) : test 2\u001b[19;1H"],
[11.547179, "o", "\u001b[?25l\u001b[32m\u001b[8d────────────────────────────────────────────────────────────────────────────────\u001b[16;1H────────────────────────────────────────────────────────────────────────────────\u001b(B\u001b[m\u001b[11;1H\u001b[?12l\u001b[?25h"],
[12.434616, "o", "test 3"],
[13.490627, "o", "\r\n"],
[13.490945, "o", "\u001b[1;7r\u001b[7;80H\n\u001b[5;1Hodebrano od ('::1', 34195, 0, 0) : test 3\u001b[1;24r\u001b[12;1H"],
],
'text' : [
'Możemy uruchomić nasz serwer UDP i zobaczyć go w działaniu. <m>'
'Widzimy że odbiera zarówno połączenia po IPv6, jak i po IPv4. <m>'
'Poprawnie identyfikuje też klienta od którego otrzymał daną informację, <m> więc mógłby odesłać do | |
#coding:utf8
#authors : yqq
import logging
import json
from utils import decimal_default,get_linenumber
from base_handler import BaseHandler
from .proxy import AuthServiceProxy
from cashaddress import convert
import traceback
# Set the Decimal arithmetic precision to 8 significant digits
# (satoshi granularity).
from decimal import Decimal
from decimal import getcontext
getcontext().prec = 8

from constants import BCH_RPC_URL as RPC_URL

# Name of the DB table holding the exchange's BTC/BCH deposit addresses.
STR_ADDRESS_TABLE = "t_btc_address"
class BTC_ListAccounts(BaseHandler):
    """Handler listing every deposit address known to the exchange."""

    @staticmethod
    def addresses():
        """Fetch all addresses from the address table in the database."""
        from sql import run
        accounts = run("""select * from {};""".format(STR_ADDRESS_TABLE)) #TODO: cache the addresses in redis once the data set grows large
        return [account['address'] for account in accounts]

    def get(self):
        """Write the full address list as a JSON success payload."""
        try:
            data = BTC_ListAccounts.addresses()
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_ListAccounts error:{0} in {1}".format(e,get_linenumber()))
# Module-level cache of all exchange-user BTC addresses, loaded once at
# import time (original note dated 2019-06-01).
# NOTE(review): this list goes stale if new addresses are created while the
# service is running -- confirm whether that is acceptable.
g_exUserAddrs = BTC_ListAccounts.addresses()
class BTC_GetAccount(BaseHandler):
    """Handler returning the wallet account label for a given address."""

    def get(self):
        """Run one `getaccount` RPC batch and write the JSON reply."""
        rpc = AuthServiceProxy(RPC_URL)
        try:
            address = self.get_argument("address")
            result = rpc.batch_([["getaccount", address]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetAccount error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetAccountAddress(BaseHandler):
    """Handler returning the current receiving address of an account."""

    def get(self):
        """Run `getaccountaddress` for the `account` argument, write JSON."""
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            commands = [["getaccountaddress", self.get_argument("account")]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            # fixed typo: the log message previously read "BTC_GetAccoutAddress"
            logging.error("BTC_GetAccountAddress error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetAccountBalance(BaseHandler):
    """Handler returning the confirmed wallet balance of an account."""

    def get(self):
        """Validate the `account` argument, run `getbalance`, write JSON."""
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            # NOTE(review): .decode() implies a Python-2 byte string from
            # tornado -- confirm before porting to Python 3.
            account = self.get_argument("account").decode("utf-8")
            # idiom fix: `account is None or len(account) == 0` -> `not account`
            if not account:
                self.write(json.dumps(BaseHandler.error_ret()))
                return
            commands = [["getbalance", account]]
            data = btc_rpc_connection.batch_(commands)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetAccountBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_GetBalance(BaseHandler):
    """Handler summing an address's spendable UTXOs into a balance string."""

    def get(self):
        """List UTXOs for `address` and report their total with 8 decimals."""
        rpc = AuthServiceProxy(RPC_URL)
        try:
            utxos = BTC_ListUTXO.utxo(rpc, self.get_argument("address"))
            if not utxos:
                # no spendable outputs: report "0" through the error path
                self.write(json.dumps(BaseHandler.error_ret_with_data("0")))
                return
            from utils import accumulate
            total = '%.8f' % accumulate(utxos)
            self.write(json.dumps(BaseHandler.success_ret_with_data(total), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetBalance error:{0} in {1}".format(e,get_linenumber()))
class BTC_ListUTXO(BaseHandler):
    """Handler listing unspent outputs (UTXOs) for one or more addresses."""

    @staticmethod
    def utxo(rpcconn, addrs, minconf=1, maxconf=9999999, opt=None):
        """Return `listunspent` results with addresses in legacy format.

        Parameters:
            rpcconn -- AuthServiceProxy connection to the node
            addrs   -- a single address or a list of addresses to filter on
            minconf/maxconf -- confirmation window passed to the node
            opt     -- optional query-option dict, e.g. {'minimumAmount': x}
        """
        arg_addrs = addrs if isinstance(addrs, list) else [addrs]
        command = ["listunspent", minconf, maxconf, arg_addrs, True]
        if opt is not None:  # idiom fix: was `opt == None`
            command.append(opt)
        utxos = rpcconn.batch_([command])[0]
        # The node reports cash-format addresses; convert each to the legacy
        # base58 format and keep the original under 'cashaddress'.
        for utxo in utxos:  # idiom fix: was an index loop over range(len(...))
            cash_addr = utxo['address']
            utxo['address'] = convert.to_legacy_address(cash_addr)
            utxo['cashaddress'] = cash_addr
        return utxos

    def post(self):
        """Parse minconf/maxconf/address arguments and write the UTXO list."""
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            minconf = int(self.get_argument("minconf")) if self.get_argument("minconf") != "" else 1
            maxconf = int(self.get_argument("maxconf")) if self.get_argument("maxconf") != "" else 9999999
            addr = self.get_argument("address")
            data = BTC_ListUTXO.utxo(btc_rpc_connection, addr, minconf, maxconf)
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_GetUTXO error:{0} in {1}".format(e,get_linenumber()))
class BTC_EstimateSmartFee(BaseHandler):
    """Handler estimating the miner fee for a simple 1-in/2-out transfer."""

    @staticmethod
    def process(rpcconn, nConfTarget=2, strEstimateMode='ECONOMICAL'):
        """Return the fee rate in satoshis per byte.

        The node-side estimation is disabled (kept commented below for
        reference) and a flat 20 sat/B is returned; the parameters are
        unused but preserve the original call signature.
        """
        # commands = [["estimatesmartfee", nConfTarget, strEstimateMode ]]
        # commands = [["estimatefee"]] # bch differs from btc and ltc here
        # data = rpcconn.batch_(commands)
        # nFeeRate = data[0] if len(data) > 0 else Decimal(0.00001)
        # return nFeeRate * 100000000 / 1000 # satoshi/Byte, i.e. satoshis per byte
        # if len(data) > 0:
        #     return data[0]['feerate'] * 100000000 / 1000 # satoshi/Byte, i.e. satoshis per byte
        return 20

    @staticmethod
    def calcFee(rpcconn, nIn=1, nOut = 2):
        """Estimate the absolute fee (in coins) for nIn inputs / nOut outputs.

        Uses the classic size formula 148*nIn + 34*nOut + 10 bytes times the
        per-byte rate from process(), converted from satoshis to coins.
        """
        from decimal import Decimal
        from decimal import getcontext
        getcontext().prec = 8  # 8 significant digits (satoshi granularity)
        rate = BTC_EstimateSmartFee.process(rpcconn)
        # sat/B -> coin/B, rendered to 8 decimal places then re-parsed
        rate = "%.8f" % (rate / Decimal(100000000.0))
        return Decimal(str((148 * nIn + 34 * nOut + 10))) * Decimal(rate)

    def get(self):
        """Write the fee estimate for a default transaction as JSON."""
        try:
            rpcconn = AuthServiceProxy(RPC_URL)
            data = BTC_EstimateSmartFee.calcFee(rpcconn)
            data = '%.8f' % data
            self.write(json.dumps(BaseHandler.success_ret_with_data(data), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % e)))
            logging.error("BTC_EstimateSmartFee error:{0} in {1}".format(e, get_linenumber()))
        pass
class BTC_CreateRawTransaction(BaseHandler):
    """Handler building an unsigned raw transaction from one address to another."""

    @staticmethod
    def process(rpcconn,from_addr,to_addr,amount):
        """Select UTXOs of `from_addr` covering `amount` + fee and build the tx.

        Returns (True, {"hex":..., "utxos":..., "txout":...}) on success or
        (False, error message) when the balance is insufficient.
        """
        #utxos
        utxos = BTC_ListUTXO.utxo(rpcconn, from_addr)
        #print(utxos)
        def UtxoFilter(utxos, amount):
            # Greedy selection: take confirmed UTXOs above the dust threshold
            # until their sum covers amount + the estimated fee.
            selected = []
            from decimal import Decimal
            nSum = Decimal('0')
            # minimum input utxo amount: 148 * rate, where rate is the
            # amount of BTC required per 1000 bytes
            nFee = Decimal('0.0')
            for utxo in [item for item in utxos if int(item["confirmations"]) >= 1 and float(item["amount"]) > 0.0003 ]:
                selected.append(utxo)
                nSum += Decimal(str((utxo["amount"])))
                if nSum > Decimal(str(amount)):
                    # re-estimate the fee for the current input count
                    nFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)
                    if nSum > nFee + amount:
                        break
            return selected, nSum, nFee
        selected, nSum , fee = UtxoFilter(utxos, amount)
        # check if enough
        # from utils import calcFee
        if not isinstance(amount, Decimal):
            amount = Decimal(str(amount))
        # fee = BTC_EstimateSmartFee.calcFee(rpcconn, len(selected), 2)
        if nSum < fee + amount:
            return False,"budget not enough"
            #return False,0  # needs testing!!!
        from utils import filtered
        param_in = [filtered(item,["txid","vout"]) for item in selected]
        # change (inputs - amount - fee) is sent back to the source address
        param_out = {to_addr:amount, from_addr: nSum - amount - fee}
        #print("--------------param_out-------------")
        #print("fee" + str(fee))
        #print(param_in)
        #print(param_out)
        #print("--------------param_out-------------")
        # create raw transaction
        commands = [["createrawtransaction",param_in,param_out]]
        return True, {"hex":rpcconn.batch_(commands), "utxos":selected, "txout":param_out}

    def post(self):
        """Parse from/to/amount arguments, build the raw tx, write JSON."""
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            from_addr = self.get_argument("from")
            to_addr = self.get_argument("to")
            #amount = float(self.get_argument("amount"))
            from decimal import Decimal
            amount = Decimal(str(self.get_argument("amount")))
            ret, rsp = BTC_CreateRawTransaction.process(btc_rpc_connection,from_addr,to_addr,amount)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreatRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_SendRawTransaction(BaseHandler):
    """Handler broadcasting a signed raw transaction to the network."""

    def post(self):
        """Forward the `rawdata` hex blob via `sendrawtransaction`."""
        rpc = AuthServiceProxy(RPC_URL)
        try:
            raw_hex = self.get_argument("rawdata")
            if not raw_hex:
                # nothing to broadcast; silently ignore, as before
                return
            result = rpc.batch_([["sendrawtransaction", raw_hex]])
            self.write(json.dumps(BaseHandler.success_ret_with_data(result), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_SendRawTransaction error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx(BaseHandler):
    """Handler building a raw transaction from several source addresses to a
    mapping of destination addresses/amounts; change goes to src[0]."""

    @staticmethod
    def genearateInParam(rpcconn, src, dest):
        """Collect UTXOs from `src` until they cover sum(dest) plus the fee.

        Returns (enough?, selected utxos, change amount). The misspelled
        name ("genearate") is kept for call-site compatibility.
        """
        utxos,gross,amount = [],Decimal('0'),sum(dest.values())
        redundant = 0
        for addr in src:
            # utxos
            all = BTC_ListUTXO.utxo(rpcconn, addr)
            # recommend
            from utils import recommended
            selected,aggregate = recommended(all,amount)
            # process
            utxos += selected
            gross += aggregate
            # check if enough: change = inputs - fee - requested amount
            redundant = gross - BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(dest.keys())+1) - amount
            if redundant > 0:
                return True,utxos,redundant
        return False,utxos,redundant

    @staticmethod
    def generateOutParam(dest):
        """Normalize destination amounts to Decimal, keyed by address."""
        param_out = {}
        for key,value in dest.items():
            param_out[key] = Decimal(value) if isinstance(value, str) else Decimal(str(value))
        return param_out

    @staticmethod
    def process(rpcconn, src, dest ):
        """Build the raw transaction; returns (ok, payload-or-message)."""
        # preprocess
        param_out = BTC_CreateRawTransactionEx.generateOutParam(dest)
        ret,utxos,redundant = BTC_CreateRawTransactionEx.genearateInParam(rpcconn,src,param_out)
        if not ret: return False, "budget not enough"
        # param_out refinement: route the change back to the first source
        param_out[src[0]] = redundant if src[0] not in param_out.keys() else param_out[src[0]] + redundant
        #print(param_out)
        # param_in refinement
        from utils import filtered
        param_in = [filtered(item,["txid","vout"]) for item in utxos]
        #print(param_in)
        return True, {"hex":rpcconn.batch_([["createrawtransaction",param_in,param_out]]),"utxos":utxos, "txout":param_out}

    def get_argument_ex(self, str):
        """Read a field from the JSON request body; False when missing.
        NOTE(review): the parameter name shadows the builtin `str`.
        """
        from utils import json2dict
        str2dict = json2dict(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False

    def post(self):
        """Validate src (list) / dest (dict) and write the built transaction."""
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            src = self.get_argument_ex("src")
            dest = self.get_argument_ex("dest")
            if not isinstance(src, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
                return
            if not isinstance(dest, dict):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json object"))))
                return
            ret, rsp = BTC_CreateRawTransactionEx.process(btc_rpc_connection, src, dest)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
class BTC_CreateRawTransactionEx_Collection(BaseHandler):
    """Handler sweeping (collecting) exchange-user UTXOs into one destination."""

    @staticmethod
    def makeParams( rpcconn, lstSrc, lstDest):
        """Gather every UTXO of the source addresses ("*" means all users).

        Returns (ok, utxos, net transfer amount, fee)."""
        if len(lstSrc) == 1 and lstSrc[0].strip() == "*":
            lstSrcAddrs = g_exUserAddrs
        else:
            lstSrcAddrs = lstSrc
        utxos, nSum = [], Decimal('0')
        txAmount, fTxFee = 0, 0
        #for addr in lstSrc:
        if isinstance(lstSrc, list):
            # bitcoin-cli -conf=/root/.bitcoin/bitcoin-test.conf listunspent 0 9999999 '[]' true '{ "minimumAmount": 0.005 }'
            # commands = [["listunspent", 0, 99999999, [], True, {'minimumAmount':0.0003}]]
            # lstUtxos = rpcconn.batch_(commands)[0]
            # query all wallet UTXOs above the dust threshold, then keep only
            # those belonging to the requested source addresses
            opt = {'minimumAmount':0.0003}
            lstUtxos = BTC_ListUTXO.utxo(rpcconn, [ ], 1, 9999999, opt)
            # print(len(lstUtxos))
            for utxo in lstUtxos:
                if utxo['address'].strip() in lstSrcAddrs:
                    utxos.append(utxo)
                    nSum += Decimal(str((utxo["amount"])))
            fTxFee = BTC_EstimateSmartFee.calcFee(rpcconn, len(utxos), len(lstDest))
            txAmount = nSum - fTxFee  # actual transfer amount after fee
            if txAmount <= 0.0003:  # actual transfer amount is too small
                return False, None, 0, 0
        return True, utxos, txAmount , fTxFee

    @staticmethod
    def process(rpcconn, lstSrc, lstDest):
        """Build the sweep transaction paying everything to lstDest[0]."""
        #lstSrcAddrs = []
        bRet, utxos, txAmount, fTxFee = BTC_CreateRawTransactionEx_Collection.makeParams(rpcconn, lstSrc, lstDest)
        if not bRet:
            return False, "collection amount is too small!"
        strDst = lstDest[0]
        vout = {strDst : txAmount}
        from utils import filtered
        vin = [filtered(item,["txid","vout"]) for item in utxos]
        strHex = rpcconn.batch_([["createrawtransaction", vin, vout]])
        return True, {"hex": strHex, "utxos":utxos, "txout":vout, "txFee":fTxFee}

    def get_argument_ex(self, str):
        """Read a field from the JSON request body; False when missing."""
        from utils import json2dict
        str2dict = json2dict(self.request.body)
        return str2dict[str] if str in str2dict.keys() else False

    def post(self):
        """Validate src/dest (both JSON lists) and write the sweep tx."""
        btc_rpc_connection = AuthServiceProxy(RPC_URL)
        try:
            src = self.get_argument_ex("src")
            dest = self.get_argument_ex("dest")
            if not isinstance(src, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("src must be json list"))))
                return
            if not isinstance(dest, list):
                self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s" % ("dest must be json list"))))
                return
            ret, rsp = BTC_CreateRawTransactionEx_Collection.process(btc_rpc_connection, src, dest)
            if not ret:
                self.write(json.dumps(BaseHandler.error_ret_with_data(rsp)))
                return
            self.write(json.dumps(BaseHandler.success_ret_with_data(rsp), default=decimal_default))
        except Exception as e:
            # traceback.print_exc()
            logging.error("BTC_CreateRawTransactionEx error:{0} in {1}".format(e,get_linenumber()))
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
# Query the per-address balances that are pending collection (sweep).
class BTC_CollectionQuery(BaseHandler):
    """Handler reporting, per exchange address, the sum of its UTXO amounts."""

    def get(self):
        """Aggregate UTXO amounts by address for all exchange users."""
        rpcconn = AuthServiceProxy(RPC_URL)
        try:
            # wallet-wide listunspent, dust (< 0.0003) filtered server-side
            utxos = BTC_ListUTXO.utxo(rpcconn, [], 1, 9999999, {'minimumAmount': 0.0003})
            balances = {}
            for utxo in utxos:
                addr = utxo['address'].strip()
                if addr not in g_exUserAddrs:
                    continue
                previous = balances.get(addr, Decimal("0.0"))
                balances[addr] = str(utxo['amount'] + Decimal(previous))
            self.write(json.dumps(BaseHandler.success_ret_with_data(balances), default=decimal_default))
        except Exception as e:
            self.write(json.dumps(BaseHandler.error_ret_with_data("error: %s"%e)))
            logging.error("BTC_CollectionQuery error:{0} in {1}".format(e, get_linenumber()))
class BTC_ListTransactions(BaseHandler):
@staticmethod
def blktimes(rpc_connection,account="*",tx_counts=10):
commands = [["listtransactions",account,tx_counts]]
data = rpc_connection.batch_(commands)
if len(data) == 0: return []
| |
<gh_stars>1-10
#! /usr/bin/env python
# -*- coding: ASCII -*-
"""
Methods called by the spoca_hfc_processing and spoca_hfc_classes modules.
@author: <NAME> (CNRS, LESIA)
@modified by: <NAME> (Obs.Paris, LESIA)
"""
import os
import sys
import time
from datetime import datetime, timedelta
import csv
import re
import cStringIO
import urllib2
import sqlite3
import suds
import copy
import tempfile
#import pyfits
import PIL
from PIL import ImageEnhance
import numpy as np
from sunpy.net import vso
from ssw import tim2jd
from astropy.io.votable import parse, is_votable
from astropy.io import fits
from scipy import ndimage
#sys.path.append('/home/ytaoufiq/SPOCA-IAS/SPOCA-AR/spoca/hfc/src/')
import sunpy
import sunpy.map
import sunpy.map.sources
import sunpy.instr
import sunpy.instr.aia
#from memory_profiler import profile
import sdo_client_idoc
import logging
from bs4 import BeautifulSoup
from jsoc_client import jsoc
# Import spoca hfc global variables
try:
from spoca_hfc_globals import LOG, MEDOC_HQI_WSDL, \
HFC_TFORMAT, MEDOC_TFORMAT, AIA_WAVES, \
VSO_TFORMAT, SDAC_URL, TODAY, JSOC_IN_TFORMAT, \
JSOC_OUT_TFORMAT, JSOC_URL, SAIO_URL, SAIO_TFORMAT, \
INPUT_TFORMAT, DATA_DIRECTORY, OUTPUT_DIRECTORY, \
MEDOC_URL, VSO_URL, BATCH_DIRECTORY, SDO_URL, \
FITS_TFORMAT
except:
sys.exit("Import failed in module spoca_hfc_methods :\
\n\tspoca_hfc_globals module is required!")
# Import sdo_client_idoc variables
try:
from sdo_client_idoc import search
except:
sys.exit("Import failed in module spoca_hfc_methods :\
\n\tsdo_client_idoc module is required!")
# Path definition
CURRENT_DIRECTORY = os.getcwd()  # working directory captured at import time
def query_medoc(instrument,
                tstart=None, tend=None,
                sample=None,
                min_wave=None, max_wave=None):
    """
    Query the MEDOC HELIO Interface for files of `instrument` observed in
    [tstart, tend], optionally bounded in wavelength.

    Returns a list of dicts with keys fileid, time_start, provider,
    filename, min_wave, max_wave; an empty list when the SOAP client
    cannot be created.
    """
    instrument = instrument.upper()
    if (instrument == "EIT"):
        # only SOHO/EIT is mapped to a HQI table here
        table = "soho_view"
    where_clause = "instrument," + instrument
    if (min_wave):
        where_clause += ";wavemin," + str(min_wave) + "/"
    if (max_wave):
        where_clause += ";wavemax,/" + str(max_wave)
    # NOTE(review): starttime/endtime stay unbound when tstart/tend are None
    # and `table` is unbound for non-EIT instruments -- the Query call below
    # would then raise; this pre-existing behavior is kept unchanged.
    if (tstart):
        starttime = tstart.strftime(HFC_TFORMAT)
    if (tend):
        endtime = tend.strftime(HFC_TFORMAT)
    medoclist = []
    try:
        client = suds.client.Client(MEDOC_HQI_WSDL)
    except:
        LOG.error("Can not query MEDOC HQI!")
    else:
        votable = getattr(client.service, "Query")(
            STARTTIME=starttime,
            ENDTIME=endtime,
            FROM=table,
            WHERE=where_clause)
        medoc_header, medoc_data = parse_votable(votable)
        # print medoc_data
        for current_row in medoc_data:
            if ('T' in current_row['date_obs']):
                current_time_start = datetime.strptime(
                    current_row['date_obs'],
                    HFC_TFORMAT)
            else:
                # BUG FIX: the parsed datetime was previously discarded,
                # leaving current_time_start unbound (NameError) or stale
                # from the previous row.
                current_time_start = datetime.strptime(
                    current_row['date_obs'], MEDOC_TFORMAT)
            current_dict = {'fileid': current_row['address'],
                            'time_start': current_time_start,
                            'provider': "MEDOC",
                            'filename':
                            os.path.basename(
                                current_row['address']),
                            'min_wave': current_row['wavemin'],
                            'max_wave': current_row['wavemax']}
            if (medoclist.count(current_dict) == 0):  # de-duplicate
                medoclist.append(current_dict)
    return medoclist
# Method to query idoc server (only for AIA 1 min data)
def query_idoc(tstart, tend, cadence=['1 min'],
               waves=AIA_WAVES, local=False):
    """
    Query the IDOC SDO server for AIA data between tstart and tend.

    cadence -- one-element list: either an int number of seconds (converted
               below to the 'Nm'/'Nh'/'Nd' string syntax) or a pre-formatted
               string such as '1 min'.
               NOTE(review): mutable default argument; harmless here since
               the list is only reassigned, never mutated.
    waves   -- AIA wavelengths to request.
    local   -- when True, build a local ias_location file id plus an output
               file name instead of using the remote url.
    Returns a list of result dicts (fileid, time_start, provider, ...).
    """
    # Case where input cadence is in seconds (int)
    if (type(cadence[0]) == int):
        # NOTE(review): `/` here relies on Python 2 integer division,
        # consistent with the py2-only syntax used elsewhere in this file.
        cadence = cadence[0] / 60
        if (cadence <= 30):
            cadence = [str(cadence) + "m"]
        elif (cadence > 30) and (cadence <= 12 * 60):
            cadence = [str(cadence / 60) + "h"]
        else:
            cadence = [str(cadence / (60 * 24)) + "d"]
    # LOG.info("Cadence is %s",cadence[0])
    idoclist = []
    sdo_data_list = search(DATES=[tstart, tend],
                           WAVES=waves,
                           CADENCE=cadence)
    # Modif Pablo 2014-10-23 - Xavier 2015-03-05
    for current_row in sdo_data_list:
        if (local):
            # split "YYYY-MM-DD HH:MM:SS[.ffff]" into date / time / centisec
            date_str = str(current_row.date_obs).split()[0]
            time_str = str(current_row.date_obs).split()[1]
            if '.' in time_str:
                ms = time_str.split(".")[1]
                time_str = time_str.split(".")[0]
                ms_str = ms[0:2]
            else:
                ms_str = '00'
            current_fileid = current_row.ias_location + \
                "/S00000/image_lev1.fits"
            current_outputfilename = 'aia.lev1.' + str(current_row.wave) + \
                'A_' + date_str + 'T' + time_str + "." + \
                str(ms_str) + 'Z.image_lev1.fits'
        else:
            current_fileid = current_row.url
            current_outputfilename = None
        # current_dict = {'fileid': ,
        current_dict = {'fileid': current_fileid,
                        'time_start': current_row.date_obs,
                        'provider': "IDOC", 'filename': None,
                        'min_wave': current_row.wave,
                        'max_wave': current_row.wave,
                        'output_filename': current_outputfilename}
        if (idoclist.count(current_dict) == 0):  # de-duplicate
            idoclist.append(current_dict)
    return idoclist
def query_vso(instrument=None,
              tstart=None, tend=None,
              sample=None, pixels=None,
              resolution=1,
              min_wave=None, max_wave=None,
              unit_wave="Angstrom",
              nday=20):
    """
    Query the VSO server in sliding windows of `nday` days between tstart
    and tend; only records whose wavelength pair exactly matches
    (min_wave, max_wave) are kept.

    Returns a list of dicts (fileid, time_start, provider, filename,
    min_wave, max_wave).
    """
    vsolist = []
    # Query vso
    client = vso.VSOClient()
    current_tstart = tstart
    loop = True
    while (loop):
        current_tend = current_tstart + timedelta(days=nday)
        # LOG.info(str(current_tstart)+" - "+str(current_tend))
        # Get file list at the first wavelength
        try:
            vso_resp = client.query(vso.attrs.Time(current_tstart,
                                                   current_tend),
                                    vso.attrs.Instrument(instrument),
                                    vso.attrs.Sample(sample),
                                    vso.attrs.Resolution(resolution),
                                    vso.attrs.Pixels(pixels),
                                    vso.attrs.Wave(min_wave, max_wave,
                                                   waveunit=unit_wave))
        except:
            # NOTE(review): bare except silently ends the pagination loop on
            # any error whatsoever.
            LOG.error("Querying vso server has failed!")
            loop = False
        else:
            if (vso_resp):
                tstart_i = []
                for resp in vso_resp:
                    time_start = datetime.strptime(resp.time.start,
                                                   VSO_TFORMAT)
                    if (time_start > tend):
                        # past the requested range: stop entirely
                        loop = False
                        break
                    if (float(resp.wave.wavemax) != float(max_wave)) or \
                            (float(resp.wave.wavemin) != float(min_wave)):
                        continue
                    # NOTE(review): min_wave is filled from wavemax and
                    # max_wave from wavemin -- looks swapped; confirm intent.
                    current_row = {'fileid': SDAC_URL + resp.fileid,
                                   'time_start': time_start,
                                   'provider': resp.provider, 'filename': None,
                                   'min_wave': resp.wave.wavemax,
                                   'max_wave': resp.wave.wavemin}
                    if (vsolist.count(current_row) == 0):
                        vsolist.append(current_row)
                        tstart_i.append(time_start)
                # advance the window to the latest record actually kept
                if (len(tstart_i) > 0):
                    if (max(tstart_i) > current_tstart):
                        current_tend = max(tstart_i)
                current_tstart = current_tend
                if (current_tstart >= tend):
                    loop = False
                # NOTE(review): an empty response leaves the window
                # unadvanced, which can loop forever -- confirm intended.
    return vsolist
def query_jsoc(ds, starttime, endtime,
               wavelength=None,
               timeout=180):
    """
    Query the JSOC show_info cgi for records of data series `ds` between
    starttime and endtime, optionally filtered by wavelength.

    Returns a list of dicts (fileid, filename, time_start, provider,
    min_wave, max_wave, output_filename); an empty list on network errors.
    """
    # Define starttime and endtime (in jsoc cgi time format)
    #stime = starttime - timedelta(seconds=10) # starttime - 10 sec.
    #etime = endtime + timedelta(seconds=10) # endtime + 10 sec.
    stime = starttime
    etime = endtime
    stime = stime.strftime(JSOC_IN_TFORMAT)
    etime = etime.strftime(JSOC_IN_TFORMAT)
    if (ds == "aia.lev1"):
        ds_id = "aia__lev1"
    # NOTE(review): ds_id is unbound for any other series -- the export url
    # built below would raise NameError; confirm only "aia.lev1" is used.
    url = JSOC_URL + "/cgi-bin/ajax/show_info"
    url = url + "?ds=" + ds + "[" + stime + "-" + etime + "]"
    if (wavelength):
        url = url + "[?WAVELNTH=" + str(wavelength) + "?]"
    url = url + "&key=T_REC_index%2CT_OBS%2CWAVELNTH"
    try:
        LOG.info("Querying " + url)
        f = urllib2.urlopen(url, None, timeout)
    except urllib2.URLError, e:  # Python 2 except syntax (file-wide)
        LOG.error("Can not open %s", url)
        LOG.error(e)
        return []
    else:
        # first line is a column header; last split element is empty
        flist = f.read().split("\n")[1:-1]
    jsoclist = []
    for current_row in flist:
        current_items = current_row.split()  # [T_REC_index, T_OBS, WAVELNTH]
        current_outputfilename = ds + '.' + str(current_items[2]) + \
            'A_' + str(current_items[1]) + '.image_lev1.fits'
        current_fileid = VSO_URL + "/cgi-bin/netdrms/drms_export.cgi?series="+ds_id
        current_fileid+=";compress=rice"
        record = str(current_items[2]) + '_' + str(current_items[0])+"-"+str(current_items[0])
        current_fileid = current_fileid+";record="+record
        if (wavelength):
            if (float(current_items[2]) != float(wavelength)):
                continue
        jsoclist.append({'fileid': current_fileid, 'filename': None,
                         'time_start':
                         datetime.strptime(current_items[1],
                                           JSOC_OUT_TFORMAT),
                         'provider': "JSOC",
                         'min_wave': current_items[2],
                         'max_wave': current_items[2],
                         'output_filename': current_outputfilename})
    return jsoclist
def query_jsoc2(ds, starttime, endtime,
                wavelength=None,
                timeout=180):
    """Query the JSOC AIA server through the `jsoc` helper class.

    `timeout` is kept for interface compatibility with query_jsoc() but is
    not used by the underlying helper.

    Returns a list of dicts (fileid, filename, time_start, provider,
    min_wave, max_wave, output_filename).
    """
    # (Removed the unused stime/etime strftime locals: the jsoc helper takes
    # the raw datetime objects directly.)
    j_soc = jsoc(ds, realtime=True, starttime=starttime, endtime=endtime,
                 wavelength=wavelength, notify='<EMAIL>')
    info = j_soc.show_info(key=["T_REC_index", "T_REC", "WAVELNTH"])
    jsoclist = []
    # Drop the header line and the trailing empty line.
    for current_row in info.split("\n")[1:-1]:
        current_items = current_row.split()
        if wavelength:
            # Skip rows whose WAVELNTH does not match the request.
            if float(current_items[2]) != float(wavelength):
                continue
        current_outputfilename = (ds + '.' + str(current_items[2]) + 'A_' +
                                  str(current_items[1]) + '.image_lev1.fits')
        jsoclist.append({'fileid': None, 'filename': None,
                         'time_start':
                             datetime.strptime(current_items[1],
                                               '%Y-%m-%dT%H:%M:%SZ'),
                         'provider': "JSOC",
                         'min_wave': current_items[2],
                         'max_wave': current_items[2],
                         'output_filename': current_outputfilename})
    return jsoclist
def query_AIA_RT_data(wavelength=None, timeout=180):
    """Query the LMSAL server for the most recent real-time AIA image.

    Returns a one-element list describing the newest matching file, or an
    empty list when the listing is empty or unreachable.
    """
    urlBase = "http://sdowww.lmsal.com/sdomedia/SunInTime/mostrecent/"
    if wavelength:
        extension = "_%04d.fits" % (wavelength)
    else:
        extension = '.fits'
    fileList = []
    try:
        url = urlBase
        LOG.info("Querying " + url)
        f = urllib2.urlopen(url, None, timeout)
    except urllib2.URLError as e:
        LOG.error("Can not open %s", url)
        LOG.error(e)
    else:
        soup = BeautifulSoup(f, 'html.parser')
        fileList = [url + '/' + node.get('href')
                    for node in soup.find_all('a')
                    if node.get('href').endswith(extension)]
    # Find the most recent file by the timestamp embedded in its name.
    currentTime = datetime(1950, 1, 1)
    latestFile = ''
    for current_row in fileList:
        current_outputfilename = os.path.basename(current_row)
        time_start = datetime.strptime(current_outputfilename,
                                       'AIA%Y%m%d_%H%M%S' + extension)
        if time_start > currentTime:
            # BUGFIX: currentTime was never advanced, so the loop used to
            # keep the *last* listed file instead of the most recent one.
            currentTime = time_start
            latestFile = current_row
    if not latestFile:
        # BUGFIX: previously fell through to strptime('') and crashed when
        # the listing was empty or the server was unreachable.
        return []
    lmsalList = []
    current_outputfilename = os.path.basename(latestFile)
    current_fileid = latestFile
    time_start = datetime.strptime(current_outputfilename,
                                   'AIA%Y%m%d_%H%M%S' + extension)
    # Build the target filename used for download.
    # NOTE(review): int(wavelength) assumes a wavelength was given -- confirm
    # callers never pass wavelength=None here.
    filename = ('aia.lev1.' + str(int(wavelength)) + 'A_' +
                time_start.strftime('%Y-%m-%dT%H:%M:%S') +
                '.image_lev1.fits')
    lmsalList.append({'fileid': current_fileid,
                      'filename': filename,
                      'time_start': time_start,
                      'provider': urlBase,
                      'min_wave': wavelength,
                      'max_wave': wavelength,
                      'output_filename': filename})
    return lmsalList
def query_AIA_lmsal(starttime, endtime,
                    wavelength=None,
                    timeout=180):
    """Query the LMSAL SunInTime archive for AIA files in a date range.

    Fetches one directory listing per day between starttime and endtime and
    collects files matching the wavelength-specific extension.

    Returns a list of dicts (fileid, filename, time_start, provider,
    min_wave, max_wave, output_filename).
    """
    stime = starttime
    etime = endtime
    urlBase = "http://sdowww.lmsal.com/sdomedia/SunInTime/"
    # Build all YYYY/MM/DD path components between start and end time.
    datePartList = []
    delta = timedelta(days=1)
    while stime <= etime:
        datePartList.append("%4s/%2s/%2s" % (stime.strftime("%Y"),
                                             stime.strftime("%m"),
                                             stime.strftime("%d")))
        stime = stime + delta
    if wavelength:
        extension = "_%04d.fits" % (wavelength)
    else:
        extension = '.fits'
    fileList = []
    for dt in datePartList:
        try:
            url = urlBase + dt
            # Removed the stray print() that duplicated this log line.
            LOG.info("Querying " + url)
            f = urllib2.urlopen(url, None, timeout)
        except urllib2.URLError as e:
            LOG.error("Can not open %s", url)
            LOG.error(e)
        else:
            soup = BeautifulSoup(f, 'html.parser')
            # Renamed the loop variable: it used to shadow the open file
            # handle `f`.
            for href in [url + '/' + node.get('href')
                         for node in soup.find_all('a')
                         if node.get('href').endswith(extension)]:
                fileList.append(href)
    lmsalList = []
    for current_row in fileList:
        current_outputfilename = os.path.basename(current_row)
        current_fileid = current_row
        lmsalList.append({'fileid': current_fileid,
                          'filename': current_outputfilename,
                          'time_start': datetime.strptime(
                              current_outputfilename,
                              'AIA%Y%m%d_%H%M%S' + extension),
                          'provider': urlBase,
                          'min_wave': wavelength,
                          'max_wave': wavelength,
                          'output_filename': current_outputfilename})
    return lmsalList
def query_saio(instrument="EIT",
begindate=(TODAY - timedelta(days=1)),
enddate=TODAY,
min_wave=171.0, max_wave=171.0,
resolution=1024,
return_type="VOTABLE"):
"""
Method to query the SAIO server, visit:
http://ssa.esac.esa.int/ssa/aio/html/home_main.shtml
"""
url_prod = SAIO_URL + "/product-action?"
url_meta = SAIO_URL + "/metadata-action?"
url_meta += "RESOURCE_CLASS=OBSERVATION&SELECTED_FIELDS=OBSERVATION"
url_meta += "&QUERY=INSTRUMENT.NAME=%s" % (quote(instrument.upper(),
single=True))
url_meta | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 17 02:02:36 2020
@author: admin
"""
##Functions for replication study
# Import modules.
import pandas as pd
import statsmodels.api as sm
import numpy as np
from scipy import stats
from linearmodels import IV2SLS
# Get significance asterisk.
def significance(pval):
    """Map a p-value to its significance stars.

    Returns '' for string inputs (placeholder table cells such as '-'),
    '***'/'**'/'*' at the 1%/5%/10% levels, and a single space otherwise
    (the space keeps table columns aligned).
    """
    # isinstance instead of type(...) == str (handles str subclasses too).
    if isinstance(pval, str):
        return ''
    if pval <= 0.01:
        return '***'
    if pval <= 0.05:
        return '**'
    if pval <= 0.1:
        return '*'
    return ' '
# Generate cohort dummies.
def get_cohort_dummy(df, col, c):
    '''
    Add a dummy column 'cohort_<c>' to df.

    Inputs are
    a DataFrame,
    a column col (string), and
    an input c (cohort) for which the output variable shall return 1.

    The new column is 1 where df[col] == c, 0 for other non-missing values,
    and NaN where df[col] is missing. Returns df (modified in place).
    '''
    # Name of the cohort dummy column.
    newcol = 'cohort_' + f'{c}'

    def dummy_mapping(x):
        # BUGFIX: the original tested `x == np.nan`, which is always False
        # (NaN never compares equal to anything), so missing values were
        # silently coded 0. Use pd.isna to propagate NaN.
        if pd.isna(x):
            return np.nan
        return 1 if x == c else 0

    df[newcol] = df[col].apply(dummy_mapping)
    return df
# Set up data frame and variables for regressions.
def get_variables():
    '''
    Load data/Crime.dta and build all regression inputs.

    Returns a tuple of column-name lists (each wrapped in a list so they can
    be concatenated into design-matrix column selections) plus the prepared
    DataFrame:
    (constant, highnumber, conscription, crimerate, malvinas, navy,
     origin, cohorts, districts, hn_malvinas, df)
    '''
    # Load data.
    path = ('data/Crime.dta')
    df = pd.read_stata(path)
    # For the regressions below, add a constant to the data frame.
    df['constant'] = 1
    # Get a variable representing the strings to add them to regression functions.
    constant = ['constant']
    highnumber = ['highnumber']
    conscription = ['sm']
    crimerate = ['crimerate']
    malvinas = ['malvinas']
    navy = ['navy']
    # Get list of origin dummy names. Omit 'argentine' i.o.t. avoid multicollinearity.
    origin = ['naturalized', 'indigenous']
    # Get cohort dummies from 1929 to 1965 (added to df in place).
    for year in list(range(1929, 1966, 1)):
        get_cohort_dummy(df=df, col='cohort', c=year)
    # Get list of cohort dummy names.
    cohort_years = list(range(1930, 1966, 1)) # Omit cohort_1929 (multicollinearity).
    cohorts = []
    for i in cohort_years:
        cohorts.append('cohort_' + f'{i}')
    # Get list of district dummy names. District dummies have already been provided in data.
    district_numbers = list(range(2, 25, 1)) # Omit dist1 (multicollinearity).
    districts = []
    for i in district_numbers:
        districts.append('dist' + f'{i}')
    # Generate variable hn_malvinas: interaction term between highnumber and malvinas.
    df['hn_malvinas'] = df.highnumber*df.malvinas
    hn_malvinas = ['hn_malvinas']
    return constant, highnumber, conscription, crimerate, malvinas, navy, origin, cohorts, districts, hn_malvinas, df
# Get plot as in figure A.1.
def binned_plot(df, bin_num, ylim, years):
    '''
    Returns plots for failure rates of medical examination. To smooth out fluctuations, data can be partitioned into bin_num bins.
    For each bin the mean of failure rate is computed. Number of plots returned depends on number of cohorts desired.
    bin_num: int, number of bins
    ylim: list/2-tuple, range of y-axis of plots
    years: list of cohorts

    Side effect: draws one line plot per cohort (no return value).
    '''
    # Draft numbers span 0..1000; build bin_num equal-width bin edges.
    bins = np.linspace(0, 1000, bin_num+1)
    for i in years:
        # Mean failure rate (enfdummy) per draft-number bin for cohort i.
        binned_stats = stats.binned_statistic(x=df[df.cohort == i].draftnumber, values=df[df.cohort == i].enfdummy,
                                              statistic='mean', bins=bins)
        df_bin = pd.DataFrame()
        df_bin['Failure rate'] = binned_stats.statistic
        # Label each bin by its right edge.
        df_bin['Draftnumber'] = bins[1: bin_num+1]
        df_bin.plot.line(x='Draftnumber', y='Failure rate', title=f'Failure Rates for Cohort {i}', ylim=ylim)
# Regressions (initially for table 4).
def regress(df, method, cohort_range, cohort_dummies, controls):
    '''
    Run one OLS or IV regression of crimerate on draft eligibility.

    df: data frame to use.
    method: string, either 'IV' for IV2SLS by linearmodels or 'OLS' for OLS by statsmodels.
    cohort_range: list/2-tuple, indicating first and last cohort.
    cohort_dummies: cohort dummies to include, for 1958-'62: cohorts=cohorts[29: 33].
    controls: string, either 'y' or 'n'.

    Returns the fitted results object; returns None for any unmatched
    method/controls combination.
    '''
    # NOTE(review): get_variables() reloads the data set and rebinds df, so
    # the df argument passed in is effectively ignored -- confirm intended.
    constant, highnumber, conscription, crimerate, malvinas, navy, origin, cohorts, districts, hn_malvinas, df = get_variables()
    if method == 'OLS':
        if controls == 'y':
            vars_controls = highnumber + cohort_dummies + origin + districts + constant
            # Restrict to the cohort window and drop rows with missing values.
            df = df[(df.cohort >= cohort_range[0]) & (df.cohort <= cohort_range[1])][vars_controls + crimerate].dropna().copy()
            X = df[vars_controls].copy()
            y = df.loc[:, 'crimerate']
            rslts = sm.OLS(y, X).fit()
            return rslts
        if controls == 'n':
            vars_no_controls = highnumber + cohort_dummies + constant
            df = df[(df.cohort >= cohort_range[0]) & (df.cohort <= cohort_range[1])][vars_no_controls + crimerate].dropna().copy()
            X = df[vars_no_controls].copy()
            y = df.loc[:, 'crimerate']
            rslts = sm.OLS(y, X).fit()
            return rslts
    if method == 'IV':
        if controls == 'y':
            # NOTE(review): cohorts is overwritten with the 1958-1962 dummies
            # here, and the cohort_dummies argument is not used in the IV
            # branches -- confirm this is intended for all cohort_range values.
            cohorts=cohorts[29: 33]
            vars_controls = highnumber + conscription + cohort_dummies + origin + districts + constant
            df = df[(df.cohort >= cohort_range[0]) & (df.cohort <= cohort_range[1])][vars_controls + crimerate].copy().dropna(axis=0)
            y = df.loc[:, 'crimerate']
            # IV2SLS(dependent, exog, endog, instruments): 'sm' instrumented
            # by 'highnumber'.
            rslts = IV2SLS(y, df[constant + cohorts + origin + districts], df['sm'], df['highnumber']).fit()
            return rslts
        if controls == 'n':
            cohorts=cohorts[29: 33]
            vars_no_controls = highnumber + conscription + cohort_dummies + constant
            df = df[(df.cohort >= cohort_range[0]) & (df.cohort <= cohort_range[1])][vars_no_controls + crimerate].copy().dropna(axis=0)
            y = df.loc[:, 'crimerate']
            rslts = IV2SLS(y, df[constant + cohorts], df['sm'], df['highnumber']).fit()
            return rslts
# Regressions for table 4.
def regressions_table_4(df):
    '''
    Function returns regression results as in table 4 in Galiani et al. 2011.
    First, it computes the estimates.
    Arguments:
    df: data frame to use.

    Returns eight parallel lists (one entry per table column):
    est_sm, est_hn, std_sm, std_hn, pval_sm, pval_hn, percent_change,
    num_obs. String placeholders ('-') mark cells not applicable to a
    column's specification.
    '''
    # NOTE(review): get_variables() reloads the data set and rebinds df, so
    # the df argument passed in is effectively ignored -- confirm intended.
    constant, highnumber, conscription, crimerate, malvinas, navy, origin, cohorts, districts, hn_malvinas, df = get_variables()
    # Lists to store estimates, standard errors, no. of obs, percent change, and whether controls were used.
    est_sm = []
    est_hn = []
    std_hn = []
    std_sm = []
    pval_sm = []
    pval_hn = []
    percent_change = []
    num_obs = []
    # For computing percent change: conscription rates among draft-eligible
    # (p1) and non-eligible (p2), 1958-1962 cohorts.
    # (Chained boolean indexing; works but pandas prefers a single mask.)
    p1 = df['sm'][df.cohort >= 1958][df.cohort <= 1962][df.highnumber == 1].dropna().mean()
    p2 = df['sm'][df.cohort >= 1958][df.cohort <= 1962][df.highnumber == 0].dropna().mean()
    # Get regressions.
    # Col 1: OLS 1958-1962, no controls.
    rslts = regress(df=df, method='OLS', cohort_range=[1958, 1962], cohort_dummies=cohorts[29: 33], controls='n')
    est_sm.append('-')
    est_hn.append(rslts.params['highnumber'])
    std_sm.append('-')
    std_hn.append(rslts.HC0_se['highnumber'])
    pval_sm.append('-')
    pval_hn.append(rslts.pvalues.highnumber)
    num_obs.append(rslts.nobs)
    # Wald/IV-style rescaling of the reduced-form estimate.
    wald = (rslts.params['highnumber']/(p1 - p2))
    mean_crime = df.crimerate[df.cohort >= 1958][df.cohort <= 1962][df.highnumber == 0].mean()
    percent_change.append(100*wald/mean_crime)
    # Col 2: OLS 1958-1962, with controls.
    rslts = regress(df=df, method='OLS', cohort_range=[1958, 1962], cohort_dummies=cohorts[29: 33], controls='y')
    est_sm.append('-')
    est_hn.append(rslts.params['highnumber'])
    std_sm.append('-')
    std_hn.append(rslts.HC0_se['highnumber'])
    pval_sm.append('-')
    pval_hn.append(rslts.pvalues.highnumber)
    num_obs.append(rslts.nobs)
    wald = (rslts.params['highnumber']/(p1 - p2))
    mean_crime = df.crimerate[df.cohort >= 1958][df.cohort <= 1962][df.highnumber == 0].mean()
    percent_change.append(100*wald/mean_crime)
    # Col 3: IV 1958-1962, no controls.
    rslts = regress(df=df, method='IV', cohort_range=[1958, 1962], cohort_dummies=cohorts[29: 33], controls='n')
    est_sm.append(rslts.params['sm'])
    est_hn.append('-')
    std_sm.append(rslts.std_errors.sm)
    std_hn.append('-')
    pval_sm.append(rslts.pvalues.sm)
    pval_hn.append('-')
    num_obs.append(rslts.nobs)
    mean_crime = df.crimerate[df.cohort >= 1958][df.cohort <= 1962][df.highnumber == 0].mean()
    percent_change.append(100*rslts.params['sm']/mean_crime)
    # Col 4: IV 1958-1962, with controls.
    rslts = regress(df=df, method='IV', cohort_range=[1958, 1962], cohort_dummies=cohorts[29: 33], controls='y')
    est_sm.append(rslts.params['sm'])
    est_hn.append('-')
    std_sm.append(rslts.std_errors.sm)
    std_hn.append('-')
    pval_sm.append(rslts.pvalues.sm)
    pval_hn.append('-')
    num_obs.append(rslts.nobs)
    mean_crime = df.crimerate[df.cohort >= 1958][df.cohort <= 1962][df.highnumber == 0].mean()
    percent_change.append(100*rslts.params['sm']/mean_crime)
    # Col 5: OLS 1929-1965, no controls.
    rslts = regress(df=df, method='OLS', cohort_range=[1929, 1965], cohort_dummies=cohorts[0: 36], controls='n')
    est_sm.append('-')
    est_hn.append(rslts.params['highnumber'])
    std_sm.append('-')
    std_hn.append(rslts.HC0_se['highnumber'])
    pval_sm.append('-')
    pval_hn.append(rslts.pvalues.highnumber)
    num_obs.append(rslts.nobs)
    wald = (rslts.params['highnumber']/(p1 - p2))
    mean_crime = df.crimerate[df.cohort >= 1929][df.cohort <= 1965][df.highnumber == 0].mean()
    percent_change.append(100*wald/mean_crime)
    # Col 6: OLS 1929-1955, no controls.
    rslts = regress(df=df, method='OLS', cohort_range=[1929, 1955], cohort_dummies=cohorts[0: 26], controls='n')
    est_sm.append('-')
    est_hn.append(rslts.params['highnumber'])
    std_sm.append('-')
    std_hn.append(rslts.HC0_se['highnumber'])
    pval_sm.append('-')
    pval_hn.append(rslts.pvalues.highnumber)
    num_obs.append(rslts.nobs)
    wald = (rslts.params['highnumber']/(p1 - p2))
    mean_crime = df.crimerate[df.cohort >= 1929][df.cohort <= 1955][df.highnumber == 0].mean()
    percent_change.append(100*wald/mean_crime)
    # Col 7: OLS 1958-1965, no controls.
    rslts = regress(df=df, method='OLS', cohort_range=[1958, 1965], cohort_dummies=cohorts[29: 36], controls='n')
    est_sm.append('-')
    est_hn.append(rslts.params['highnumber'])
    std_sm.append('-')
    std_hn.append(rslts.HC0_se['highnumber'])
    pval_sm.append('-')
    pval_hn.append(rslts.pvalues.highnumber)
    num_obs.append(rslts.nobs)
    wald = (rslts.params['highnumber']/(p1 - p2))
    mean_crime = df.crimerate[df.cohort >= 1958][df.cohort <= 1965][df.highnumber == 0].mean()
    percent_change.append(100*wald/mean_crime)
    return est_sm, est_hn, std_sm, std_hn, pval_sm, pval_hn, percent_change, num_obs
# Get table 4.
def table_4(df):
'''
Function returns table representing table 4 in Galiani et al. 2011.
Arguments:
df: data frame to use.
'''
#constant, highnumber, conscription, crimerate, malvinas, navy, origin, cohorts, districts, hn_malvinas, df = get_variables()
# Get regression results.
est_sm, est_hn, std_sm, std_hn, pval_sm, pval_hn, percent_change, num_obs = regressions_table_4(df)
# Print table.
print('\033[1m' 'Table 4 - Estimated Impact of Conscription on Crime Rates ' '\033[0m')
print(128*'_')
print('{:<15s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}'\
.format('Cohort', '1958-1962', '', "1958-1962", '', "1958-1962", '', "1958-1962", '', "1929-1965", '', "1929-1955", '', \
"1958-1965", '', ''))
print('{:<15s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}{:<3s}{:>13s}'\
.format('', '(1)', '', '(2)', '', '(3)', '', '(4)', '', '(5)', '', '(6)', '', \
'(7)', '', ''))
print(128*'_')
for i in range(len(est_hn)):
if i == 0:
print('{:<15s}'.format("Draft Eligible"), end="")
if type(est_hn[i]) == str:
print('{:>13s}{:<3s}'.format('', ''), end="")
else:
print('\033[1m' '{:>13.4f}{:<3s}' '\033[0m'.format(est_hn[i], significance(pval_hn[i])), end="")
print('\n')
for i in range(len(std_hn)):
if i == 0:
print('{:<15s}'.format(''), end="")
if type(est_hn[i]) == str:
print('{:>13s}{:<3s}'.format('', ''), end="")
else:
print('{:>13.4f}{:<3s}'.format(std_hn[i], ''), end="")
print('\n')
for i in range(len(est_sm)):
if i == 0:
print('{:<15s}'.format("Conscription"), end="")
if type(est_sm[i]) == str:
print('{:>13s}{:<3s}'.format('', ''), end="")
else:
print('\033[1m' '{:>13.4f}{:<3s}' '\033[0m'.format(est_sm[i], significance(pval_sm[i])), end="")
print('\n')
for i in range(len(std_sm)):
if i == 0:
print('{:<15s}'.format(''), end="")
if type(est_sm[i]) == str:
print('{:>13s}{:<3s}'.format('', ''), end="")
else:
print('{:>13.4f}{:<3s}'.format(std_sm[i], ''), end="")
print('\n')
for | |
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import pytest
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.neural_network import flexible_shape_utils
from coremltools.models.utils import macos_version, is_macos
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
    """Build a one-layer NN spec applying the unary op `mode` to 'data'.

    The input/output shapes are taken from `x`; `alpha` is forwarded to
    ops that take a parameter (e.g. 'power', 'threshold').
    """
    shape = x.shape
    builder = neural_network.NeuralNetworkBuilder(
        [('data', datatypes.Array(*shape))],
        [('output', datatypes.Array(*shape))])
    builder.add_unary(name='unary', input_name='data',
                      output_name='output', mode=mode, alpha=alpha)
    return builder.spec
class CorrectnessTest(unittest.TestCase):
    """Shared helpers for comparing Core ML predictions against numpy."""

    def runTest(self):
        # Allows instantiating the class directly without a named test.
        pass

    def _compare_shapes(self, np_preds, coreml_preds):
        # Shapes are compared after squeezing out size-1 dimensions.
        return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape

    def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
        # If an explicit expected shape is given, require it exactly.
        if shape:
            return coreml_preds.shape == shape
        else:
            # check if shape has 0 valued dimension
            if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0:
                return True
            return coreml_preds.shape == np_preds.shape

    def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
        # Element-wise relative comparison: each pair is scaled by
        # max(1, a, b) before being checked against delta.
        np_preds = np_preds.flatten()
        coreml_preds = coreml_preds.flatten()
        for i in range(len(np_preds)):
            max_den = max(1.0, np_preds[i], coreml_preds[i])
            if np.abs(
                    np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
                return False
        return True

    @staticmethod
    def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
        """
        This utility function is used for validating random distribution layers.
        It validates the first `num_moments` moments of the prediction against
        the expected values, then overwrites `expected` in place with the
        actual predictions so callers can do element-wise compares afterwards.
        """
        def get_moment(data, k):
            # k-th central moment of the flattened sample.
            return np.mean(np.power(data - np.mean(data), k))

        if isinstance(model, str):
            model = coremltools.models.MLModel(model)

        # NOTE(review): this wraps an already-constructed MLModel in another
        # MLModel call to apply useCPUOnly -- confirm the constructor accepts
        # an MLModel here, not only a spec or path.
        model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
        prediction = model.predict(inputs, useCPUOnly=use_cpu_only)

        for output_name in expected:
            np_preds = expected[output_name]
            coreml_preds = prediction[output_name]

            np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
            coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]

            np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)

        # override expected values to allow element-wise compares
        for output_name in expected:
            expected[output_name] = prediction[output_name]

    def _test_model(self,
                    model,
                    input,
                    expected,
                    model_precision=_MLMODEL_FULL_PRECISION,
                    useCPUOnly=False,
                    output_name_shape_dict={},  # NOTE(review): mutable default; not mutated here but fragile.
                    validate_shapes_only=False):

        model_dir = None
        # if we're given a path to a model
        if isinstance(model, str):
            model = coremltools.models.MLModel(model)

        # If we're passed in a specification, save out the model
        # and then load it back up
        elif isinstance(model, coremltools.proto.Model_pb2.Model):
            model_dir = tempfile.mkdtemp()
            model_name = str(uuid.uuid4()) + '.mlmodel'
            model_path = os.path.join(model_dir, model_name)
            coremltools.utils.save_spec(model, model_path)
            # NOTE(review): loads from the in-memory spec rather than the
            # model_path just saved above -- the save looks redundant;
            # confirm intended.
            model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)

        # If we want to test the half precision case
        if model_precision == _MLMODEL_HALF_PRECISION:
            model = coremltools.utils.convert_neural_network_weights_to_fp16(
                model)

        try:
            prediction = model.predict(input, useCPUOnly=useCPUOnly)
            for output_name in expected:
                # SimpleTest only checks squeezed shapes; other suites check
                # full N-d shapes (optionally against an explicit shape).
                if self.__class__.__name__ == "SimpleTest":
                    assert (self._compare_shapes(expected[output_name],
                                                 prediction[output_name]))
                else:
                    if output_name in output_name_shape_dict:
                        output_shape = output_name_shape_dict[output_name]
                    else:
                        output_shape = []

                    if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
                        output_shape = (1,)
                    assert (self._compare_nd_shapes(expected[output_name],
                                                    prediction[output_name],
                                                    output_shape))

                if not validate_shapes_only:
                    assert (self._compare_predictions(expected[output_name],
                                                      prediction[output_name]))
        finally:
            # Remove the temporary directory if we created one
            if model_dir and os.path.exists(model_dir):
                shutil.rmtree(model_dir)
@unittest.skipIf(not is_macos() or macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
self.assertEquals(len(input_dim), builder._get_rank('output'))
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
for output_ in output_names:
self.assertEqual(len(input_dim), builder._get_rank(output_))
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
self.assertEqual(len(input_dim), builder._get_rank('output'))
    def test_load_constant_half_precision(self):
        # Re-run the load-constant test with FP16-converted weights.
        self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
| |
that has no id (%r)'
% child)
return child_id
    def __get__(self, instance, owner=None):
        """Descriptor read: resolve the stored id to its theory object.

        Returns self on class access, None when no id is stored, a
        DanglingPointer when the id cannot (yet) be resolved through the
        owning document, and otherwise the target object (optionally
        type-checked against self._cls).
        """
        # We look up the id only when the attribute is accessed.
        if instance is None: return self
        theory_id = getattr(instance, self._private_attr_name)
        if theory_id is None: return None
        document = instance.document
        if document is None:
            # No owning document yet: the id cannot be resolved.
            return DanglingPointer(theory_id)
        target = document.lookup_id(theory_id)
        if target is None:
            # Id not (yet) present in the document's identifier map.
            return DanglingPointer(theory_id)
        if self._cls is not None:
            # _cls may be given as a class *name*; resolve it lazily and
            # cache the resolved class. (Python 2 code: basestring.)
            if isinstance(self._cls, basestring):
                self._cls = Theory._theory_classes[self._cls]
            if not isinstance(target, self._cls):
                raise ValueError('Expected %s to point to a %s' % (
                    self._attr_name, self._cls.__name__))
        return target
def _cls_name(self):
if self._cls is None:
return 'theory object'
elif isinstance(self._cls, basestring):
return self._cls
else:
return self._cls.__name__
def help(self):
name = self._attr_name or self.__name__
s = 'a pointer to a %s extracted from the XML attribute %r' % (
self._cls_name(), name)
if self._is_required: s += ' (required)'
return s
class DanglingPointer(object):
    """
    Placeholder used by `_ReferenceAttribute` when the target id has
    not yet been read.  In particular, `ReferenceAttribute.__get__()`
    returns a DanglingPointer whenever a target pointer id is absent
    from the identifier map.
    """

    def __init__(self, id1):
        # Keep the unresolved target id around for later resolution.
        self.id = id1

    def __repr__(self):
        return "<Dangling Pointer: id=%r>" % self.id

    def _get_summary(self):
        # The summary rendering is identical to the repr.
        return "<Dangling Pointer: id=%r>" % self.id
class _OffsetAttribute(_AutoPopulatedXMLAttributeSpec):
    """
    An attribute holding a start or end offset.  Offsets may be stored
    in the XML in two ways: as two separate attributes (one for the
    begin offset, one for the end offset) or condensed into a single
    'start:end' attribute.  This AttributeSpec subclass reads both
    encodings.
    """

    def __init__(self, offset_side, offset_name, value_type=int):
        _AutoPopulatedXMLAttributeSpec.__init__(self)
        assert offset_side in ('start', 'end')
        self.is_start = (offset_side == 'start')
        self.offset_name = offset_name
        self.offset_attr = '%s_%s' % (offset_side, offset_name)
        self.condensed_offsets_attr = '%s_offsets' % offset_name
        self._value_type = value_type

    def get_value(self, etree, theory):
        """Read this offset from either XML encoding; None if absent."""
        attrib = etree.attrib
        if self.offset_attr in attrib:
            return self._value_type(attrib[self.offset_attr])
        if self.condensed_offsets_attr in attrib:
            # Condensed form is 'start:end'; pick our side.
            start, end = attrib[self.condensed_offsets_attr].split(':')
            return self._value_type(start if self.is_start else end)
        return None

    def serialize(self, etree, theory, **options):
        """Write the offset back, condensed by default."""
        value = getattr(theory, self.__name__, None)
        if value is None:
            return
        if options.get('condensed_offsets', True):
            # Both ends are written into the single condensed attribute.
            etree.attrib[self.condensed_offsets_attr] = '%s:%s' % (
                getattr(theory, 'start_%s' % self.offset_name),
                getattr(theory, 'end_%s' % self.offset_name))
        else:
            etree.attrib[self.offset_attr] = '%s' % value

    def help(self):
        return 'an offset extracted from XML attribute %r or %r' % (
            (self.offset_attr, self.condensed_offsets_attr))
class _ChildTheoryElement(_AutoPopulatedXMLAttributeSpec):
    """
    An attribute used to hold a child theory that is described in
    a child XML element.
    """
    def __init__(self, cls_name, is_required=False):
        """
        @param cls_name: The name of the theory class for the
            child value.  May also be a tuple of acceptable names.
        @param is_required: If true, deserialization raises when the
            child element is missing.
        """
        _AutoPopulatedXMLAttributeSpec.__init__(self)
        self._is_required = is_required
        self._cls_name = cls_name
    def _get_child_elt(self, name, etree):
        # Collect the direct children of `etree` whose tag matches
        # `name` (a single tag string or a tuple of acceptable tags).
        if isinstance(name, tuple):
            elts = [elt for elt in etree if elt.tag in name]
            name = ' or '.join(name) # for error messages.
        else:
            elts = [elt for elt in etree if elt.tag == name]
        # Exactly one match is expected; more than one is always an
        # error, and zero is an error only when the child is required.
        if len(elts) == 1:
            return elts[0]
        elif len(elts) > 1:
            raise ValueError('Expected at most one %s' % name)
        elif self._is_required:
            raise ValueError('Expected exactly one %s' % name)
        else:
            return None
    def serialize(self, etree, theory, **options):
        # Serialize the child theory (if any) and attach it to `etree`.
        child = getattr(theory, self.__name__, None)
        if child is not None:
            # Reuse the child's original element when it is already a
            # child of `etree`; otherwise build a fresh element.
            if (hasattr(child, '_etree') and child._etree in etree):
                child_etree = child.toxml(child._etree, **options)
            else:
                child_etree = child.toxml(**options)
            # NOTE(review): append is unconditional, so an element that
            # is already attached to `etree` may end up listed twice --
            # presumably the caller rebuilds `etree` first; verify.
            etree.append(child_etree)
            if isinstance(self._cls_name, tuple):
                assert child_etree.tag in self._cls_name
            else:
                assert child_etree.tag == self._cls_name
    def get_value(self, etree, theory):
        # Deserialize the matching child element into a theory object,
        # using the registry of theory classes keyed by element tag.
        name = self._cls_name or self.__name__
        child_elt = self._get_child_elt(name, etree)
        if child_elt is None:
            return None
        cls = Theory._theory_classes.get(child_elt.tag)
        if cls is None:
            raise AssertionError('Theory class %s not defined!' % name)
        return cls(child_elt, theory)
    def help(self):
        """Return a one-line description of this attribute."""
        s = 'a child %s theory' % self._cls_name
        if self._is_required: s += ' (required)'
        else: s += ' (optional)'
        return s
class _ChildTextElement(_ChildTheoryElement):
    """
    An attribute whose value should be extracted from the string text
    of a child XML element. (c.f. _TextOfElement)
    """
    def get_value(self, etree, theory):
        # Return the text of the (single) matching child element, or
        # None when the child element is absent.
        child_elt = self._get_child_elt(self._cls_name, etree)
        if KEEP_ORIGINAL_ETREE:
            # NOTE(review): the found element is cached on the spec
            # object itself (shared by every theory using this spec),
            # not on the theory instance -- looks fragile; verify.
            self._child_elt = child_elt
        if child_elt is None:
            return None
        else:
            return child_elt.text
    def serialize(self, etree, theory, **options):
        # Write the attribute value as the text of a child element.
        text = getattr(theory, self.__name__, None)
        if text is not None:
            if hasattr(self, '_child_elt') and self._child_elt in etree:
                # Reuse the cached original child element.
                child_etree = self._child_elt
            else:
                # NOTE(review): this removes *all* existing children of
                # `etree` before appending the text element -- presumably
                # a text-bearing element has no other children; confirm.
                del etree[:]
                child_etree = ET.Element(self._cls_name or self.__name__)
                etree.append(child_etree)
            child_etree.text = text
            child_etree.tail = '\n'+options.get('indent', '')
    def help(self):
        return 'a text string extracted from the XML element %r' % (
            self._cls_name)
class _TextOfElement(_AutoPopulatedXMLAttributeSpec):
    """
    An attribute whose value should be extracted from the string text
    of *this* XML element. (c.f. _ChildTextElement)
    """

    def __init__(self, is_required=False, strip=False):
        _AutoPopulatedXMLAttributeSpec.__init__(self)
        self._strip = strip
        self._is_required = is_required

    def get_value(self, etree, theory):
        """Return this element's text ('' when absent), optionally stripped."""
        raw = etree.text or ''
        text = raw.strip() if self._strip else raw
        if not text and self._is_required:
            raise ValueError('Text content is required for %s' %
                             self.__name__)
        return text

    def serialize(self, etree, theory, **options):
        """Write the attribute value (when set) as the element's text."""
        text = getattr(theory, self.__name__, None)
        if text is None:
            return
        #assert etree.text is None # only one text string!
        etree.text = text

    def help(self):
        return ("a text string extracted from this "
                "theory's XML element text")
class _ChildTheoryElementList(_AutoPopulatedXMLAttributeSpec):
    """
    An attribute whose value is a list of child theories. Each child
    theory is deserialized from a single child XML element.
    """
    def __init__(self, cls_name, index_attrib=None):
        # `index_attrib`: optional attribute name that receives each
        # child's position in the list after deserialization.
        _AutoPopulatedXMLAttributeSpec.__init__(self)
        self._cls_name = cls_name
        self._index_attrib = index_attrib
    def get_value(self, etree, theory):
        # Deserialize every direct child element whose tag matches.
        name = self._cls_name or self.__name__
        elts = [elt for elt in etree if elt.tag == name]
        cls = Theory._theory_classes.get(name)
        if cls is None:
            raise AssertionError('Theory class %s not defined!' % name)
        result = [cls(elt, theory) for elt in elts]
        if self._index_attrib:
            # Record each child's list position directly in its __dict__
            # (bypassing any descriptor bound to the same name).
            for i, child in enumerate(result):
                child.__dict__[self._index_attrib] = i
        return result
    def serialize(self, etree, theory, **options):
        children = getattr(theory, self.__name__, ())
        if KEEP_ORIGINAL_ETREE:
            # Snapshot the existing child elements so we can tell which
            # children already have their element attached to `etree`.
            child_etrees = set(etree)
        else:
            child_etrees = set()
        for child in children:
            if (hasattr(child, '_etree') and child._etree in child_etrees):
                # Update the child's original element in place.
                child_etree = child.toxml(child._etree, **options)
            else:
                child_etree = child.toxml(**options)
            # NOTE(review): append is unconditional, so an element that
            # is already attached to `etree` may be appended a second
            # time -- presumably the caller rebuilds `etree`; verify.
            etree.append(child_etree)
            assert child_etree.tag == self._cls_name
    def default_value(self):
        # A fresh list per theory instance (lists are mutable).
        return []
    def help(self):
        s = 'a list of child %s theory objects' % self._cls_name
        return s
######################################################################
#{ Enumerated Type metaclass
######################################################################
class EnumeratedType(type):
    """
    A metaclass that builds simple enumeration types (Python 2 era;
    predates the stdlib `enum` module).

    >>> colors = EnumeratedType('colors', 'red green blue')
    >>> assert colors.red != colors.green
    >>> assert colors.red == colors.red
    """
    class _BaseClass(object):
        # Base class for all enumeration members; each member wraps its
        # underlying value and a precomputed hash.
        def __init__(self, value):
            self.__value = value
            self.__hash = hash(value)
        def __repr__(self):
            return '%s.%s' % (self.__class__.__name__, self.__value)
        def __hash__(self):
            return self.__hash
        def __cmp__(self, other):
            # Python 2 comparison hook (`cmp` / `__cmp__` do not exist
            # in Python 3).  Only members of the same enumeration class
            # are comparable.
            if self.__class__ != other.__class__:
                raise ValueError(
                    "Attempt to compare %r value with %r value -- only "
                    "values from the same enumeration are comparable!" %
                    (self.__class__.__name__, other.__class__.__name__))
            return cmp(self.__value, other.__value)
        @property
        def value(self):
            # The underlying value this enumeration member wraps.
            return self.__value
    def __new__(cls, name, values):
        # Create the enumeration class itself, derived from _BaseClass.
        return type.__new__(cls, name, (cls._BaseClass,), {})
    def __init__(cls, name, values):
        # `values` may be a whitespace-separated string of member names
        # (Python 2 `basestring`) or an iterable of names.
        if isinstance(values, basestring):
            values = values.split()
        cls.values = [cls(value) for value in values]
        # Expose each member as a class attribute: colors.red etc.
        for enum_name, enum_value in zip(values, cls.values):
            setattr(cls, enum_name, enum_value)
    def __iter__(self):
        # Iterating the enumeration class yields its members in order.
        return iter(self.values)
    def __len__(self):
        return len(self.values)
    def __getitem__(self, i):
        return self.values[i]
    def __repr__(self):
        return '<%s enumeration: %r>' % (self.__name__, tuple(self.values),)
######################################################################
#{ Theory Objects Base Classes
######################################################################
class Theory(object):
"""
The base class for theory types.
"""
_theory_classes = {}
class __metaclass__(type):
    """
    Metaclass for Theory (declared Python 2 style via the
    ``__metaclass__`` class attribute): registers each Theory subclass
    in `Theory._theory_classes` and collects its auto-populated XML
    attribute specs into `_auto_attribs`.
    """
    def __init__(cls, name, bases, dct):
        type.__init__(cls, name, bases, dct)
        # Register the class in a registry.
        cls.__theory_name__ = name
        if hasattr(cls, '__overrides__'):
            # An __overrides__ attribute replaces the registry key.
            cls.__theory_name__ = cls.__overrides__
        #elif name in cls._theory_classes:
        #    print "Warning: overriding %s!" % name
        cls._theory_classes[cls.__theory_name__] = cls
        # Add an _auto_attribs attribute
        cls._auto_attribs = [
            (k,v) for (k,v) in dct.items()
            if isinstance(v, _AutoPopulatedXMLAttributeSpec)]
        # Tell each spec which attribute name it was bound to.
        for attr_name, attr_spec in cls._auto_attribs:
            attr_spec.__name__ = attr_name
        # Inherit the attribute specs declared on base classes.
        for base in bases:
            cls._auto_attribs.extend(getattr(base, '_auto_attribs', []))
        # Sort by _attribute_number (presumably a creation-order serial
        # assigned by the spec base class -- confirm), then by name.
        def sort_key(attrib):
            return (attrib[1]._attribute_number, attrib[0].lower())
        cls._auto_attribs.sort(key=sort_key)
_OWNER_IS_REQUIRED = True
def __init__(self, etree=None, owner=None, **attribs):
    """
    Construct a theory, either by deserializing `etree` or from
    keyword attribute values (but not both).

    @param etree: XML element to deserialize this theory from.
    @param owner: The owning theory object; held via a weak reference.
    @raises ValueError: If no owner is given but one is required, or
        if both `etree` and keyword attributes are supplied.
    """
    # Set our owner pointer.
    if owner is not None:
        self._owner = weakref.ref(owner)
    elif self._OWNER_IS_REQUIRED:
        raise ValueError('%s constructor requires an owner' %
                         self.__class__.__name__)
    else:
        self._owner = None
    # Initialize, either from etree or from attributes.
    if etree is not None:
        if attribs:
            raise ValueError('Specify etree or attribs, not both!')
        self._init_from_etree(etree, owner)
    else:
        # Populate each declared attribute spec from the keywords,
        # falling back to the spec's default value.
        for attr_name, attr_spec in self._auto_attribs:
            value = attribs.pop(attr_name, None)
            if value is not None:
                setattr(self, attr_name, value)
            else:
                setattr(self, attr_name, attr_spec.default_value())
def _init_from_etree(self, etree, owner):
assert etree is not None
if etree.tag != self.__class__.__theory_name__:
raise ValueError('Expected a %s, got a %s!' %
(self.__class__.__theory_name__, etree.tag))
if KEEP_ORIGINAL_ETREE:
self._etree = etree
# Fill | |
import re
from re import Pattern
from typing import NamedTuple, Tuple
__all__ = ["hand_translate"]
class Rule(NamedTuple):
    """A single word-level translation rule: every match of `pattern`
    is substituted with `replacement`."""
    # Compiled regular expression to search for (word-bounded).
    pattern: Pattern
    # English text that replaces each match of `pattern`.
    replacement: str
# All translation rules match case-insensitively.
FLAGS = re.IGNORECASE
TRANSLATIONS: Tuple[Rule, ...] = tuple(
[
Rule(pattern=re.compile(r"\banak\b", FLAGS), replacement="child"),
Rule(pattern=re.compile(r"\bdan\b", FLAGS), replacement="and"),
Rule(pattern=re.compile(r"\bjam\b", FLAGS), replacement="hour"),
Rule(pattern=re.compile(r"\brak\b", FLAGS), replacement="rack"),
Rule(pattern=re.compile(r"\bmobil\b", FLAGS), replacement="car"),
Rule(pattern=re.compile(r"\bmasker\b", FLAGS), replacement="face mask"),
Rule(pattern=re.compile(r"\btahan\b", FLAGS), replacement="withstand"),
Rule(pattern=re.compile(r"\brasa\b", FLAGS), replacement="taste"),
Rule(pattern=re.compile(r"\brumah\b", FLAGS), replacement="home"),
Rule(pattern=re.compile(r"\bgratis\b", FLAGS), replacement="free"),
Rule(pattern=re.compile(r"\bteh\b", FLAGS), replacement="tea"),
Rule(pattern=re.compile(r"\bpena\b", FLAGS), replacement="pen"),
Rule(pattern=re.compile(r"\btulis\b", FLAGS), replacement="write"),
Rule(pattern=re.compile(r"\bban\b", FLAGS), replacement="belt"),
Rule(pattern=re.compile(r"\broda\b", FLAGS), replacement="wheels"),
Rule(pattern=re.compile(r"\bkrim\b", FLAGS), replacement="cream"),
Rule(pattern=re.compile(r"\btenda\b", FLAGS), replacement="tent"),
Rule(pattern=re.compile(r"\blama\b", FLAGS), replacement="long"),
Rule(pattern=re.compile(r"\babu\b", FLAGS), replacement="ash dust"),
Rule(pattern=re.compile(r"\byang\b", FLAGS), replacement="that"),
Rule(pattern=re.compile(r"\bpaling\b", FLAGS), replacement="most"),
Rule(pattern=re.compile(r"\bmuda\b", FLAGS), replacement="young"),
Rule(pattern=re.compile(r"\bplat\b", FLAGS), replacement="plate"),
# paste generated rules below
Rule(pattern=re.compile(r"\bwanita\b", FLAGS), replacement="women"),
Rule(pattern=re.compile(r"\bmurah\b", FLAGS), replacement="cheap"),
Rule(pattern=re.compile(r"\btas\b", FLAGS), replacement="bag"),
Rule(pattern=re.compile(r"\bpria\b", FLAGS), replacement="men"),
Rule(pattern=re.compile(r"\bbayi\b", FLAGS), replacement="baby"),
Rule(pattern=re.compile(r"\buntuk\b", FLAGS), replacement="for"),
Rule(pattern=re.compile(r"\bisi\b", FLAGS), replacement="contents"),
Rule(pattern=re.compile(r"\btangan\b", FLAGS), replacement="hand"),
Rule(pattern=re.compile(r"\bkaos\b", FLAGS), replacement="t-shirt"),
Rule(pattern=re.compile(r"\bwarna\b", FLAGS), replacement="colour"),
Rule(pattern=re.compile(r"\bbaju\b", FLAGS), replacement="clothes"),
Rule(pattern=re.compile(r"\bsepatu\b", FLAGS), replacement="shoes"),
Rule(pattern=re.compile(r"\bcelana\b", FLAGS), replacement="pants"),
Rule(pattern=re.compile(r"\balat\b", FLAGS), replacement="tool"),
Rule(pattern=re.compile(r"\bbahan\b", FLAGS), replacement="ingredient"),
Rule(pattern=re.compile(r"\bpolos\b", FLAGS), replacement="plain"),
Rule(pattern=re.compile(r"\brambut\b", FLAGS), replacement="hair"),
Rule(pattern=re.compile(r"\btermurah\b", FLAGS), replacement="cheapest"),
Rule(pattern=re.compile(r"\bbisa\b", FLAGS), replacement="can"),
Rule(pattern=re.compile(r"\blampu\b", FLAGS), replacement="lamp"),
Rule(pattern=re.compile(r"\bmainan\b", FLAGS), replacement="toy"),
Rule(
pattern=re.compile(r"\bhijab\b", FLAGS),
replacement="religious women head covering",
),
Rule(pattern=re.compile(r"\bpanjang\b", FLAGS), replacement="long"),
Rule(
pattern=re.compile(r"\bsar[uo]ng\b", FLAGS),
replacement="wrap knot cover lower body",
),
Rule(pattern=re.compile(r"\bkain\b", FLAGS), replacement="fabric"),
Rule(pattern=re.compile(r"\bkarakter\b", FLAGS), replacement="character"),
Rule(pattern=re.compile(r"\bsabun\b", FLAGS), replacement="soap"),
Rule(pattern=re.compile(r"\bpaket\b", FLAGS), replacement="packet"),
Rule(pattern=re.compile(r"\bplastik\b", FLAGS), replacement="plastic"),
Rule(pattern=re.compile(r"\bhitam\b", FLAGS), replacement="black"),
Rule(pattern=re.compile(r"\btempat\b", FLAGS), replacement="place"),
Rule(pattern=re.compile(r"\bgamis\b", FLAGS), replacement="robe"),
Rule(pattern=re.compile(r"\bbotol\b", FLAGS), replacement="bottle"),
Rule(pattern=re.compile(r"\bputih\b", FLAGS), replacement="white"),
Rule(pattern=re.compile(r"\bdengan\b", FLAGS), replacement="with"),
Rule(pattern=re.compile(r"\bkaki\b", FLAGS), replacement="feet"),
Rule(pattern=re.compile(r"\bwajah\b", FLAGS), replacement="face"),
Rule(
pattern=re.compile(r"\bjilbab\b", FLAGS),
replacement="religious women head covering",
),
Rule(pattern=re.compile(r"\bgrosir\b", FLAGS), replacement="wholesaler"),
Rule(pattern=re.compile(r"\bdompet\b", FLAGS), replacement="purse"),
Rule(pattern=re.compile(r"\btali\b", FLAGS), replacement="rope"),
Rule(pattern=re.compile(r"\bpendek\b", FLAGS), replacement="short"),
Rule(pattern=re.compile(r"\bserbaguna\b", FLAGS), replacement="multi-purpose"),
Rule(pattern=re.compile(r"\bkabel\b", FLAGS), replacement="cable"),
Rule(pattern=re.compile(r"\bbuku\b", FLAGS), replacement="book"),
Rule(pattern=re.compile(r"\btahun\b", FLAGS), replacement="year"),
Rule(pattern=re.compile(r"\bkotak\b", FLAGS), replacement="box"),
Rule(pattern=re.compile(r"\bpembersih\b", FLAGS), replacement="cleaners"),
Rule(pattern=re.compile(r"\blipat\b", FLAGS), replacement="folding"),
Rule(pattern=re.compile(r"\bminyak\b", FLAGS), replacement="oil"),
Rule(pattern=re.compile(r"\bbunga\b", FLAGS), replacement="flower"),
Rule(pattern=re.compile(r"\bselempang\b", FLAGS), replacement="sash"),
Rule(pattern=re.compile(r"\bkulit\b", FLAGS), replacement="skin"),
Rule(pattern=re.compile(r"\basli\b", FLAGS), replacement="original"),
Rule(pattern=re.compile(r"\bkatun\b", FLAGS), replacement="cotton"),
Rule(pattern=re.compile(r"\bsilikon\b", FLAGS), replacement="silicone"),
Rule(pattern=re.compile(r"\btopi\b", FLAGS), replacement="hat"),
Rule(pattern=re.compile(r"\bterbaru\b", FLAGS), replacement="the latest"),
Rule(pattern=re.compile(r"\bdewasa\b", FLAGS), replacement="adult"),
Rule(pattern=re.compile(r"\binstan\b", FLAGS), replacement="instant"),
Rule(pattern=re.compile(r"\bmadu\b", FLAGS), replacement="honey"),
Rule(pattern=re.compile(r"\bobat\b", FLAGS), replacement="drug"),
Rule(pattern=re.compile(r"\bmandi\b", FLAGS), replacement="bath"),
Rule(pattern=re.compile(r"\bdinding\b", FLAGS), replacement="wall"),
Rule(pattern=re.compile(r"\bterlaris\b", FLAGS), replacement="bestseller"),
Rule(pattern=re.compile(r"\bsusu\b", FLAGS), replacement="milk"),
Rule(pattern=re.compile(r"\bkaca\b", FLAGS), replacement="glass"),
Rule(pattern=re.compile(r"\bdapur\b", FLAGS), replacement="kitchen"),
Rule(pattern=re.compile(r"\bkacamata\b", FLAGS), replacement="spectacles"),
Rule(pattern=re.compile(r"\bukuran\b", FLAGS), replacement="size"),
Rule(pattern=re.compile(r"\blengan\b", FLAGS), replacement="arm"),
Rule(pattern=re.compile(r"\bresmi\b", FLAGS), replacement="official"),
Rule(pattern=re.compile(r"\bmata\b", FLAGS), replacement="eye"),
Rule(pattern=re.compile(r"\blucu\b", FLAGS), replacement="funny"),
Rule(pattern=re.compile(r"\bgaransi\b", FLAGS), replacement="warranty"),
Rule(pattern=re.compile(r"\bkuas\b", FLAGS), replacement="brush"),
Rule(pattern=re.compile(r"\bgantungan\b", FLAGS), replacement="hanger"),
Rule(pattern=re.compile(r"\bcuci\b", FLAGS), replacement="wash"),
Rule(pattern=re.compile(r"\bbesar\b", FLAGS), replacement="big"),
Rule(pattern=re.compile(r"\bperempuan\b", FLAGS), replacement="girls"),
Rule(pattern=re.compile(r"\bsetelan\b", FLAGS), replacement="suit"),
Rule(pattern=re.compile(r"\bsepeda\b", FLAGS), replacement="bike"),
Rule(pattern=re.compile(r"\bdalam\b", FLAGS), replacement="in"),
Rule(pattern=re.compile(r"\bikat\b", FLAGS), replacement="tie-up"),
Rule(pattern=re.compile(r"\btanpa\b", FLAGS), replacement="without"),
Rule(pattern=re.compile(r"\bkecil\b", FLAGS), replacement="small"),
Rule(pattern=re.compile(r"\brajut\b", FLAGS), replacement="knit"),
Rule(pattern=re.compile(r"\btebal\b", FLAGS), replacement="thick"),
Rule(pattern=re.compile(r"\basi\b", FLAGS), replacement="breast milk"),
Rule(pattern=re.compile(r"\bmerah\b", FLAGS), replacement="red"),
Rule(pattern=re.compile(r"\bransel\b", FLAGS), replacement="backpack"),
Rule(pattern=re.compile(r"\bcoklat\b", FLAGS), replacement="chocolate"),
Rule(pattern=re.compile(r"\bbuah\b", FLAGS), replacement="fruit"),
Rule(pattern=re.compile(r"\bkayu\b", FLAGS), replacement="wood"),
Rule(pattern=re.compile(r"\bkucing\b", FLAGS), replacement="cat"),
Rule(pattern=re.compile(r"\bpisau\b", FLAGS), replacement="knife"),
Rule(pattern=re.compile(r"\bkosmetik\b", FLAGS), replacement="cosmetics"),
Rule(pattern=re.compile(r"\bpakaian\b", FLAGS), replacement="clothes"),
Rule(pattern=re.compile(r"\btunik\b", FLAGS), replacement="tunic"),
Rule(pattern=re.compile(r"\bkemasan\b", FLAGS), replacement="packaging"),
Rule(pattern=re.compile(r"\bharga\b", FLAGS), replacement="price"),
Rule(pattern=re.compile(r"\bbayar\b", FLAGS), replacement="pay"),
Rule(
pattern=re.compile(r"\bmultifungsi\b", FLAGS), replacement="multi-function"
),
Rule(pattern=re.compile(r"\bpelindung\b", FLAGS), replacement="protector"),
Rule(pattern=re.compile(r"\belektrik\b", FLAGS), replacement="electric"),
Rule(pattern=re.compile(r"\bgigi\b", FLAGS), replacement="tooth"),
Rule(pattern=re.compile(r"\bhanduk\b", FLAGS), replacement="towel"),
Rule(pattern=re.compile(r"\bbaterai\b", FLAGS), replacement="battery"),
Rule(pattern=re.compile(r"\bparfum\b", FLAGS), replacement="perfume"),
Rule(pattern=re.compile(r"\bbumbu\b", FLAGS), replacement="seasoning"),
Rule(pattern=re.compile(r"\bkertas\b", FLAGS), replacement="paper"),
Rule(pattern=re.compile(r"\bbulu\b", FLAGS), replacement="fur"),
Rule(pattern=re.compile(r"\bpensil\b", FLAGS), replacement="pencil"),
Rule(pattern=re.compile(r"\bbola\b", FLAGS), replacement="ball"),
Rule(pattern=re.compile(r"\bmakanan\b", FLAGS), replacement="food"),
Rule(pattern=re.compile(r"\bcair\b", FLAGS), replacement="liquid"),
Rule(pattern=re.compile(r"\bjepit\b", FLAGS), replacement="clip"),
Rule(pattern=re.compile(r"\bkipas\b", FLAGS), replacement="fan"),
Rule(pattern=re.compile(r"\bbantal\b", FLAGS), replacement="pillow"),
Rule(pattern=re.compile(r"\bbaru\b", FLAGS), replacement="new"),
Rule(pattern=re.compile(r"\bdaster\b", FLAGS), replacement="house dress"),
Rule(pattern=re.compile(r"\blem\b", FLAGS), replacement="glue"),
Rule(pattern=re.compile(r"\btidur\b", FLAGS), replacement="sleep"),
Rule(pattern=re.compile(r"\bgantung\b", FLAGS), replacement="hanging"),
Rule(pattern=re.compile(r"\bolahraga\b", FLAGS), replacement="sports"),
Rule(pattern=re.compile(r"\bmeja\b", FLAGS), replacement="table"),
Rule(pattern=re.compile(r"\bikan\b", FLAGS), replacement="fish"),
Rule(pattern=re.compile(r"\btimbangan\b", FLAGS), replacement="balance"),
Rule(pattern=re.compile(r"\bmakan\b", FLAGS), replacement="eat"),
Rule(pattern=re.compile(r"\bsikat\b", FLAGS), replacement="brush"),
Rule(pattern=re.compile(r"\bedukasi\b", FLAGS), replacement="education"),
Rule(pattern=re.compile(r"\bpompa\b", FLAGS), replacement="pump"),
Rule(pattern=re.compile(r"\bbiru\b", FLAGS), replacement="blue"),
Rule(pattern=re.compile(r"\borganik\b", FLAGS), replacement="organic"),
Rule(pattern=re.compile(r"\bbalon\b", FLAGS), replacement="balloon"),
Rule(pattern=re.compile(r"\bsusun\b", FLAGS), replacement="stack"),
Rule(pattern=re.compile(r"\bstiker\b", FLAGS), replacement="sticker"),
Rule(pattern=re.compile(r"\bdekorasi\b", FLAGS), replacement="decoration"),
Rule(pattern=re.compile(r"\bkemeja\b", FLAGS), replacement="shirt"),
Rule(pattern=re.compile(r"\bmesin\b", FLAGS), replacement="machine"),
Rule(pattern=re.compile(r"\bkurma\b", FLAGS), replacement="dates"),
Rule(pattern=re.compile(r"\btelur\b", FLAGS), replacement="egg"),
Rule(pattern=re.compile(r"\bmie\b", FLAGS), replacement="noodles"),
Rule(pattern=re.compile(r"\bkartu\b", FLAGS), replacement="card"),
Rule(pattern=re.compile(r"\bbadan\b", FLAGS), replacement="body"),
Rule(pattern=re.compile(r"\bbusa\b", FLAGS), replacement="foam"),
Rule(pattern=re.compile(r"\bpiring\b", FLAGS), replacement="plate"),
Rule(pattern=re.compile(r"\bsendok\b", FLAGS), replacement="spoon"),
Rule(pattern=re.compile(r"\bangin\b", FLAGS), replacement="wind"),
Rule(pattern=re.compile(r"\bpemutih\b", FLAGS), replacement="bleach"),
Rule(pattern=re.compile(r"\bpanci\b", FLAGS), replacement="pan"),
Rule(pattern=re.compile(r"\bjaket\b", FLAGS), replacement="jacket"),
Rule(pattern=re.compile(r"\bbulat\b", FLAGS), replacement="round"),
Rule(pattern=re.compile(r"\bcowok\b", FLAGS), replacement="man"),
Rule(pattern=re.compile(r"\bbentuk\b", FLAGS), replacement="shape"),
Rule(pattern=re.compile(r"\bboneka\b", FLAGS), replacement="doll"),
Rule(pattern=re.compile(r"\bcewek\b", FLAGS), replacement="girl"),
Rule(pattern=re.compile(r"\bkartun\b", FLAGS), replacement="cartoon"),
Rule(pattern=re.compile(r"\bpiyama\b", FLAGS), replacement="sleeping clothes"),
Rule(pattern=re.compile(r"\baksesoris\b", FLAGS), replacement="accessories"),
Rule(pattern=re.compile(r"\bpulpen\b", FLAGS), replacement="pen"),
Rule(
pattern=re.compile(r"\bbatik\b", FLAGS),
replacement="coloured designs on clothes",
),
Rule(pattern=re.compile(r"\bkantong\b", FLAGS), replacement="bag"),
Rule(pattern=re.compile(r"\bkapsul\b", FLAGS), replacement="capsule"),
Rule(pattern=re.compile(r"\bminum\b", FLAGS), replacement="drink"),
Rule(pattern=re.compile(r"\bbening\b", FLAGS), replacement="clear"),
Rule(pattern=re.compile(r"\bserut\b", FLAGS), replacement="shaved"),
Rule(pattern=re.compile(r"\bbordir\b", FLAGS), replacement="embroidery"),
Rule(pattern=re.compile(r"\bpinggang\b", FLAGS), replacement="waist"),
Rule(pattern=re.compile(r"\blaki\b", FLAGS), replacement="men"),
Rule(pattern=re.compile(r"\bcukur\b", FLAGS), replacement="shaving"),
Rule(pattern=re.compile(r"\bgoreng\b", FLAGS), replacement="fried"),
Rule(pattern=re.compile(r"\bkualitas\b", FLAGS), replacement="quality"),
Rule(pattern=re.compile(r"\blembut\b", FLAGS), replacement="soft"),
Rule(pattern=re.compile(r"\bpanas\b", FLAGS), replacement="hot"),
Rule(pattern=re.compile(r"\bkue\b", FLAGS), replacement="pastries"),
Rule(pattern=re.compile(r"\bbubuk\b", FLAGS), replacement="powder"),
Rule(pattern=re.compile(r"\belastis\b", FLAGS), replacement="elastic"),
Rule(pattern=re.compile(r"\bbedak\b", FLAGS), replacement="powder"),
Rule(pattern=re.compile(r"\bbesi\b", FLAGS), replacement="iron"),
Rule(pattern=re.compile(r"\bbasah\b", FLAGS), replacement="wet"),
Rule(pattern=re.compile(r"\batasan\b", FLAGS), replacement="superior"),
Rule(pattern=re.compile(r"\bkopi\b", FLAGS), replacement="coffee"),
Rule(pattern=re.compile(r"\bdaun\b", FLAGS), replacement="leaf"),
Rule(pattern=re.compile(r"\blistrik\b", FLAGS), replacement="electricity"),
Rule(pattern=re.compile(r"\btransparan\b", FLAGS), replacement="transparent"),
Rule(pattern=re.compile(r"\bkerudung\b", FLAGS), replacement="veil"),
Rule(pattern=re.compile(r"\btisu\b", FLAGS), replacement="tissue"),
Rule(pattern=re.compile(r"\bhalus\b", FLAGS), replacement="smooth"),
Rule(pattern=re.compile(r"\btutup\b", FLAGS), replacement="closed"),
Rule(pattern=re.compile(r"\bberkualitas\b", FLAGS), replacement="quality"),
Rule(pattern=re.compile(r"\bkran\b", FLAGS), replacement="faucet"),
Rule(pattern=re.compile(r"\btanaman\b", FLAGS), replacement="plant"),
Rule(pattern=re.compile(r"\bhias\b", FLAGS), replacement="ornamental"),
Rule(pattern=re.compile(r"\bkuning\b", FLAGS), replacement="yellow"),
Rule(pattern=re.compile(r"\bvanila\b", FLAGS), replacement="vanilla"),
Rule(pattern=re.compile(r"\bcantik\b", FLAGS), replacement="beautiful"),
Rule(pattern=re.compile(r"\bkepala\b", FLAGS), replacement="head"),
Rule(pattern=re.compile(r"\bmerk\b", FLAGS), replacement="brand"),
Rule(pattern=re.compile(r"\busia\b", FLAGS), replacement="age"),
Rule(pattern=re.compile(r"\bimpor\b", FLAGS), replacement="import"),
Rule(pattern=re.compile(r"\bayam\b", FLAGS), replacement="chicken"),
Rule(pattern=re.compile(r"\blapis\b", FLAGS), replacement="layers"),
Rule(pattern=re.compile(r"\bkamar\b", FLAGS), replacement="room"),
Rule(pattern=re.compile(r"\bjerawat\b", FLAGS), replacement="acne"),
Rule(pattern=re.compile(r"\bgula\b", FLAGS), replacement="sugar"),
Rule(pattern=re.compile(r"\bgambar\b", FLAGS), replacement="picture"),
Rule(pattern=re.compile(r"\bpopok\b", FLAGS), replacement="diaper"),
Rule(pattern=re.compile(r"\bbawang\b", FLAGS), replacement="onion"),
Rule(pattern=re.compile(r"\bbando\b", FLAGS), replacement="headband"),
Rule(pattern=re.compile(r"\bkamera\b", FLAGS), replacement="camera"),
Rule(pattern=re.compile(r"\bhijau\b", FLAGS), replacement="green"),
Rule(pattern=re.compile(r"\bgaya\b", FLAGS), replacement="style"),
Rule(pattern=re.compile(r"\bbeli\b", FLAGS), replacement="buy"),
Rule(pattern=re.compile(r"\bsprei\b", FLAGS), replacement="bedsheet"),
Rule(pattern=re.compile(r"\becer\b", FLAGS), replacement="retail"),
Rule(pattern=re.compile(r"\bkeranjang\b", FLAGS), replacement="basket"),
Rule(pattern=re.compile(r"\bcairan\b", FLAGS), replacement="liquid"),
Rule(pattern=re.compile(r"\bwarni\b", FLAGS), replacement="colourful"),
Rule(pattern=re.compile(r"\bmasak\b", FLAGS), replacement="cook"),
Rule(pattern=re.compile(r"\bkunci\b", FLAGS), replacement="key"),
Rule(pattern=re.compile(r"\bbintang\b", FLAGS), replacement="star"),
Rule(pattern=re.compile(r"\bkering\b", FLAGS), replacement="dry"),
Rule(pattern=re.compile(r"\bspons\b", FLAGS), replacement="sponge"),
Rule(pattern=re.compile(r"\bpelangsing\b", FLAGS), replacement="slimming"),
Rule(pattern=re.compile(r"\bmawar\b", FLAGS), replacement="rose"),
Rule(pattern=re.compile(r"\balis\b", FLAGS), replacement="eyebrow"),
Rule(pattern=re.compile(r"\blengkap\b", FLAGS), replacement="complete"),
Rule(pattern=re.compile(r"\bsambungan\b", FLAGS), replacement="connection"),
Rule(pattern=re.compile(r"\bmika\b", FLAGS), replacement="mica"),
Rule(pattern=re.compile(r"\bbulan\b", FLAGS), replacement="month"),
Rule(pattern=re.compile(r"\bkekinian\b", FLAGS), replacement="present"),
Rule(pattern=re.compile(r"\bkuku\b", FLAGS), replacement="nail"),
Rule(pattern=re.compile(r"\bperekat\b", FLAGS), replacement="adhesive"),
Rule(pattern=re.compile(r"\blayar\b", FLAGS), replacement="screen"),
Rule(pattern=re.compile(r"\bhuruf\b", FLAGS), replacement="alphabet"),
Rule(pattern=re.compile(r"\bberas\b", FLAGS), replacement="rice"),
Rule(pattern=re.compile(r"\bsekolah\b", FLAGS), replacement="school"),
Rule(pattern=re.compile(r"\bcetakan\b", FLAGS), replacement="print"),
Rule(pattern=re.compile(r"\brenda\b", FLAGS), replacement="lace"),
Rule(pattern=re.compile(r"\bpita\b", FLAGS), replacement="tape"),
Rule(pattern=re.compile(r"\bmangkok\b", FLAGS), replacement="bowl"),
Rule(pattern=re.compile(r"\btempel\b", FLAGS), replacement="paste"),
Rule(pattern=re.compile(r"\btambahan\b", FLAGS), replacement="additional"),
Rule(pattern=re.compile(r"\btinggi\b", FLAGS), replacement="high"),
Rule(pattern=re.compile(r"\bunik\b", FLAGS), replacement="unique"),
Rule(pattern=re.compile(r"\bsendal\b", FLAGS), replacement="sandals"),
Rule(pattern=re.compile(r"\bjepang\b", FLAGS), replacement="japan"),
Rule(pattern=re.compile(r"\bkeju\b", FLAGS), replacement="cheese"),
Rule(pattern=re.compile(r"\bpenyimpanan\b", FLAGS), replacement="storage"),
Rule(pattern=re.compile(r"\bkancing\b", FLAGS), replacement="buttons"),
Rule(pattern=re.compile(r"\blemari\b", FLAGS), replacement="cupboard"),
Rule(pattern=re.compile(r"\bhamil\b", FLAGS), replacement="pregnant"),
Rule(pattern=re.compile(r"\bpenutup\b", FLAGS), replacement="covering"),
Rule(
pattern=re.compile(r"\bkeren\b", FLAGS),
replacement="fashionable attractive",
),
Rule(pattern=re.compile(r"\bkombinasi\b", FLAGS), replacement="combination"),
Rule(pattern=re.compile(r"\bquran\b", FLAGS), replacement="holy book"),
Rule(pattern=re.compile(r"\bpayung\b", FLAGS), replacement="umbrella"),
Rule(pattern=re.compile(r"\bkulkas\b", FLAGS), replacement="refrigerator"),
Rule(pattern=re.compile(r"\bkerja\b", FLAGS), replacement="work"),
Rule(pattern=re.compile(r"\bmutiara\b", FLAGS), replacement="pearl"),
Rule(pattern=re.compile(r"\bdari\b", FLAGS), replacement="from"),
Rule(pattern=re.compile(r"\bperlengkapan\b", FLAGS), replacement="equipment"),
Rule(pattern=re.compile(r"\blakban\b", FLAGS), replacement="duct tape"),
Rule(pattern=re.compile(r"\bkeset\b", FLAGS), replacement="mat"),
Rule(pattern=re.compile(r"\boleh\b", FLAGS), replacement="by"),
Rule(pattern=re.compile(r"\bhujan\b", FLAGS), replacement="rain"),
Rule(pattern=re.compile(r"\bsegi\b", FLAGS), replacement="side"),
Rule(pattern=re.compile(r"\bkawat\b", FLAGS), replacement="wire"),
Rule(pattern=re.compile(r"\bselang\b", FLAGS), replacement="hose"),
Rule(pattern=re.compile(r"\bba[k]?so\b", FLAGS), replacement="meatball"),
Rule(pattern=re.compile(r"\bpijat\b", FLAGS), replacement="massage"),
Rule(pattern=re.compile(r"\bkomedo\b", FLAGS), replacement="blackheads"),
Rule(pattern=re.compile(r"\blakilaki\b", FLAGS), replacement="man"),
Rule(pattern=re.compile(r"\bbagus\b", FLAGS), replacement="good"),
Rule(pattern=re.compile(r"\bbelajar\b", FLAGS), replacement="study"),
Rule(pattern=re.compile(r"\bgurita\b", FLAGS), replacement="octopus"),
Rule(pattern=re.compile(r"\bsaus\b", FLAGS), replacement="sauce"),
Rule(pattern=re.compile(r"\broti\b", FLAGS), replacement="bread"),
Rule(pattern=re.compile(r"\bjarum\b", FLAGS), replacement="needle"),
Rule(pattern=re.compile(r"\bhiasan\b", FLAGS), replacement="decoration"),
Rule(pattern=re.compile(r"\bcatok\b", FLAGS), replacement="clamp"),
Rule(pattern=re.compile(r"\bmuat\b", FLAGS), replacement="load"),
Rule(
pattern=re.compile(r"\badem\b", FLAGS), replacement="fashionable attractive"
),
Rule(pattern=re.compile(r"\blembar\b", FLAGS), replacement="sheet"),
Rule(pattern=re.compile(r"\bdepan\b", FLAGS), replacement="front"),
Rule(pattern=re.compile(r"\bcadar\b", FLAGS), replacement="veil"),
Rule(pattern=re.compile(r"\btelon\b", FLAGS), replacement="telephone"),
Rule(pattern=re.compile(r"\btelinga\b", FLAGS), replacement="ear"),
Rule(pattern=re.compile(r"\bkorset\b", FLAGS), replacement="corset"),
Rule(pattern=re.compile(r"\bkanebo\b", FLAGS), replacement="chamois cloth"),
Rule(pattern=re.compile(r"\bdua\b", FLAGS), replacement="two"),
Rule(pattern=re.compile(r"\bpraktis\b", FLAGS), replacement="practical"),
Rule(pattern=re.compile(r"\bcermin\b", FLAGS), replacement="mirror"),
Rule(pattern=re.compile(r"\bkode\b", FLAGS), replacement="code"),
Rule(pattern=re.compile(r"\bulang\b", FLAGS), replacement="repeated"),
Rule(pattern=re.compile(r"\bgelas\b", FLAGS), replacement="glass"),
Rule(pattern=re.compile(r"\bpakai\b", FLAGS), replacement="wear"),
Rule(pattern=re.compile(r"\bpintu\b", FLAGS), replacement="door"),
Rule(pattern=re.compile(r"\bplisket\b", FLAGS), replacement="plastic tape"),
Rule(pattern=re.compile(r"\bdebu\b", FLAGS), replacement="dust"),
Rule(pattern=re.compile(r"\bnyamuk\b", FLAGS), replacement="mosquito"),
Rule(pattern=re.compile(r"\bsajadah\b", FLAGS), replacement="prayer mat"),
Rule(pattern=re.compile(r"\bkeran\b", FLAGS), replacement="tap"),
Rule(pattern=re.compile(r"\bpalsu\b", FLAGS), replacement="false"),
Rule(pattern=re.compile(r"\bsedotan\b", FLAGS), replacement="straw"),
Rule(pattern=re.compile(r"\bbelakang\b", FLAGS), replacement="back"),
Rule(pattern=re.compile(r"\bjaring\b", FLAGS), replacement="net"),
Rule(pattern=re.compile(r"\bpelancar\b", FLAGS), replacement="smoother"),
Rule(pattern=re.compile(r"\bbendera\b", FLAGS), replacement="flag"),
Rule(pattern=re.compile(r"\bkantor\b", FLAGS), replacement="office"),
Rule(pattern=re.compile(r"\bpenghilang\b", FLAGS), replacement="remover"),
Rule(pattern=re.compile(r"\bsalep\b", FLAGS), replacement="ointment"),
Rule(pattern=re.compile(r"\bongkir\b", FLAGS), replacement="shipping"),
Rule(pattern=re.compile(r"\bperawatan\b", FLAGS), replacement="care"),
Rule(pattern=re.compile(r"\bselimut\b", FLAGS), replacement="blanket"),
Rule(pattern=re.compile(r"\bgigitan\b", FLAGS), replacement="bite"),
Rule(pattern=re.compile(r"\bleher\b", FLAGS), replacement="neck"),
Rule(pattern=re.compile(r"\bruangan\b", FLAGS), replacement="room"),
Rule(pattern=re.compile(r"\bsayur\b", FLAGS), replacement="vegetable"),
Rule(pattern=re.compile(r"\bkolam\b", FLAGS), replacement="pool"),
Rule(pattern=re.compile(r"\bsalur\b", FLAGS), replacement="channel"),
Rule(pattern=re.compile(r"\bsambal\b", FLAGS), replacement="chili sauce"),
Rule(pattern=re.compile(r"\bsudut\b", FLAGS), replacement="corner"),
Rule(pattern=re.compile(r"\bjamur\b", FLAGS), replacement="mushrooms"),
Rule(pattern=re.compile(r"\bpenjepit\b", FLAGS), replacement="clamp"),
Rule(pattern=re.compile(r"\baman\b", FLAGS), replacement="secure"),
Rule(pattern=re.compile(r"\blubang\b", FLAGS), replacement="hole"),
Rule(pattern=re.compile(r"\bmpasi\b", FLAGS), replacement="wait"),
Rule(pattern=re.compile(r"\bangka\b", FLAGS), replacement="numbers"),
Rule(pattern=re.compile(r"\bbatre\b", FLAGS), replacement="battery"),
Rule(pattern=re.compile(r"\bdus\b", FLAGS), replacement="box"),
Rule(pattern=re.compile(r"\btepung\b", FLAGS), replacement="flour"),
Rule(pattern=re.compile(r"\bkelambu\b", FLAGS), replacement="mosquito net"),
Rule(pattern=re.compile(r"\bsatuan\b", FLAGS), replacement="unit"),
Rule(pattern=re.compile(r"\bcokelat\b", FLAGS), replacement="chocolate"),
Rule(pattern=re.compile(r"\bpedas\b", FLAGS), replacement="spicy"),
Rule(pattern=re.compile(r"\batau\b", FLAGS), replacement="or"),
Rule(pattern=re.compile(r"\blokal\b", FLAGS), replacement="local"),
Rule(pattern=re.compile(r"\brantai\b", FLAGS), replacement="chain"),
Rule(pattern=re.compile(r"\bjeruk\b", FLAGS), replacement="orange"),
Rule(pattern=re.compile(r"\btoples\b", FLAGS), replacement="jar"),
Rule(pattern=re.compile(r"\bperlak\b", FLAGS), replacement="treatment"),
Rule(pattern=re.compile(r"\bgelang\b", FLAGS), replacement="bracelet"),
Rule(pattern=re.compile(r"\bpasang\b", FLAGS), replacement="plug it in"),
Rule(pattern=re.compile(r"\bgarpu\b", FLAGS), replacement="fork"),
Rule(pattern=re.compile(r"\bsaringan\b", FLAGS), replacement="filter"),
Rule(pattern=re.compile(r"\bmedis\b", FLAGS), replacement="medical"),
Rule(pattern=re.compile(r"\bspon\b", FLAGS), replacement="sponge"),
Rule(pattern=re.compile(r"\btabung\b", FLAGS), replacement="tube"),
Rule(pattern=re.compile(r"\bkapas\b", FLAGS), replacement="cotton"),
Rule(pattern=re.compile(r"\bemas\b", FLAGS), replacement="gold"),
Rule(pattern=re.compile(r"\bpasir\b", FLAGS), replacement="sand"),
Rule(pattern=re.compile(r"\bshampo\b", FLAGS), replacement="shampoo"),
Rule(pattern=re.compile(r"\bstok\b", FLAGS), replacement="stock"),
Rule(pattern=re.compile(r"\bgunting\b", FLAGS), replacement="scissor"),
Rule(pattern=re.compile(r"\btidak\b", FLAGS), replacement="not"),
Rule(pattern=re.compile(r"\bmenyusui\b", FLAGS), replacement="breastfeeding"),
Rule(pattern=re.compile(r"\breguler\b", FLAGS), replacement="regular"),
Rule(pattern=re.compile(r"\bminuman\b", FLAGS), replacement="drink"),
Rule(pattern=re.compile(r"\bpesta\b", FLAGS), replacement="party"),
Rule(pattern=re.compile(r"\btulang\b", FLAGS), replacement="bone"),
Rule(pattern=re.compile(r"\bgaris\b", FLAGS), replacement="line"),
Rule(pattern=re.compile(r"\bpentil\b", FLAGS), replacement="valve"),
Rule(pattern=re.compile(r"\bjahe\b", FLAGS), replacement="ginger"),
Rule(pattern=re.compile(r"\bapel\b", FLAGS), replacement="apple"),
Rule(pattern=re.compile(r"\bstelan\b", FLAGS), replacement="suit"),
Rule(pattern=re.compile(r"\btinta\b", FLAGS), replacement="ink"),
Rule(pattern=re.compile(r"\bgalon\b", FLAGS), replacement="gallon"),
Rule(pattern=re.compile(r"\blipstik\b", FLAGS), replacement="lipstick"),
Rule(pattern=re.compile(r"\bsumpit\b", FLAGS), replacement="chopsticks"),
Rule(pattern=re.compile(r"\bproduk\b", FLAGS), replacement="product"),
Rule(pattern=re.compile(r"\bsaku\b", FLAGS), replacement="pocket"),
Rule(pattern=re.compile(r"\bmanset\b", FLAGS), replacement="cuff"),
Rule(pattern=re.compile(r"\bperalatan\b", FLAGS), replacement="equipment"),
Rule(pattern=re.compile(r"\botomatis\b", FLAGS), replacement="automatic"),
Rule(pattern=re.compile(r"\bspidol\b", FLAGS), replacement="marker"),
Rule(pattern=re.compile(r"\blensa\b", FLAGS), replacement="lens"),
Rule(pattern=re.compile(r"\bjepitan\b", FLAGS), replacement="tongs"),
Rule(pattern=re.compile(r"\bempat\b", FLAGS), | |
A 24 26.593 32.649 15.425 0.69 9.83 C
ATOM 115 N ASP A 25 29.282 30.981 16.182 1.00 6.26 N
ATOM 116 CA ASP A 25 30.170 29.863 15.877 1.00 6.70 C
ATOM 117 C ASP A 25 30.293 28.915 17.070 1.00 5.34 C
ATOM 118 O ASP A 25 30.261 27.691 16.913 1.00 5.81 O
ATOM 119 CB ASP A 25 31.553 30.374 15.467 1.00 9.38 C
ATOM 120 N GLU A 26 30.415 29.491 18.262 0.67 4.76 N
ATOM 121 CA GLU A 26 30.516 28.708 19.488 0.67 4.83 C
ATOM 122 C GLU A 26 29.195 28.010 19.790 0.67 4.62 C
ATOM 123 O GLU A 26 29.175 26.864 20.246 0.67 4.93 O
ATOM 124 CB GLU A 26 30.920 29.601 20.663 0.67 5.41 C
ATOM 125 N VAL A 27 28.094 28.707 19.527 1.00 4.54 N
ATOM 126 CA VAL A 27 26.766 28.144 19.737 1.00 5.02 C
ATOM 127 C VAL A 27 26.514 27.000 18.761 1.00 4.94 C
ATOM 128 O VAL A 27 25.883 26.003 19.109 1.00 5.05 O
ATOM 129 CB VAL A 27 25.667 29.209 19.566 1.00 5.29 C
ATOM 130 N ARG A 28 27.012 27.155 17.539 1.00 5.20 N
ATOM 131 CA ARG A 28 26.880 26.126 16.515 1.00 5.54 C
ATOM 132 C ARG A 28 27.724 24.906 16.863 1.00 4.76 C
ATOM 133 O ARG A 28 27.311 23.770 16.630 1.00 5.46 O
ATOM 134 CB ARG A 28 27.288 26.671 15.145 1.00 6.82 C
ATOM 135 N THR A 29 28.908 25.147 17.418 1.00 4.62 N
ATOM 136 CA THR A 29 29.787 24.058 17.829 1.00 5.38 C
ATOM 137 C THR A 29 29.183 23.294 19.005 1.00 4.39 C
ATOM 138 O THR A 29 29.254 22.064 19.065 1.00 5.02 O
ATOM 139 CB THR A 29 31.182 24.575 18.224 1.00 7.77 C
ATOM 140 N LEU A 30 28.579 24.032 19.931 1.00 4.10 N
ATOM 141 CA LEU A 30 27.937 23.429 21.093 1.00 4.03 C
ATOM 142 C LEU A 30 26.699 22.638 20.684 1.00 4.40 C
ATOM 143 O LEU A 30 26.424 21.569 21.231 1.00 4.33 O
ATOM 144 CB LEU A 30 27.563 24.502 22.117 1.00 4.41 C
ATOM 145 N GLN A 31 25.956 23.171 19.719 1.00 4.65 N
ATOM 146 CA GLN A 31 24.760 22.507 19.214 1.00 5.33 C
ATOM 147 C GLN A 31 25.128 21.239 18.451 1.00 5.02 C
ATOM 148 O GLN A 31 24.432 20.227 18.538 1.00 5.56 O
ATOM 149 CB GLN A 31 23.961 23.450 18.314 1.00 6.58 C
ATOM 150 N GLU A 32 26.224 21.304 17.702 1.00 5.72 N
ATOM 151 CA GLU A 32 26.705 20.153 16.949 1.00 6.90 C
ATOM 152 C GLU A 32 27.211 19.070 17.894 1.00 5.82 C
ATOM 153 O GLU A 32 26.996 17.879 17.663 1.00 6.63 O
ATOM 154 CB GLU A 32 27.816 20.568 15.982 1.00 10.01 C
ATOM 155 N ASN A 33 27.881 19.491 18.963 1.00 5.36 N
ATOM 156 CA ASN A 33 28.380 18.557 19.964 1.00 5.56 C
ATOM 157 C ASN A 33 27.239 17.898 20.732 1.00 5.07 C
ATOM 158 O ASN A 33 27.291 16.706 21.041 1.00 6.19 O
ATOM 159 CB ASN A 33 29.329 19.264 20.933 1.00 6.47 C
ATOM 160 N LEU A 34 26.207 18.681 21.030 1.00 4.51 N
ATOM 161 CA LEU A 34 25.036 18.173 21.734 1.00 4.72 C
ATOM 162 C LEU A 34 24.248 17.208 20.854 1.00 4.80 C
ATOM 163 O LEU A 34 23.731 16.198 21.332 1.00 4.90 O
ATOM 164 CB LEU A 34 24.138 19.327 22.184 1.00 4.81 C
ATOM 165 N HIS A 35 24.161 17.528 19.567 1.00 5.03 N
ATOM 166 CA HIS A 35 23.463 16.676 18.611 1.00 5.85 C
ATOM 167 C HIS A 35 24.218 15.369 18.399 1.00 5.91 C
ATOM 168 O HIS A 35 23.612 14.304 18.272 1.00 6.51 O
ATOM 169 CB HIS A 35 23.283 17.402 17.276 1.00 7.35 C
ATOM 170 N GLN A 36 25.544 15.458 18.365 1.00 5.86 N
ATOM 171 CA GLN A 36 26.385 14.279 18.203 1.00 7.12 C
ATOM 172 C GLN A 36 26.310 13.391 19.440 1.00 5.57 C
ATOM 173 O GLN A 36 26.294 12.164 19.336 1.00 6.15 O
ATOM 174 CB GLN A 36 27.835 14.686 17.934 1.00 10.81 C
ATOM 175 N LEU A 37 26.261 14.020 20.611 1.00 4.79 N
ATOM 176 CA LEU A 37 26.156 13.290 21.868 1.00 5.02 C
ATOM 177 C LEU A 37 24.795 12.613 21.990 1.00 4.81 C
ATOM 178 O LEU A 37 24.692 11.490 22.485 1.00 5.11 O
ATOM 179 CB LEU A 37 26.386 14.228 23.055 1.00 5.48 C
ATOM 180 N MET A 38 23.754 13.303 21.535 1.00 5.10 N
ATOM 181 CA MET A 38 22.401 12.760 21.565 0.60 5.87 C
ATOM 182 C MET A 38 22.262 11.601 20.584 1.00 5.55 C
ATOM 183 O MET A 38 21.594 10.607 20.873 1.00 5.83 O
ATOM 184 CB MET A 38 21.378 13.850 21.241 0.60 7.11 C
ATOM 185 N HIS A 39 22.897 11.736 19.424 1.00 5.90 N
ATOM 186 CA HIS A 39 22.871 10.689 18.410 1.00 6.60 C
ATOM 187 C HIS A 39 23.647 9.464 18.879 1.00 6.76 C
ATOM 188 O HIS A 39 23.242 8.328 18.631 1.00 7.35 O
ATOM 189 CB HIS A 39 23.447 11.204 17.090 1.00 7.90 C
ATOM 190 N GLU A 40 24.765 9.703 19.558 1.00 6.24 N
ATOM 191 CA GLU A 40 25.583 8.621 20.092 1.00 7.45 C
ATOM 192 C GLU A 40 24.859 7.903 21.225 1.00 6.69 C
ATOM 193 O GLU A 40 24.937 6.680 21.348 1.00 7.99 O
ATOM 194 CB GLU A 40 26.929 9.158 20.583 1.00 8.91 C
ATOM 195 N TYR A 41 24.153 8.672 22.048 1.00 6.31 N
ATOM 196 CA TYR A 41 23.389 8.109 23.156 1.00 6.52 C
ATOM 197 C TYR A 41 22.206 7.298 22.641 1.00 6.93 C
ATOM 198 O TYR A 41 21.872 6.249 23.194 1.00 7.89 O
ATOM 199 CB TYR A 41 22.900 9.219 24.089 1.00 6.84 C
ATOM 200 N PHE A 42 21.574 7.791 21.581 1.00 7.44 N
ATOM 201 CA PHE A 42 20.448 7.095 20.968 1.00 9.29 C
ATOM 202 C PHE A 42 20.907 5.812 20.283 1.00 11.50 C
ATOM 203 O PHE A 42 20.219 4.792 20.331 1.00 13.50 O
ATOM 204 CB PHE A 42 19.739 8.004 19.962 1.00 9.49 C
ATOM 205 N GLN A 43 22.072 5.872 19.646 1.00 11.97 N
ATOM 206 CA GLN A 43 22.637 4.711 18.968 1.00 13.21 C
ATOM 207 C GLN A 43 23.083 3.647 19.966 1.00 16.37 C
ATOM 208 O GLN A 43 22.917 2.451 19.727 1.00 17.39 O
ATOM 209 CB GLN A 43 23.813 5.127 18.082 1.00 11.79 C
ATOM 210 N GLN A 44 23.652 4.091 21.082 1.00 19.32 N
ATOM 211 CA GLN A 44 24.109 3.178 22.125 1.00 22.77 C
ATOM 212 C GLN A 44 22.935 2.498 22.821 1.00 25.16 C
ATOM 213 O GLN A 44 23.114 1.528 23.557 1.00 27.03 O
ATOM 214 CB GLN A 44 24.968 3.921 23.150 1.00 23.69 C
TER
"""
tst_01_start_lines = """
CRYST1 100.000 100.000 100.000 90.00 90.00 90.00 P 1
ATOM 1 N SER A 2 34.406 14.074 39.193 1.00 25.20 N
ATOM 2 CA SER A 2 35.650 14.676 38.720 1.00 23.89 C
ATOM 3 C SER A 2 35.652 14.973 37.209 1.00 22.90 C
ATOM 4 O SER A 2 36.003 16.085 36.813 1.00 23.08 O
ATOM 5 CB SER A 2 36.846 13.786 39.085 1.00 23.11 C
ATOM 6 OG SER A 2 36.688 13.224 40.377 1.00 21.13 O
ATOM 7 N PRO A 3 35.271 14.008 36.359 1.00 22.39 N
ATOM 8 CA PRO A 3 35.247 14.320 34.919 1.00 19.74 C
ATOM 9 C PRO A 3 34.131 15.273 34.536 1.00 13.82 C
ATOM 10 O PRO A 3 34.335 16.145 33.682 1.00 12.80 O
ATOM 11 CB PRO A 3 35.078 12.943 34.264 1.00 23.07 C
ATOM 12 CG PRO A 3 34.365 12.142 35.278 1.00 24.21 C
ATOM 13 CD PRO A 3 34.900 12.599 36.604 1.00 23.79 C
ATOM 14 N LEU A 4 32.953 15.140 35.151 1.00 11.29 N
ATOM 15 CA LEU A 4 31.842 16.029 34.822 | |
import os
import random
from pygame.locals import *
from OpenGL.GLU import *
from ModelLoader_UserInterface import*
from ModelLoader_ObjLoader import*
# Module-level UI state object shared by Model_main and the event loop below;
# it holds the model lists, selection indices and panel state the methods mutate.
UI = UserInterface()
class Model_main:
def __init__(self):
self.MDL_list = 0
self.MDL = []
self.MDLH = []
self.MDLS = []
self.app = False
self.pos = []
self.LoadEnd = False
def Select_Detection(self, pos):
if UI.Mdl_list>0 and 220 < pos[0] < 250 and 45 < pos[1] < 211:
if 45 < pos[1] < 75 and UI.Mdl_list>=1:
UI.LP_Mdl_Del = 0
elif 78 < pos[1] < 110 and UI.Mdl_list>=2:
UI.LP_Mdl_Del = 1
elif 113 < pos[1] < 145 and UI.Mdl_list>=3:
UI.LP_Mdl_Del = 2
elif 145 < pos[1] < 177 and UI.Mdl_list>=4:
UI.LP_Mdl_Del = 3
elif 178 < pos[1] < 210 and UI.Mdl_list==5:
UI.LP_Mdl_Del = 4
else:
UI.LP_Mdl_Del = -1
else:
UI.LP_Mdl_Del = -1
def EnvRot(self, rx, ry):
if rx==0 and ry==0:
UI.ortho = 1
elif rx==270 and ry==0:
UI.ortho = 3
elif rx==0 and ry==90:
UI.ortho = 7
elif rx==0 and ry==270:
UI.ortho = 9
else:
UI.ortho = 0
def Rot_clear(self, rot):
final = rot
if rot>360 or rot<-360:
n = rot/360
final = rot-360*int(n)
return final
def LoadOBJ_internal(self):
for i in range(len(UI.MdlH)):
self.MDLH.append(OBJ("ext/Internal/", UI.MdlH[i]))
self.MDLH[i].create_gl_list()
UI.MdlH_TexOn.append(self.MDLH[i].Tex_ON)
UI.MdlH_Alpha.append(1)
UI.MdlH_TexFile.append(self.MDLH[i].file)
UI.MdlH_RGBA.append([int(self.MDLH[i].color[0]*255),int(self.MDLH[i].color[1]*255),int(self.MDLH[i].color[2]*255),self.MDLH[i].color[3]])
UI.MdlH_POS.append([self.MDLH[i].center[0], self.MDLH[i].center[1],self.MDLH[i].center[2]])
UI.MdlH_POS_ORI.append([self.MDLH[i].center[0], self.MDLH[i].center[1],self.MDLH[i].center[2]])
UI.MdlH_DEG.append([0,0,0])
UI.MdlH_SCL.append([1,1,1])
    def LoadOBJ(self):
        """Rebuild self.MDL from the file names in UI.Mdl.

        Every listed model is reloaded from disk. When the last entry is a
        brand-new addition (no deletion pending, UI.LP_Mdl_Del == -1) its
        UI bookkeeping lists are extended and it becomes the selection.
        """
        self.MDL.clear()
        self.MDL_list = UI.Mdl_list
        for i in range(self.MDL_list):
            # Slot 0 is loaded from the internal folder when the internal
            # model set is active; all other slots are user files.
            if i==0 and UI.int:
                fdir = 'ext/Internal/'
            else:
                fdir = 'ext/'
            self.MDL.append(OBJ(fdir, UI.Mdl[i]))
            self.MDL[i].create_gl_list()
            # Only the freshly appended last model gets new UI entries;
            # after a deletion (LP_Mdl_Del != -1) the lists already match.
            if UI.LP_Mdl_Del==-1 and self.MDL_list-1==i:
                UI.Mdl_TexFile.append(self.MDL[i].file)
                UI.Mdl_TexOn.append(self.MDL[i].Tex_ON)
                UI.Mdl_POS.append([self.MDL[i].center[0], self.MDL[i].center[1],self.MDL[i].center[2]])
                UI.Mdl_POS_ORI.append([self.MDL[i].center[0], self.MDL[i].center[1], self.MDL[i].center[2]])
                UI.Mdl_RGBA.append([int(self.MDL[i].color[0]*255),int(self.MDL[i].color[1]*255),int(self.MDL[i].color[2]*255),self.MDL[i].color[3]])
                UI.Mdl_DEG.append([0,0,0])
                UI.Mdl_SCL.append([1,1,1])
                UI.Mdl_Alpha.append(1)
                UI.LP_Mdl_Slc = self.MDL_list-1
        # Reset internal-model and hierarchy selection after a (re)load.
        if UI.int:
            UI.LP_intMdl_Slc = -1
        if UI.Mdl_list>1:
            UI.LP_Hrc_Slc = -1
    def RELoadOBJ(self):
        """Flag the selected model(s) for reload and reapply colour/alpha edits.

        UI.RP_Slc == 6 is the colour (RGBA) panel, 7 the alpha/texture panel.
        When the internal set is active (UI.int and slot 0 selected), either
        the whole hierarchy (UI.LP_Hrc_Slc == 0) or the single internal part
        UI.LP_intMdl_Slc is updated; otherwise the user model UI.LP_Mdl_Slc.
        """
        # Mark the affected model object(s) so their loaders re-read the file.
        if UI.int and UI.LP_Mdl_Slc==0:
            if UI.LP_Hrc_Slc==0:
                for i in range(len(self.MDLH)):
                    self.MDLH[i].ReLoad = True
            else:
                self.MDLH[UI.LP_intMdl_Slc].ReLoad = True
        else:
            self.MDL[UI.LP_Mdl_Slc].ReLoad = True
        if UI.RP_Slc==6:
            # Colour panel: either push the UI colour into the model
            # (color_new set) or pull the model's colour back into the UI
            # list in place. clr aliases the UI RGBA list entry, so the
            # tuple-unpack assignments below mutate the UI state directly.
            if UI.int and UI.LP_Mdl_Slc==0:
                clr = UI.MdlH_RGBA[UI.LP_intMdl_Slc]
            else:
                clr = UI.Mdl_RGBA[UI.LP_Mdl_Slc]
            if UI.int and UI.LP_Mdl_Slc==0:
                if UI.LP_Hrc_Slc==0:
                    for i in range(len(self.MDLH)):
                        clr = UI.MdlH_RGBA[i]
                        if self.MDLH[i].color_new:
                            # UI stores 0-255 ints; the model wants 0-1 floats.
                            self.MDLH[i].color = clr[0] / 255, clr[1] / 255, clr[2] / 255, clr[3]
                            self.MDLH[i].create_gl_list()
                        else:
                            self.MDLH[i].create_gl_list()
                            clr[0], clr[1], clr[2], clr[3] = int(self.MDLH[i].color[0] * 255), int(self.MDLH[i].color[1] * 255), int(self.MDLH[i].color[2] * 255), self.MDLH[i].color[3]
                elif self.MDLH[UI.LP_intMdl_Slc].color_new:
                    self.MDLH[UI.LP_intMdl_Slc].color = clr[0] / 255, clr[1] / 255, clr[2] / 255, clr[3]
                    self.MDLH[UI.LP_intMdl_Slc].create_gl_list()
                else:
                    self.MDLH[UI.LP_intMdl_Slc].create_gl_list()
                    clr[0], clr[1], clr[2], clr[3] = int(self.MDLH[UI.LP_intMdl_Slc].color[0] * 255), int(self.MDLH[UI.LP_intMdl_Slc].color[1] * 255), int(self.MDLH[UI.LP_intMdl_Slc].color[2] * 255), self.MDLH[UI.LP_intMdl_Slc].color[3]
            else:
                if self.MDL[UI.LP_Mdl_Slc].color_new:
                    self.MDL[UI.LP_Mdl_Slc].color = clr[0] / 255, clr[1] / 255, clr[2] / 255, clr[3]
                    self.MDL[UI.LP_Mdl_Slc].create_gl_list()
                else:
                    self.MDL[UI.LP_Mdl_Slc].create_gl_list()
                    clr[0], clr[1], clr[2], clr[3] = int(self.MDL[UI.LP_Mdl_Slc].color[0] * 255), int(self.MDL[UI.LP_Mdl_Slc].color[1] * 255), int(self.MDL[UI.LP_Mdl_Slc].color[2] * 255), self.MDL[UI.LP_Mdl_Slc].color[3]
        elif UI.RP_Slc==7:
            # Alpha/texture panel: copy the UI alpha and texture-enable flags
            # into the model(s) and rebuild their display lists.
            if UI.int and UI.LP_Mdl_Slc==0:
                if UI.LP_Hrc_Slc==0:
                    for i in range(len(self.MDLH)):
                        self.MDLH[i].Alpha = UI.MdlH_Alpha[i]
                        self.MDLH[i].Tex_ON = UI.MdlH_TexOn[i]
                        self.MDLH[i].create_gl_list()
                else:
                    self.MDLH[UI.LP_intMdl_Slc].Alpha = UI.MdlH_Alpha[UI.LP_intMdl_Slc]
                    self.MDLH[UI.LP_intMdl_Slc].Tex_ON = UI.MdlH_TexOn[UI.LP_intMdl_Slc]
                    self.MDLH[UI.LP_intMdl_Slc].create_gl_list()
            else:
                self.MDL[UI.LP_Mdl_Slc].Alpha = UI.Mdl_Alpha[UI.LP_Mdl_Slc]
                self.MDL[UI.LP_Mdl_Slc].Tex_ON = UI.Mdl_TexOn[UI.LP_Mdl_Slc]
                self.MDL[UI.LP_Mdl_Slc].create_gl_list()
def LoadOBJ_screen(self):
props = ["Leave2.obj","Leave.obj","Leave1.obj","Leave2.obj","Leave.obj","Leave1.obj","Leave2.obj","Leave.obj","Leave1.obj","Leave2.obj",
"Leave.obj","Leave1.obj","Leave2.obj","Leave.obj","Leave1.obj","Leave2.obj","Leave.obj","Leave1.obj","Leave4.obj","Leave3.obj"]
for i in range(len(props)):
self.MDLS.append(OBJ("ext/Internal/", props[i]))
if i == len(props)-1:
self.MDLS[i].Alpha = 0
self.MDLS[i].create_gl_list()
if i<len(props)-2:
x = random.randint(-5,5)
y = random.randint(-20,0)
z = random.randint(5,10)
self.pos.append([x, y, z])
else:
self.pos.append([0,-20,8,0])
self.MDLS[-1].ReLoad = True
    def LoadScreen(self):
        """Draw one frame of the intro animation (falling leaves, then the title).

        Mutates the per-model entries of self.pos and sets self.LoadEnd once
        the final model has settled so the caller can leave the intro.
        """
        glClearColor(0,0,0, 1)
        Delete_list = []
        # Draw every leaf still above the kill line (y > -48) and advance it
        # downward; collect the indices of leaves that fell past it.
        for i in range(len(self.MDLS)):
            # Once only the two closing models remain, stop after the first
            # fallen entry is recorded.
            if Delete_list and len(self.MDLS)==2:
                break
            pos = self.pos[i]
            if pos[1]>-48:
                glPushMatrix()
                glTranslate(pos[0], pos[1], -20)
                glRotate(pos[1] * pos[2]/100, 0, 0, 1)
                glRotate(pos[1] * pos[2], 0, 1, 0)
                glCallList(self.MDLS[i].gl_list)
                # Fall speed scales with the random pos[2] factor.
                pos[1] -= pos[2]/500
                glPopMatrix()
            else:
                Delete_list.append(i)
        if Delete_list:
            # All models fell: drop everything except the last two.
            if len(Delete_list)==len(self.MDLS):
                del self.MDLS[0:len(self.MDLS)-2]
        if len(self.MDLS)==2:
            # Fade the final model in a little more each frame.
            self.MDLS[-1].Alpha +=0.005
            self.MDLS[-1].create_gl_list()
        # Animate the two remaining models through the pos[3] state flag:
        # 1 = waiting below, 2 = rising, 3 = settled (title shown).
        # NOTE(review): state meanings inferred from the transitions below.
        for i in range(-2,0):
            pos = self.pos[i]
            if pos[1]<=-48:
                glPushMatrix()
                glTranslate(pos[0], pos[1], -20)
                glRotate(pos[1] * pos[2] / 100, 0, 0, 1)
                glRotate(pos[1] * pos[2], 0, 1, 0)
                glCallList(self.MDLS[i].gl_list)
                glPopMatrix()
                if len(self.MDLS)==2:
                    pos[3] = 1
            elif len(self.MDLS)==2 and pos[1]<-30 and pos[3]==1:
                glPushMatrix()
                glTranslate(pos[0], pos[1], -20)
                glRotate(pos[1] * pos[2] / 100, 0, 0, 1)
                glRotate(pos[1] * pos[2], 0, 1, 0)
                glCallList(self.MDLS[i].gl_list)
                glPopMatrix()
            elif len(self.MDLS) == 2 and pos[1] >-40 and pos[3]==2:
                # pos[1]-(pos[1]+30) is a constant -30: hold at that height.
                y = pos[1]-(pos[1]+30)
                glPushMatrix()
                glTranslate(pos[0], y, -20)
                glRotate(pos[1] * pos[2] / 100, 0, 0, 1)
                glRotate(pos[1] * pos[2], 0, 1, 0)
                glCallList(self.MDLS[i].gl_list)
                glPopMatrix()
            elif pos[3] == 3:
                # Constant -46: final resting height for the settled model.
                y = pos[1] - (pos[1] + 46)
                glPushMatrix()
                glTranslate(0, y, -20)
                glScale(1.4, 1.4, 1.4)
                glRotate(pos[2], 0, 1, 0)
                glCallList(self.MDLS[i].gl_list)
                glPopMatrix()
                # Stroke-render the title, then bitmap-render the credit line.
                glColor3ub(212, 175, 55)
                glPushMatrix()
                glScalef(0.15, 0.1 / 2, 1)
                glTranslatef(-480, -200, -80)
                for ch in "MODEL LOADER":
                    glutStrokeCharacter(GLUT_STROKE_ROMAN, ctypes.c_int(ord(ch)))
                glPopMatrix()
                glWindowPos2f(530, 280)
                for ch in "<NAME>":
                    glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, ctypes.c_int(ord(ch)))
            # Once the fade-in is visible, raise the model and advance state.
            if self.MDLS[-1].Alpha>0.03:
                pos[1] += pos[2] / 50
                if pos[1]>0.3:
                    pos[3] = 3
                    pos[2] += 0.8
                    self.LoadEnd = True
                elif pos[1]>=-30:
                    pos[3] = 2
def main(self):
os.environ['SDL_VIDEO_CENTERED'] = '1'
pygame.init()
pygame.mixer.init()
glutInit()
display = (1250, 750)
pygame.display.set_mode(display, DOUBLEBUF | OPENGL)
pygame.display.set_caption('MODEL LOADER by <NAME> BS18110173')
glViewport(0, 0, display[0], display[1])
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(90, (display[0] / display[1]), 0.1, 1000)
glMatrixMode(GL_MODELVIEW)
self.LoadOBJ_screen()
self.LoadOBJ_internal()
self.LoadOBJ()
shift = ax = ay = az = roller = crop = False
tsf = rlr = -1
rx, ry, rz = 0, 0, 0
tx, ty, tz = 0, 0, 0
sx, sy, sz = 1, 1, 1
xrot, yrot = 0,0
ratx, raty = [0,0], [0,0]
while True:
if UI.exit==2:
pygame.quit()
quit()
Cursor = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
elif not self.app:
if self.LoadEnd:
if event.type == pygame.KEYDOWN or event.type == MOUSEBUTTONDOWN:
self.app = True
self.MDLS.clear()
else:
break
else:
break
elif UI.exit==1:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN or event.key == pygame.K_KP_ENTER:
UI.exit = 2
break
elif event.key == K_ESCAPE:
UI.exit = 0
UI.RP_Slc = -1
break
elif event.type == MOUSEBUTTONDOWN:
UI.exit = 0
UI.RP_Slc = -1
break
elif event.type == MOUSEBUTTONDOWN:
if UI.image[3]:
UI.image[3]=False
if event.button == 1:
if 1200<Cursor[0]<1243 and 5<Cursor[1]<520: # RightPanel
if 5<Cursor[1]<50:
UI.RP_Slc = 0
UI.exit = 1
elif 55<Cursor[1]<100:
UI.RP_Slc = 1
elif 110<Cursor[1]<155:
UI.RP_Slc = 2
elif 160<Cursor[1]<205 and UI.Mdl:
UI.RP_Slc = 3
elif 215<Cursor[1]<260 and UI.Mdl:
if not UI.LP_Hrc_Slc == 0:
UI.RP_Slc = 4
elif 265<Cursor[1]<310 and UI.Mdl:
if not UI.LP_Hrc_Slc == 0:
UI.RP_Slc = 5
elif 320<Cursor[1]<365 and UI.Mdl:
UI.RP_Slc = 6
elif 370<Cursor[1]<415 and UI.Mdl:
UI.RP_Slc = 7
elif 425<Cursor[1]<470:
UI.RP_Slc = 8
elif 475<Cursor[1]<525:
UI.RP_Slc = 9
elif 5<Cursor[0]<211 and 45<Cursor[1]<211:
if 45<Cursor[1]<75:
UI.LP_Mdl_Slc = 0
if UI.int:
UI.LP_Hrc_Slc = 0
elif 78<Cursor[1]<110 and UI.Mdl_list>=2:
UI.LP_Mdl_Slc = 1
UI.LP_intMdl_Slc = UI.LP_Hrc_Slc = -1
elif 113 < Cursor[1] < 145 and UI.Mdl_list>=3:
UI.LP_Mdl_Slc = 2
UI.LP_intMdl_Slc = UI.LP_Hrc_Slc = -1
elif 145 < Cursor[1] < 177 and UI.Mdl_list>=4:
UI.LP_Mdl_Slc = 3
UI.LP_intMdl_Slc = UI.LP_Hrc_Slc = -1
elif 178 < Cursor[1] < 210 and UI.Mdl_list>=5:
UI.LP_Mdl_Slc = 4
UI.LP_intMdl_Slc = UI.LP_Hrc_Slc = -1
elif 5 < Cursor[0] < 250 and 472 < Cursor[1] < 489 and UI.int and UI.LP_Mdl_Slc==0:
if 5<Cursor[0]<85:
UI.LP_Hrc_Slc = 0
UI.LP_intMdl_Slc = -1
elif 89<Cursor[0]<166:
UI.LP_Hrc_Slc = 1
elif 171<Cursor[0]<250:
UI.LP_Hrc_Slc = 2
elif 5 < Cursor[0] < 250 and 493 < Cursor[1] < 747 and UI.int and UI.LP_Mdl_Slc == 0:
if UI.LP_Hrc_Slc == 1:
if 5 < Cursor[0] < 128 and 493 < Cursor[1] < 749:
if 493 < Cursor[1] < 518:
UI.LP_intMdl_Slc = 0
elif 519 < Cursor[1] < 544:
UI.LP_intMdl_Slc = 1
elif 545 < Cursor[1] < 570:
UI.LP_intMdl_Slc = 2
elif 571 < Cursor[1] < 596:
UI.LP_intMdl_Slc = 3
elif 597 < Cursor[1] < 622:
UI.LP_intMdl_Slc = 4
elif 623 < Cursor[1] < 649:
UI.LP_intMdl_Slc = 5
elif 650 < Cursor[1] < 675:
UI.LP_intMdl_Slc = 6
elif 676 < Cursor[1] < 700:
UI.LP_intMdl_Slc = 7
elif 701 < Cursor[1] < 727:
UI.LP_intMdl_Slc = 8
elif 728 < Cursor[1] < 749:
UI.LP_intMdl_Slc = 9
elif 129 < Cursor[0] < 250 and 493 < Cursor[1] < 727:
if 493 < Cursor[1] < 518:
UI.LP_intMdl_Slc = 10
elif 519 < Cursor[1] < 544:
UI.LP_intMdl_Slc = 11
elif 545 < Cursor[1] < 570:
UI.LP_intMdl_Slc = 12
elif 571 < Cursor[1] < 596:
UI.LP_intMdl_Slc = 13
elif 597 < Cursor[1] < 622:
UI.LP_intMdl_Slc = 14
elif 623 < Cursor[1] < 649:
UI.LP_intMdl_Slc = 15
elif 650 < Cursor[1] < 675:
UI.LP_intMdl_Slc = 16
elif 676 < Cursor[1] < 700:
UI.LP_intMdl_Slc = 17
elif 701 < Cursor[1] < 727:
UI.LP_intMdl_Slc = 18
elif UI.LP_Hrc_Slc == 2:
if 5 < Cursor[0] < 250 and 493 < | |
Only
'expirationHours': 36, # time from now to expire
'retentionHours': 12 # time from now to expire
}
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INVALID_INPUT_VV_PATTERN - Invalid volume pattern specified
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_SET - The set does not exist
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- EMPTY_SET - The set is empty
:raises: :class:`~hpe3parclient.exceptions.HTTPServiceUnavailable`
- VV_LIMIT_REACHED - Maximum number of volumes reached
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_VOL - The storage volume does not exist
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- VV_IS_BEING_REMOVED - The volume is being removed
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- INV_OPERATION_VV_READONLY_TO_READONLY_SNAP - Creating a
read-only copy from a read-only volume is not permitted
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- NO_SNAP_CPG - No snapshot CPG has been configured for the volume
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_DUP_NAME - Invalid input (duplicate name).
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- INV_OPERATION_VV_SNAP_PARENT_SAME_BASE - Two parent
snapshots share thesame base volume
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - Invalid
operation. Online copyis in progress
:raises: :class:`~hpe3parclient.exceptions.HTTPServiceUnavailable`
- VV_ID_LIMIT_REACHED - Max number of volumeIDs has been reached
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_VOLUME - The volume does not exists
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- VV_IN_STALE_STATE - The volume is in a stale state.
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- VV_NOT_STARTED - Volume is not started
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- VV_UNAVAILABLE - The volume is not accessible
:raises: :class:`~hpe3parclient.exceptions.HTTPServiceUnavailable`
- SNAPSHOT_LIMIT_REACHED - Max number of snapshots has been reached
:raises: :class:`~hpe3parclient.exceptions.HTTPServiceUnavailable`
- CPG_ALLOCATION_WARNING_REACHED - The CPG has reached the
allocation warning
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- INV_OPERATION_VV_VOLUME_CONV_IN_PROGRESS - Invalid operation: VV
conversion is in progress.
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- INV_OPERATION_VV_CLEANUP_IN_PROGRESS - Internal volume cleanup is
in progress.
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- INV_OPERATION_VV_PEER_VOLUME - Cannot modify a peer volume.
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- INV_OPERATION_VV_ONLINE_COPY_IN_PROGRESS - The volume is the
target of an online copy.
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- INV_OPERATION_VV_INTERNAL_VOLUME - Illegal op on internal vol
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- EXISTENT_ID - An ID exists
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- INV_OPERATION_VV_NOT_IN_NORMAL_STATE - Volume state is not normal
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- VV_IN_INCONSISTENT_STATE - Internal inconsistency error in vol
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_RETAIN_GT_EXPIRE - Retention time is greater than
expiration time.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_TIME - Invalid time specified.
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- INV_OPERATION_SNAPSHOT_NOT_SAME_TYPE - Some snapshots in the
volume set are read-only, some are read-write
"""
parameters = {'name': name}
if optional:
parameters = self._mergeDict(parameters, optional)
info = {'action': 'createSnapshot',
'parameters': parameters}
response, body = self.http.post('/volumesets/%s' % copyOfName,
body=info)
return body
# QoS Priority Optimization methods
def setQOSRule(self, set_name, max_io=None, max_bw=None):
"""
Set a QOS Rule on a volume set
:param set_name: the volume set name for the rule.
:type set_name: str
:param max_io: the maximum IOPS value
:type max_io: int
:param max_bw: The maximum Bandwidth
:type max_bw:
"""
cmd = ['setqos']
if max_io is not None:
cmd.extend(['-io', '%s' % max_io])
if max_bw is not None:
cmd.extend(['-bw', '%sM' % max_bw])
cmd.append('vvset:' + set_name)
result = self._run(cmd)
if result:
msg = result[0]
else:
msg = None
if msg:
if 'no matching QoS target found' in msg:
raise exceptions.HTTPNotFound(error={'desc': msg})
else:
raise exceptions.SetQOSRuleException(message=msg)
def queryQoSRules(self):
"""
Get QoS Rules
:returns: Array of QoS Rules
"""
response, body = self.http.get('/qos')
return body
def queryQoSRule(self, targetName, targetType='vvset'):
"""
Query a QoS rule
:param targetType: target type is vvset or sys
:type targetType: str
:param targetName: the name of the target. When targetType is sys,
target name must be sys:all_others.
:type targetName: str
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_QOS_RULE - QoS rule does not exist.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
"""
response, body = self.http.get('/qos/%(targetType)s:%(targetName)s' %
{'targetType': targetType,
'targetName': targetName})
return body
    def createQoSRules(self, targetName, qosRules,
                       target_type=TARGET_TYPE_VVSET):
        """
        Create QOS rules

        The QoS rule can be applied to VV sets. By using sys:all_others,
        you can apply the rule to all volumes in the system for which no
        QoS rule has been defined.

        ioMinGoal and ioMaxLimit must be used together to set I/O limits.
        Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.

        If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
        set to 2 (no limit), and vice versa. They cannot be set to
        'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
        limit), then bwMinGoalOP must also be set to 2.

        If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
        set to 1 (zero) and vice versa. Similarly, if bwMaxLimitOP is
        set to 1 (zero), then bwMinGoalOP must also be set to 1.

        The ioMinGoalOP and ioMaxLimitOP fields take precedence over
        the ioMinGoal and ioMaxLimit fields.

        The bwMinGoalOP and bwMaxLimitOP fields take precedence over
        the bwMinGoalKB and bwMaxLimitKB fields

        :param target_type: Type of QoS target, either enum
                            TARGET_TYPE_VVS or TARGET_TYPE_SYS.
        :type target_type: enum
        :param targetName: the name of the target object on which the QoS
                           rule will be created.
        :type targetName: str
        :param qosRules: QoS options
        :type qosRules: dict

        .. code-block:: python

            qosRules = {
                'priority': 2,         # priority enum
                'bwMinGoalKB': 1024,   # bandwidth rate minimum goal in
                                       # kilobytes per second
                'bwMaxLimitKB': 1024,  # bandwidth rate maximum limit in
                                       # kilobytes per second
                'ioMinGoal': 10000,    # I/O-per-second minimum goal
                'ioMaxLimit': 2000000, # I/O-per-second maximum limit
                'enable': True,        # QoS rule for target enabled?
                'bwMinGoalOP': 1,      # zero none operation enum, when set to
                                       # 1, bandwidth minimum goal is 0
                                       # when set to 2, the bandwidth minimum
                                       # goal is none (NoLimit)
                'bwMaxLimitOP': 1,     # zero none operation enum, when set to
                                       # 1, bandwidth maximum limit is 0
                                       # when set to 2, the bandwidth maximum
                                       # limit is none (NoLimit)
                'ioMinGoalOP': 1,      # zero none operation enum, when set to
                                       # 1, I/O minimum goal is 0
                                       # when set to 2, the I/O minimum goal is
                                       # none (NoLimit)
                'ioMaxLimitOP': 1,     # zero none operation enum, when set to
                                       # 1, I/O maximum limit is 0
                                       # when set to 2, the I/O maximum limit
                                       # is none (NoLimit)
                'latencyGoal': 5000,   # Latency goal in milliseconds
                'defaultLatency': False # Use latencyGoal or defaultLatency?
            }

        :raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
            - INV_INPUT_EXCEEDS_RANGE - Invalid input: number exceeds expected
            range.
        :raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
            - NON_EXISTENT_QOS_RULE - QoS rule does not exist.
        :raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
            - INV_INPUT_ILLEGAL_CHAR - Illegal character in the input.
        :raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
            - EXISTENT_QOS_RULE - QoS rule already exists.
        :raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
            - INV_INPUT_MIN_GOAL_GRT_MAX_LIMIT - I/O-per-second maximum limit
            should be greater than the minimum goal.
        :raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
            - INV_INPUT_BW_MIN_GOAL_GRT_MAX_LIMIT - Bandwidth maximum limit
            should be greater than the minimum goal.
        :raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
            - INV_INPUT_BELOW_RANGE - I/O-per-second limit is below range.
            Bandwidth limit is below range.
        :raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
            - UNLICENSED_FEATURE - The system is not licensed for QoS.
        """
        # The rule options are merged into the target identification before
        # being posted as a single body to the /qos collection.
        info = {'name': targetName,
                'type': target_type}
        info = self._mergeDict(info, qosRules)
        response, body = self.http.post('/qos', body=info)
        return body
def modifyQoSRules(self, targetName, qosRules, targetType='vvset'):
"""
Modify an existing QOS rules
The QoS rule can be applied to VV sets. By using sys:all_others,
you can apply the rule to all volumes in the system for which no
QoS rule has been defined.
ioMinGoal and ioMaxLimit must be used together to set I/O limits.
Similarly, bwMinGoalKB and bwMaxLimitKB must be used together.
        If ioMaxLimitOP is set to 2 (no limit), ioMinGoalOP must also be
        set to 2 (no limit), and vice versa. They cannot be set to
        'none' individually. Similarly, if bwMaxLimitOP is set to 2 (no
        limit), then bwMinGoalOP must also be set to 2.
        If ioMaxLimitOP is set to 1 (zero), ioMinGoalOP must also be
        set to 1 (zero) and vice versa. Similarly, if bwMaxLimitOP is
        set to 1 (zero), then bwMinGoalOP must also be set to 1.
The ioMinGoalOP and ioMaxLimitOP fields take precedence over
the ioMinGoal and ioMaxLimit fields.
The bwMinGoalOP and bwMaxLimitOP fields take precedence over
the bwMinGoalKB and bwMaxLimitKB fields
:param targetName: the name of the target object on which the QoS
rule will be created.
:type targetName: str
:param targetType: Type of QoS target, either vvset or sys
:type targetType: str
:param qosRules: QoS options
:type qosRules: dict
.. code-block:: python
qosRules = {
'priority': 2, # priority enum
'bwMinGoalKB': | |
XXXXX XXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXXX XXXXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXX
XXXXXXXXXX
XXXXXXXXXX
XX XXXXXXX XXXXXXXXXXX XXXX XX XXXXX XXXX XX XXXXXXXX XXXXXXX XXXXXX XXX XXX
XXXXXXXXXXXXXXXXXXX XXXX XXX XXXXXXXXXXXXX XXX XX XXX XXXX XXXX X XXXXXX
XXXXXXXXX XXX XXXXX XXX XXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXX XXX XXXXXXXXXXXXXXX XXXXX XXXXX XXX
XXXXX XXXXXXXXX
XXXXXXXX
XXXXXXXX
XXXXX XXXXXXX XX XXX XXXXXXXXXXX XXXXXXXXX XXXXXXXXXXXXX XXXXXXX XXX
XXXXXXXXXXX XXXXXXXXXXXXXX XXX XXXX XXXX XX XXXXXXXXXXXX XXXXXXXXX XXXXXXX XX
XXXXXXXX XXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXX XXX XXXXXX XX
XX XXXXXXX XXX XXXXXXXXXXXX XXXXX XXXXXXXXXX
XXXX
XXXX
XXX XXXXXXXX XXXXXXXXXXXXXXXX XXX XXXXXX XXXXXX XX XXXXXXXX XXXX XXXXX
XXXXXXXXXXXXXXXXX XX X XXX XXXX XXX XXXXXXXXXXXXXXX XXXXXX XXX XXXXXXXX XXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XX XXXXXXXX XXXXXXX XXXXXX XXX XXXXXXX
XXX XXXXXXXXXXX XXXXXXXXX XXXX XXXXXX XXX XXXXXX XX XXX XXXXXXX XXXXX XXXXXXXXX
XX XXXX XXXXXXXXXXXXXXXXX XXXXXXX XXXXXXXXXXX
XXXX XXXXX XXXXXXX X XXXXXXX XXXXXXXXXXXXX XXXX XXXXXXX XXX XXXXXX XXXXXXX
XXXXXXXXX XXX XXXXXXXXXXX XXXXXXXXX XXXXXXXXX XXXX XXXXXXXXXXXXXX XXXXX XXX
XXXXXX XX XXX XXXXXXX XXXX XXXXXXXXX XX XXX XXXXXXXXXXXXXXXXXX XXXXXXXX
XXXXXXXXXX XXXX XXX XXX XXXXX XX XXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXX XXXX XX XXXX XXXXXXXXX XXXX XXXX XXXXX XXX XXXXXXXX XX XXXXX
XXXXXX XX XX XXXXX XXX XXX XXXXXX X XXXXXXXX XX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXX XXX XXX XXXXXXXXXXXXX
XX XXXXXXXXX XXXXXXXXXX XXXX XXXXXX XXXX XXXXXXX XX XXX XXX XXXXX XXXXXXXXX
XXXXXX X XXXX XXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXX
XXXX XXXXXXXXXXXXXXXXX XXXXXX XXXXXXXX
XXXX XXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXXXX
XXXXX X XXXXXXX XXXX XXXX XXXXX
XXXX X XXXXXXXXXXXX
XXXXXXXXXXX X XXXXXXXX XX XXXXXXX XXX XXXXXXXXX XX XXXXXX XXXX XXXXXXXXX
XXX XXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXX X XXXXXXXXX
XXXXXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXX
XX XXXX XXXXXXXX XXX XXX XXXX XXXX X XXXXXXXXXXXXXXX XXXXX XXX XXXX
XXXX XXXX X XXXXXXXXXXXXX XXXXXX XXXXXXX XXXX XXXXX XXXXX XXXXXXX XXX
X XXXXXXXXXX XXXXXXXXXXXXXX XXX XXXX XXXX XXXXXXX XXX X XXXXXXXXXXX
XX XXX XXXXXXX X XXXXXXXXXXXXXXX XX XXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX XXXX XXXXX
XXXXXXXXXXXXX XXX XXXX XXXX XXX XXXXXXXXXXXX XXXXXXXX XXXXXXX X
XXXXXXXX XXX XXXXXXXXXXX XXX XXX XXXXXXXXXXX XXX XXXX XXXXXX XXXXXXXX XXX
XXXXXX XXXXXX X XXXXXXXXXXXX XXXXXXXXXX
XX XXX XXXXX XXXXXXXX XX XXX XXX XXXX XXXXXX XXXXXXXXXXXX XX XXX XXX XXXXXX
XXXXXXXXXXXXXXXX XXXXXXX XXXX XXXXX XXXXX XXXXXXXX
XXX XXX XXXXXXXXXXXX XXXXXXXXX
XXXX XXXXXXXXXXX XXXXXX XXXX
XXXX XXXXXXXXXXXXXXX XXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXXXXXXXX X X
X XXX
XXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX
X XXX
X
XXXXXXXX XXXXX XXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXX
XXXX XXXXXXX XXXXXXXXXXX XXX XXXXXXXX XXXXXXXXXX XXX XXXXXXX XXX X
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXX
XXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXX XXXX
XXXX XXXXXXXXXXXX XXXXXX XXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXX
X XXXX XXXX XX XXXXXXXXX XXXX XXXXXX XX X XXXXX XXXX XXXXXXXXXX
X XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXX XXXXXXXXXX
X XXXXX XXXX XX XXXX XXXX XXXXXX XXX XXX XXXX XXXX XXXX XXXX XX
X XXX XXXXX XXXXXXX XXXXXXXXXX XXXX XXXX XXXX XX XXX XXXX XXXX
X XXXXXX XX X XXXXXX XXX XX XXXXXXXX XX XXX XXXXXX
XXXXXXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXXXX
X XXXXXXXX XXXXX XX XXXXXXXXX XXXXX XXXXXX XX XXXXXXX
X XXXXXXXXXXXX XXXXX XX XXXXXX XXXXXXXXX XXXX XXX XXXXXX XXXXXX
X XXX XX XXXXXXXXX XXX XXXXX XXX XXXXXXXXXXX XX XXXX XXXX XXXXXX
X XXXX XXX XXXXXXXXX XX X XXXXXXXX XX XXX XXXXXXXXXX XXX
X XXXXXXXXXXXX XX XXXXXXXXXXXXXXXXXX XXXXXXX XXX XXXX XXXXXXXX
XXXXXXXXXXXXXX X XXXX
XXXXXXXXXXXXXXXXXXXX X XXXX
X XXXXXXXX XX XXXXXXXXX XXXX XXXXXX XX X XXXXXX XXXXXXXXXX X XXXXXXXX
X XXXXX XXXXXXXX XX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXX X XXXX
X XXXXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX
X XXXXX XXX XXXX XX XXXX XXXXXX
XXX XXXXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX
XXXXXX XXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXXXX X XXXXX X XXXXXXXXXX XXXXXX
X XXXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX
X XXXXX XXX XXXX XX XXXX XXXXXX
XXX XXXXXXXXXX XXXXX
XXX
X XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXX
X XX XXX XXXX XXXXXXX XX XXX XXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXX
XXX
XXXXXXX XXX XXX XX XXX XXXX XXXXXXX XX XXX XXXX XX X XXXXXX XXXXXX
XXXXXXX
XXX
XXXX X XXXXXXXX X XXXXXXXXXX XXXX
X XXXXXXXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX
X XXXXX XXX XXXX XX XXXX XXXXXX
XXX XXXXXXXXXXXXXX XXXXX
XXX
X XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX
X XXX XXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXX XXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXXXXXXX X XXXXXXXXXXXX X XXXXXXXXXX XXXX
X XXXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX XXXXX
X XXX XXXX XX XXXX XXXXXX XXXX XXXXXXXX XX XXXX XXXX XXX XXXX XXXXX
X XXXXXX XX XX XXX XXXXXXXXXX XX XXXXXXXXX XX XXX XXXXXXXXX XXX XXXX
X XXXX XX XXXX XX XXX XXX
XXX XXXXXXXXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXXXX
XXXXXX XX XXX XXX XXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXXXXXXX XXXXXX XX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXXXXXXXX X XXXXXXXXXXXXXXX X XXXXXXXXXX XXXXX
X XXXXXXXXXXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX
X XXXXX XXX XXXX XX XXXX XXXXXX
XXX XXXXXXXXXXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX
XXXXXXXXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXXXXXXXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXXXXXXXXXX X XXXX XXX XXXXX X XXXXXXXXXX XXXXXXXXXXXX
X XXXXXX XXXX XXXXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX
X XXXXX XXX XXXX XX XXXX XXXXXX
XXX XXXXXXXXXXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX
XXXXXXXX XXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXXXXXXX XXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXXXXXXXXXX X XXXXXX XXXXXX X XXXXXXXXXX XXXXXX XXXXX
X XXXXXX XXXXX XXXXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX
X XXXXX XXX XXXX XX XXXX XXXXXX
XXX XXXXXXXXXXXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX
XXXXXXXX XXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXXXXXXX XXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXXXXXXXXXXX X XXXXXXXXXXXXXXXXXX X XXXXXXXXXX XXXXXX XXXXXX
X XXXXXX XXXX XXXXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX
X XXXXX XXX XXXX XX XXXX XXXXXX XX XXXX XXXXX XXX XXX XXXXXX XXXXXXX
X XXX XXXXXXXXX XXX XXXXXX XXXXX
XXX XXXXXXXXXXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX
XXXXXXXX XXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXXXXXXX XXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXXXXXXXXXX X XXXXXXXXXXXXXXXXXXXXXXXXXX X XXXXXXXXXX XXXXXX XXXX
X XXXXXXXXXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX
X XXXXX XXX XXXX XX XXXX XXXXXX XX XXXX XXXXX XXX XXXXXXXXXXXXXXXX
X XXXXXX XXXXXX XX XXXXXXXX XXXXXX XXXX XXXXXXX XXXXXXXX
XXX XXXXXXXXXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX
XXXXXXXXXX XX XXXXXXXX XXXX XXXXXXXX
XXX
XXX XXXXXXXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXXXXXXXXX XX XXXXXXXX XXXX XXXXXXXX
XXX
XXXXXXXXXX X XXXXXXXXXX XXXXXXXXX X XXXXXXXXXX XXXX XX XXXXXXXXXXX
X XXXXXXXXX XXXXXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX
X XXXXXXXXX XXXXX XXX XXXX XX XXXX XXXXXX
XXX XXXXXXXXXXXXXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX
XXXXXXXXX XXXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXXXXXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXXXXXXXX XXXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXXXXXXXXXXXXX X XXXXXXXXXX XXX XXXXX XXXXX XXXXXX X XXXXXXXXXX XXXXXXXXX XXXXXXX
X XXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX XXXXX
X XXX XXXX XX XXXX XXXXXX XXXXXXX XXX XXXX XXXXXX
XXX XXXXXXXXX XXXXX
XXX
XXXXX XXX XXXXXX XXXXXXXX XX XXXXXXXXXXXX XXX XXXXXXX XXX XXXXXX
XXX XXXXX XX XXXXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX XXXXXXXXXX
XXX
XXXXXXX XXX XXXXXX XXX XX X XXXXXX XXXXXX XXXXXXX
XXX
XXX X XXX X XXXXXXXXXX XXXX XX XXXXX
X XXXXX XX XXX XX XXX XXXXXXXXX XXXXX XX XXXXXXXXX XXX XXXXXXXXX XXXXX
X | |
<filename>boto3_type_annotations_with_docs/boto3_type_annotations/comprehend/client.py
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def batch_detect_dominant_language(self, TextList: List) -> Dict:
"""
Determines the dominant language of the input text for a batch of documents. For a list of languages that Amazon Comprehend can detect, see `Amazon Comprehend Supported Languages <https://docs.aws.amazon.com/comprehend/latest/dg/how-languages.html>`__ .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/BatchDetectDominantLanguage>`_
**Request Syntax**
::
response = client.batch_detect_dominant_language(
TextList=[
'string',
]
)
**Response Syntax**
::
{
'ResultList': [
{
'Index': 123,
'Languages': [
{
'LanguageCode': 'string',
'Score': ...
},
]
},
],
'ErrorList': [
{
'Index': 123,
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResultList** *(list) --*
A list of objects containing the results of the operation. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If all of the documents contain an error, the ``ResultList`` is empty.
- *(dict) --*
The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **Languages** *(list) --*
One or more DominantLanguage objects describing the dominant languages in the document.
- *(dict) --*
Returns the code for the dominant language in the input text and the level of confidence that Amazon Comprehend has in the accuracy of the detection.
- **LanguageCode** *(string) --*
The RFC 5646 language code for the dominant language. For more information about RFC 5646, see `Tags for Identifying Languages <https://tools.ietf.org/html/rfc5646>`__ on the *IETF Tools* web site.
- **Score** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of the detection.
- **ErrorList** *(list) --*
A list containing one object for each document that contained an error. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If there are no errors in the batch, the ``ErrorList`` is empty.
- *(dict) --*
Describes an error that occurred while processing a document in a batch. The operation returns on ``BatchItemError`` object for each document that contained an error.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **ErrorCode** *(string) --*
The numeric error code of the error.
- **ErrorMessage** *(string) --*
A text description of the error.
:type TextList: list
:param TextList: **[REQUIRED]**
A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document should contain at least 20 characters and must contain fewer than 5,000 bytes of UTF-8 encoded characters.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def batch_detect_entities(self, TextList: List, LanguageCode: str) -> Dict:
"""
Inspects the text of a batch of documents for named entities and returns information about them. For more information about named entities, see how-entities
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/BatchDetectEntities>`_
**Request Syntax**
::
response = client.batch_detect_entities(
TextList=[
'string',
],
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
)
**Response Syntax**
::
{
'ResultList': [
{
'Index': 123,
'Entities': [
{
'Score': ...,
'Type': 'PERSON'|'LOCATION'|'ORGANIZATION'|'COMMERCIAL_ITEM'|'EVENT'|'DATE'|'QUANTITY'|'TITLE'|'OTHER',
'Text': 'string',
'BeginOffset': 123,
'EndOffset': 123
},
]
},
],
'ErrorList': [
{
'Index': 123,
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResultList** *(list) --*
A list of objects containing the results of the operation. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If all of the documents contain an error, the ``ResultList`` is empty.
- *(dict) --*
The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **Entities** *(list) --*
One or more Entity objects, one for each entity detected in the document.
- *(dict) --*
Provides information about an entity.
- **Score** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of the detection.
- **Type** *(string) --*
The entity's type.
- **Text** *(string) --*
The text of the entity.
- **BeginOffset** *(integer) --*
A character offset in the input text that shows where the entity begins (the first character is at position 0). The offset returns the position of each UTF-8 code point in the string. A *code point* is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
- **EndOffset** *(integer) --*
A character offset in the input text that shows where the entity ends. The offset returns the position of each UTF-8 code point in the string. A *code point* is the abstract character from a particular graphical representation. For example, a multi-byte UTF-8 character maps to a single code point.
- **ErrorList** *(list) --*
A list containing one object for each document that contained an error. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If there are no errors in the batch, the ``ErrorList`` is empty.
- *(dict) --*
Describes an error that occurred while processing a document in a batch. The operation returns on ``BatchItemError`` object for each document that contained an error.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **ErrorCode** *(string) --*
The numeric error code of the error.
- **ErrorMessage** *(string) --*
A text description of the error.
:type TextList: list
:param TextList: **[REQUIRED]**
A list containing the text of the input documents. The list can contain a maximum of 25 documents. Each document must contain fewer than 5,000 bytes of UTF-8 encoded characters.
- *(string) --*
:type LanguageCode: string
:param LanguageCode: **[REQUIRED]**
The language of the input documents. You can specify English (\"en\") or Spanish (\"es\"). All documents must be in the same language.
:rtype: dict
:returns:
"""
pass
def batch_detect_key_phrases(self, TextList: List, LanguageCode: str) -> Dict:
"""
Detects the key noun phrases found in a batch of documents.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/comprehend-2017-11-27/BatchDetectKeyPhrases>`_
**Request Syntax**
::
response = client.batch_detect_key_phrases(
TextList=[
'string',
],
LanguageCode='en'|'es'|'fr'|'de'|'it'|'pt'
)
**Response Syntax**
::
{
'ResultList': [
{
'Index': 123,
'KeyPhrases': [
{
'Score': ...,
'Text': 'string',
'BeginOffset': 123,
'EndOffset': 123
},
]
},
],
'ErrorList': [
{
'Index': 123,
'ErrorCode': 'string',
'ErrorMessage': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **ResultList** *(list) --*
A list of objects containing the results of the operation. The results are sorted in ascending order by the ``Index`` field and match the order of the documents in the input list. If all of the documents contain an error, the ``ResultList`` is empty.
- *(dict) --*
The result of calling the operation. The operation returns one object for each document that is successfully processed by the operation.
- **Index** *(integer) --*
The zero-based index of the document in the input list.
- **KeyPhrases** *(list) --*
One or more KeyPhrase objects, one for each key phrase detected in the document.
- *(dict) --*
Describes a key noun phrase.
- **Score** *(float) --*
The level of confidence that Amazon Comprehend has in the accuracy of the detection.
- **Text** *(string) --*
The text of a key noun phrase.
- **BeginOffset** *(integer) --*
A character offset in the input text that shows where the | |
fresh-refrigerated, plain, as purchased
20094: [], # Pasta, fresh-refrigerated, plain, cooked
20095: [
"Pasta",
"fresh-refrigerated spinach",
], # Pasta, fresh-refrigerated, spinach, as purchased
20096: [], # Pasta, fresh-refrigerated, spinach, cooked
20097: [], # Pasta, homemade, made with egg, cooked
20098: [], # Pasta, homemade, made without egg, cooked
20105: [], # Macaroni, vegetable, enriched, dry
20106: [], # Macaroni, vegetable, enriched, cooked
20109: [], # Noodles, egg, dry, enriched
20110: [], # Noodles, egg, enriched, cooked
20111: [], # Noodles, egg, spinach, enriched, dry
20112: [], # Noodles, egg, spinach, enriched, cooked
20113: [], # Noodles, chinese, chow mein
20114: [], # Noodles, japanese, soba, dry
20115: [], # Noodles, japanese, soba, cooked
20116: [], # Noodles, japanese, somen, dry
20117: [], # Noodles, japanese, somen, cooked
20118: [], # Noodles, flat, crunchy, Chinese restaurant
20120: [], # Pasta, dry, enriched
20121: [], # Pasta, cooked, enriched, without added salt
20124: [
"Pasta",
"whole-wheat dry",
], # Pasta, whole-wheat, dry (Includes foods for USDA's Food Distribution Program)
20125: [], # Pasta, whole-wheat, cooked (Includes foods for USDA's Food Distribution Program)
20126: [], # Spaghetti, spinach, dry
20127: [], # Spaghetti, spinach, cooked
20129: [], # Wheat flours, bread, unenriched
20130: [], # Barley flour or meal
20131: [], # Barley malt flour
20132: [], # Oat flour, partially debranned
20133: [], # Rice noodles, dry
20134: [], # Rice noodles, cooked
20135: [], # Pasta, whole grain, 51% whole wheat, remaining unenriched semolina, dry
20136: [], # Pasta, whole grain, 51% whole wheat, remaining unenriched semolina, cooked
20137: [], # Quinoa, cooked
20138: [], # Wheat, KAMUT khorasan, uncooked
20139: [], # Wheat, KAMUT khorasan, cooked
20140: [], # Spelt, uncooked
20141: [], # Spelt, cooked
20142: [], # Teff, uncooked
20143: [], # Teff, cooked
20310: [], # Noodles, egg, cooked, enriched, with added salt
20314: [], # Corn grain, white
20315: [], # Corn flour, whole-grain, blue (harina de maiz morado)
20316: [], # Corn flour, whole-grain, white
20317: [], # Corn flour, yellow, masa, enriched
20320: [], # Cornmeal, whole-grain, white
20321: [], # Pasta, cooked, enriched, with added salt
20322: [], # Cornmeal, degermed, enriched, white
20323: [], # Cornmeal, white, self-rising, bolted, plain, enriched
20324: [], # Cornmeal, white, self-rising, bolted, with wheat flour added, enriched
20325: [], # Cornmeal, white, self-rising, degermed, enriched
20330: [], # Hominy, canned, yellow
20345: [], # Rice, white, long-grain, regular, cooked, enriched, with salt
20381: [], # Wheat flour, white, all-purpose, enriched, calcium-fortified
20409: [], # Noodles, egg, dry, unenriched
20410: [], # Noodles, egg, unenriched, cooked, without added salt
20420: [], # Pasta, dry, unenriched
20421: [], # Pasta, cooked, unenriched, without added salt
20422: [], # Cornmeal, degermed, unenriched, yellow
20444: [
"Rice",
"white long-grain regular",
], # Rice, white, long-grain, regular, raw, unenriched
20445: [], # Rice, white, long-grain, regular, unenriched, cooked without salt
20446: [], # Rice, white, long-grain, parboiled, unenriched, dry
20447: [], # Rice, white, long-grain, parboiled, unenriched, cooked
20450: ["Rice", "white medium-grain"], # Rice, white, medium-grain, raw, unenriched
20451: [], # Rice, white, medium-grain, cooked, unenriched
20452: ["Rice", "white short-grain"], # Rice, white, short-grain, raw, unenriched
20453: [], # Rice, white, short-grain, cooked, unenriched
20466: [], # Semolina, unenriched
20481: [
"Wheat flour",
"all-purpose unenriched",
], # Wheat flour, white, all-purpose, unenriched
20510: [], # Noodles, egg, cooked, unenriched, with added salt
20521: [], # Pasta, cooked, unenriched, with added salt
20522: [], # Cornmeal, degermed, unenriched, white
20523: [], # Spaghetti, protein-fortified, cooked, enriched (n x 6.25)
20545: [], # Rice, white, long-grain, regular, cooked, unenriched, with salt
20581: [], # Wheat flour, white, all-purpose, enriched, unbleached
20622: [], # Spaghetti, protein-fortified, dry, enriched (n x 6.25)
20623: [], # Wheat flour, white (industrial), 9% protein, bleached, enriched
20624: [], # Wheat flour, white (industrial), 9% protein, bleached, unenriched
20628: [], # Wheat flour, white (industrial), 10% protein, bleached, enriched
20629: [], # Wheat flour, white (industrial), 10% protein, bleached, unenriched
20630: [], # Wheat flour, white (industrial), 10% protein, unbleached, enriched
20634: [], # Wheat flour, white (industrial), 11.5% protein, bleached, enriched
20635: [], # Wheat flour, white (industrial), 11.5% protein, bleached, unenriched
20636: [], # Wheat flour, white (industrial), 11.5% protein, unbleached, enriched
20640: [], # Wheat flour, white (industrial), 13% protein, bleached, enriched
20641: [], # Wheat flour, white (industrial), 13% protein, bleached, unenriched
20645: [], # Wheat flour, white (industrial), 15% protein, bleached, enriched
20646: [], # Wheat flour, white (industrial), 15% protein, bleached, unenriched
20647: [], # Millet flour
20648: [], # Sorghum flour, whole-grain
20649: [], # Wheat flour, whole-grain, soft wheat
20650: [], # Sorghum flour, refined, unenriched
20651: [], # Rice, brown, parboiled, cooked, UNCLE BENS
20652: [], # Pasta, whole grain, 51% whole wheat, remaining enriched semolina, cooked (Includes foods for USDA's Food Distribution Program)
20653: [], # Pasta, whole grain, 51% whole wheat, remaining enriched semolina, dry (Includes foods for USDA's Food Distribution Program)
20654: [], # Pasta, gluten-free, brown rice flour, cooked, TINKYADA
20655: [], # Pasta, gluten-free, corn flour and quinoa flour, cooked, ANCIENT HARVEST
20656: [], # Pasta, gluten-free, rice flour and rice bran extract, cooked, DE BOLES
20657: [], # Pasta, gluten-free, corn and rice flour, cooked
21003: [], # Fast foods, biscuit, with egg and bacon
21004: [], # Fast foods, biscuit, with egg and ham
21005: [], # Fast Foods, biscuit, with egg and sausage
21007: [], # Fast foods, biscuit, with egg, cheese, and bacon
21008: [], # Fast foods, biscuit, with ham
21009: [], # Fast foods, biscuit, with sausage
21010: [], # Fast foods, biscuit, with crispy chicken fillet
21012: [], # Fast foods, croissant, with egg, cheese, and bacon
21013: [], # Fast foods, croissant, with egg, cheese, and ham
21014: [], # Fast foods, croissant, with egg, cheese, and sausage
21018: [], # Fast foods, egg, scrambled
21020: [], # Fast foods, english muffin, with cheese and sausage
21021: [], # Fast foods, english muffin, with egg, cheese, and canadian bacon
21022: [], # Fast foods, english muffin, with egg, cheese, and sausage
21024: [], # Fast foods, french toast sticks
21026: [], # Fast foods, potatoes, hash browns, round pieces or patty
21028: [], # Fast foods, vanilla, light, soft-serve ice cream, with cone
21032: [], # Fast foods, sundae, caramel
21033: [], # Fast foods, sundae, hot fudge
21034: [], # Fast foods, sundae, strawberry
21059: [], # Fast foods, shrimp, breaded and fried
21060: [], # Fast foods, burrito, with beans
21061: [], # Fast foods, burrito, with beans and cheese
21063: [], # Fast foods, burrito, with beans and beef
21064: [], # Fast foods, burrito, with beans, cheese, and beef
21078: [], # Fast foods, nachos, with cheese
21080: [], # Fast foods, nachos, with cheese, beans, ground beef, and tomatoes
21082: [], # Fast foods, taco with beef, cheese and lettuce, hard shell
21089: [], # Fast foods, cheeseburger; single, regular patty; plain
21090: [], # Fast foods, cheeseburger; single, regular patty, with condiments
21091: [], # Fast foods, cheeseburger; single, regular patty, with condiments and vegetables
21094: [], # Fast foods, cheeseburger, double, regular patty and bun, with condiments
21096: [], # Fast foods, cheeseburger; single, large patty; plain
21102: [], # Fast foods, chicken fillet sandwich, plain with pickles
21105: [], # Fast foods, fish sandwich, with tartar sauce
21106: [], # Fast foods, fish sandwich, with tartar sauce and cheese
21107: [], # Fast foods, hamburger; single, regular patty; plain
21108: [], # Fast foods, hamburger; single, regular patty; with condiments
21121: [], # Fast foods, roast beef sandwich, plain
21124: [], # Fast foods, submarine sandwich, cold cut on | |
<filename>scandeval/benchmarks/abstract/token_classification.py
'''Abstract token classification benchmark'''
from transformers import (DataCollatorForTokenClassification,
PreTrainedTokenizerBase)
from datasets import Dataset, load_metric
from functools import partial
from typing import Optional, Dict, List
import logging
from abc import ABC, abstractmethod
from tqdm.auto import tqdm
from .base import BaseBenchmark
from ...utils import InvalidBenchmark
logger = logging.getLogger(__name__)
class TokenClassificationBenchmark(BaseBenchmark, ABC):
'''Abstract token classification benchmark.
Args:
name (str):
The name of the dataset.
metric_names (dict):
A dictionary with the variable names of the metrics used in the
dataset as keys, and a more human readable name of them as values.
id2label (list or None, optional):
A list of all the labels, which is used to convert indices to their
labels. This will only be used if the pretrained model does not
already have one. Defaults to None.
label_synonyms (list of lists of str or None, optional):
A list of synonyms for each label. Every entry in `label_synonyms`
is a list of synonyms, where one of the synonyms is contained in
`id2label`. If None then no synonyms will be used. Defaults to
None.
evaluate_train (bool, optional):
Whether the models should be evaluated on the training scores.
Defaults to False.
cache_dir (str, optional):
Where the downloaded models will be stored. Defaults to
'.benchmark_models'.
two_labels (bool, optional):
Whether two labels should be predicted in the dataset. If this is
True then `split_point` has to be set. Defaults to False.
split_point (int or None, optional):
When there are two labels to be predicted, this is the index such
that `id2label[:split_point]` contains the labels for the first
label, and `id2label[split_point]` contains the labels for the
second label. Only relevant if `two_labels` is True. Defaults to
None.
verbose (bool, optional):
Whether to print additional output during evaluation. Defaults to
False.
Attributes:
name (str): The name of the dataset.
task (str): The type of task to be benchmarked.
metric_names (dict): The names of the metrics.
id2label (dict or None): A dictionary converting indices to labels.
label2id (dict or None): A dictionary converting labels to indices.
num_labels (int or None): The number of labels in the dataset.
label_synonyms (list of lists of str): Synonyms of the dataset labels.
evaluate_train (bool): Whether the training set should be evaluated.
cache_dir (str): Directory where models are cached.
two_labels (bool): Whether two labels should be predicted.
split_point (int or None): Splitting point of `id2label` into labels.
verbose (bool): Whether to print additional output.
'''
def __init__(self,
name: str,
metric_names: Dict[str, str],
id2label: list,
label_synonyms: Optional[List[List[str]]] = None,
evaluate_train: bool = False,
cache_dir: str = '.benchmark_models',
two_labels: bool = False,
split_point: Optional[int] = None,
verbose: bool = False):
self._metric = load_metric('seqeval')
super().__init__(task='token-classification',
name=name,
metric_names=metric_names,
id2label=id2label,
label_synonyms=label_synonyms,
cache_dir=cache_dir,
evaluate_train=evaluate_train,
two_labels=two_labels,
split_point=split_point,
verbose=verbose)
def _tokenize_and_align_labels(self,
                               examples: dict,
                               tokenizer,
                               label2id: dict):
    '''Tokenise all texts and align the labels with them.

    Subword tokenisation can split a word into several tokens, so the
    word-level NER labels must be propagated to token level. Only the
    first token of each word receives the real label; special tokens and
    word continuations receive -100, which the loss function ignores.

    Args:
        examples (dict):
            The examples to be tokenised; must contain 'tokens' (lists
            of words) and 'orig_labels' (word-level labels) entries.
        tokenizer (HuggingFace tokenizer):
            A pretrained tokenizer.
        label2id (dict):
            A dictionary that converts NER tags to IDs.

    Returns:
        dict:
            A dictionary containing the tokenized data as well as labels.

    Raises:
        InvalidBenchmark:
            If a label is missing from `label2id`, or if a slow
            tokenizer's output cannot be aligned with the words.
    '''
    tokenized_inputs = tokenizer(
        examples['tokens'],
        # We use this argument because the texts in our dataset are lists
        # of words (with a label for each word)
        is_split_into_words=True,
        truncation=True,
        padding=True
    )
    all_labels = []
    for i, labels in enumerate(examples['orig_labels']):
        try:
            word_ids = tokenized_inputs.word_ids(batch_index=i)

        # This happens if the tokenizer is not of the fast variant, in
        # which case the `word_ids` method is not available, so we have to
        # extract this manually. It's slower, but it works, and it should
        # only occur rarely, when the HuggingFace team has not implemented
        # a fast variant of the tokenizer yet.
        except ValueError:

            # Get the list of words in the document
            words = examples['tokens'][i]

            # Get the list of token IDs in the document
            tok_ids = tokenized_inputs.input_ids[i]

            # Decode the token IDs
            tokens = tokenizer.convert_ids_to_tokens(tok_ids)

            # Remove prefixes from the tokens (SentencePiece '▁' and
            # WordPiece '##' markers)
            prefixes_to_remove = ['▁', '##']
            for tok_idx, tok in enumerate(tokens):
                for prefix in prefixes_to_remove:
                    tok = tok.lstrip(prefix)
                tokens[tok_idx] = tok

            # Replace special tokens with `None`
            sp_toks = tokenizer.special_tokens_map.values()
            tokens = [None if tok in sp_toks else tok for tok in tokens]

            # Get the alignment between the words and the tokens, on a
            # character level: one (word_idx, token_idx) pair per character
            word_idxs = [word_idx for word_idx, word in enumerate(words)
                         for _ in str(word)]
            token_idxs = [tok_idx for tok_idx, tok in enumerate(tokens)
                          for _ in str(tok) if tok is not None]
            alignment = list(zip(word_idxs, token_idxs))

            # Raise error if there are not as many characters in the words
            # as in the tokens. This can be due to the use of a different
            # prefix.
            if len(word_idxs) != len(token_idxs):
                raise InvalidBenchmark('The tokens could not be aligned '
                                       'with the words during manual '
                                       'word-token alignment. It seems '
                                       'that the tokenizer is neither of '
                                       'the fast variant nor of a '
                                       'SentencePiece/WordPiece variant.')

            # Get the aligned word IDs: for each token, the index of the
            # word whose characters it covers (None for special tokens)
            word_ids = list()
            for tok_idx, tok in enumerate(tokens):
                if tok is None or tok == '':
                    word_ids.append(None)
                else:
                    word_idx = [word_idx
                                for word_idx, token_idx in alignment
                                if token_idx == tok_idx][0]
                    word_ids.append(word_idx)

        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:

            # Special tokens have a word id that is None. We set the label
            # to -100 so they are automatically ignored in the loss
            # function
            if word_idx is None:
                if self.two_labels:
                    label_ids.append([-100, -100])
                else:
                    label_ids.append(-100)

            # We set the label for the first token of each word
            elif word_idx != previous_word_idx:
                label = labels[word_idx]
                if self.two_labels:
                    # `label` is a pair; map both entries through label2id
                    try:
                        label_id1 = label2id[label[0]]
                    except KeyError:
                        msg = (f'The label {label[0]} was not found '
                               f'in the model\'s config.')
                        raise InvalidBenchmark(msg)
                    try:
                        label_id2 = label2id[label[1]]
                    except KeyError:
                        msg = (f'The label {label[1]} was not found '
                               f'in the model\'s config.')
                        raise InvalidBenchmark(msg)
                    label_id = [label_id1, label_id2]
                else:
                    try:
                        label_id = label2id[label]
                    except KeyError:
                        msg = (f'The label {label} was not found '
                               f'in the model\'s config.')
                        raise InvalidBenchmark(msg)
                label_ids.append(label_id)

            # For the other tokens in a word, we set the label to -100
            else:
                if self.two_labels:
                    label_ids.append([-100, -100])
                else:
                    label_ids.append(-100)

            previous_word_idx = word_idx

        all_labels.append(label_ids)
    tokenized_inputs['labels'] = all_labels
    return tokenized_inputs
def _preprocess_data(self,
                     dataset: Dataset,
                     framework: str,
                     **kwargs) -> Dataset:
    '''Preprocess a dataset by tokenizing and aligning the labels.

    Args:
        dataset (HuggingFace dataset):
            The dataset to preprocess.
        framework (str):
            The framework in use; 'pytorch' datasets are tokenised while
            'spacy' datasets are returned unchanged.
        kwargs:
            Extra keyword arguments containing objects used in
            preprocessing the dataset ('tokenizer' and 'config' for the
            PyTorch framework).

    Returns:
        HuggingFace dataset: The preprocessed dataset.
    '''
    # SpaCy performs its own tokenisation, so nothing to do here
    if framework == 'spacy':
        return dataset

    if framework == 'pytorch':
        # Bind the tokenizer and label mapping, then tokenise in batches
        tokenise_fn = partial(self._tokenize_and_align_labels,
                              tokenizer=kwargs['tokenizer'],
                              label2id=kwargs['config'].label2id)
        return dataset.map(tokenise_fn, batched=True)
def _load_data_collator(
        self,
        tokenizer: Optional[PreTrainedTokenizerBase] = None):
    '''Load the data collator used to prepare samples during finetuning.

    Args:
        tokenizer (HuggingFace tokenizer or None, optional):
            A pretrained tokenizer. Can be None if the tokenizer is not
            used in the initialisation of the data collator. Defaults to
            None.

    Returns:
        HuggingFace data collator: The data collator.
    '''
    # The ignore-index used to pad labels must mirror the label structure:
    # one -100 per label when two labels are predicted at once
    pad_token_id = [-100, -100] if self.two_labels else -100
    return DataCollatorForTokenClassification(
        tokenizer, label_pad_token_id=pad_token_id)
def _get_spacy_predictions_and_labels(self,
                                      model,
                                      dataset: Dataset,
                                      progress_bar: bool) -> tuple:
    '''Get predictions from SpaCy model on dataset.

    Args:
        model (SpaCy model): The model.
        dataset (HuggingFace dataset): The dataset.
        progress_bar (bool): Whether to display a progress bar.

    Returns:
        A pair of arrays:
            The first array contains the probability predictions and the
            second array contains the true labels.
    '''
    # Optionally wrap the documents in a progress bar
    docs = dataset['doc']
    if progress_bar:
        docs = tqdm(docs, desc='Evaluating model', leave=False)

    # Process all documents through the SpaCy pipeline in batches
    processed_docs = model.pipe(docs, batch_size=32)

    # Align each processed document's predictions with the gold tokens
    predictions = [self._extract_spacy_predictions(pair)
                   for pair in zip(dataset['tokens'], processed_docs)]
    return predictions, dataset['orig_labels']
def _extract_spacy_predictions(self, tokens_processed: tuple) -> list:
'''Helper function that extracts the predictions from a SpaCy model.
Aside from extracting the predictions from the model, it also aligns
the predictions with the gold tokens, in case the SpaCy tokeniser
tokenises the text different from those.
Args:
tokens_processed (tuple):
A pair of the labels, being a list of strings, and the SpaCy
processed document, being a Spacy `Doc` instance.
Returns:
list:
A list of predictions for each token, of the same length as the
gold tokens (first entry of `tokens_processed`).
'''
| |
<filename>cdp_scrapers/scraper_utils.py
import json
import re
from copy import deepcopy
from datetime import datetime, timedelta
from itertools import filterfalse, groupby
from logging import getLogger
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Set
import pytz
from cdp_backend.database.constants import RoleTitle
from cdp_backend.pipeline.ingestion_models import (
Body,
IngestionModel,
Person,
Role,
Seat,
)
from cdp_backend.utils.constants_utils import get_all_class_attr_values
from .types import ScraperStaticData
###############################################################################
log = getLogger(__name__)
###############################################################################
def reduced_list(input_list: List[Any], collapse: bool = True) -> Optional[List]:
    """
    Remove all None items from input_list.

    Parameters
    ----------
    input_list: List[Any]
        Input list from which to filter out items that are None
    collapse: bool, default = True
        If True, return None in place of an empty list

    Returns
    -------
    reduced_list: Optional[List]
        All items in the original list except for None values.
        None if all items were None and collapse is True.
    """
    kept = [entry for entry in input_list if entry is not None]
    # An all-None (or empty) input optionally collapses to None itself
    if collapse and not kept:
        return None
    return kept
def str_simplified(input_str: str) -> str:
    """
    Remove leading and trailing whitespaces, simplify multiple whitespaces,
    unify newline characters.

    Parameters
    ----------
    input_str: str

    Returns
    -------
    cleaned: str
        input_str stripped if it is a string; non-string input is
        returned unchanged.
    """
    # Pass non-strings through untouched
    if not isinstance(input_str, str):
        return input_str

    cleaned = input_str.strip()
    # Collapse any run of newline-like characters into a single \n
    cleaned = re.sub(r"[\r\n\f]+", r"\n", cleaned)
    # Collapse runs of horizontal whitespace into a single space
    cleaned = re.sub(r"[ \t\v]+", " ", cleaned)
    # UTF-8 round-trip; a no-op for well-formed text, kept for parity with
    # the original behaviour (it rejects e.g. lone surrogates)
    return cleaned.encode("utf-8").decode("utf-8")
def parse_static_person(
    person_json: Dict[str, Any],
    all_seats: Dict[str, Seat],
    primary_bodies: Dict[str, Body],
) -> Person:
    """
    Parse Dict[str, Any] for a person in static data file to a Person instance.

    person_json["seat"] and person_json["roles"] are validated against
    all_seats and primary_bodies in static data file.

    Parameters
    ----------
    person_json: Dict[str, Any]
        A dictionary in static data file with info for a Person.
    all_seats: Dict[str, Seat]
        Seats defined as top-level in static data file
    primary_bodies: Dict[str, Body]
        Bodies defined as top-level in static data file.

    Returns
    -------
    person: Person
        Person instance; seat and roles are attached only when they
        validate against all_seats / primary_bodies.

    See Also
    --------
    parse_static_file()
    sanitize_roles()
    """
    log.debug(f"Begin parsing static data for {person_json['name']}")
    person: Person = Person.from_dict(
        # "seat" and "roles" are not direct serializations of Seat/Role
        {k: v for k, v in person_json.items() if k != "seat" and k != "roles"}
    )
    # No seat reference at all; nothing further to attach
    if "seat" not in person_json:
        log.debug("Seat name not given")
        return person
    seat_name: str = person_json["seat"]
    # The seat must reference one defined at the top level of the file
    if seat_name not in all_seats:
        log.error(f"{seat_name} is not defined in top-level 'seats'")
        return person
    # Keep all_seats unmodified; we will append Roles to this person.seat below
    person.seat = deepcopy(all_seats[seat_name])
    if "roles" not in person_json:
        log.debug("Roles not given")
        return person
    # Role.title must be a RoleTitle constant so get all allowed values
    role_titles: List[str] = get_all_class_attr_values(RoleTitle)
    for role_json in person_json["roles"]:
        if (
            # if str, it is looked-up in primary_bodies
            isinstance(role_json["body"], str)
            and role_json["body"] not in primary_bodies
        ):
            log.error(
                f"{role_json} is ignored. "
                f"{role_json['body']} is not defined in top-level 'primary_bodies'"
            )
        elif role_json["title"] not in role_titles:
            log.error(
                f"{role_json} is ignored. "
                f"{role_json['title']} is not a RoleTitle constant."
            )
        else:
            # "body" needs special handling, so build the Role from the
            # remaining keys first
            role: Role = Role.from_dict(
                {k: v for k, v in role_json.items() if k != "body"}
            )
            if isinstance(role_json["body"], str):
                role.body = primary_bodies[role_json["body"]]
            else:
                # This role.body is a dictionary and defines a non-primary one
                # e.g. like a committee such as Transportation
                # that is not the main/full council
                role.body = Body.from_dict(role_json["body"])
            if person.seat.roles is None:
                person.seat.roles = [role]
            else:
                person.seat.roles.append(role)
    return person
def parse_static_file(file_path: Path) -> ScraperStaticData:
    """
    Parse Seats, Bodies and Persons from static data JSON

    Parameters
    ----------
    file_path: Path
        Path to file containing static data in JSON

    Returns
    -------
    ScraperStaticData:
        Tuple[Dict[str, Seat], Dict[str, Body], Dict[str, Person]]

    See Also
    -----
    parse_static_person()
    sanitize_roles()

    Notes
    -----
    Function looks for "seats", "primary_bodies", "persons" top-level keys
    """
    with open(file_path) as static_file:
        static_json: Dict[str, Dict[str, Any]] = json.load(static_file)

    # A missing top-level key simply yields an empty mapping
    seats: Dict[str, Seat] = {
        seat_name: Seat.from_dict(seat)
        for seat_name, seat in static_json.get("seats", {}).items()
    }
    primary_bodies: Dict[str, Body] = {
        body_name: Body.from_dict(body)
        for body_name, body in static_json.get("primary_bodies", {}).items()
    }
    # Person parsing validates seat/role references against the maps above
    known_persons: Dict[str, Person] = {
        person_name: parse_static_person(person, seats, primary_bodies)
        for person_name, person in static_json.get("persons", {}).items()
    }

    log.debug(
        f"ScraperStaticData parsed from {file_path}:\n"
        f"  seats: {list(seats.keys())}\n"
        f"  primary_bodies: {list(primary_bodies.keys())}\n"
        f"  persons: {list(known_persons.keys())}\n"
    )
    return ScraperStaticData(
        seats=seats, primary_bodies=primary_bodies, persons=known_persons
    )
def sanitize_roles(
person_name: str,
roles: Optional[List[Role]] = None,
static_data: Optional[ScraperStaticData] = None,
council_pres_patterns: List[str] = ["chair", "pres", "super"],
chair_patterns: List[str] = ["chair", "pres"],
) -> Optional[List[Role]]:
"""
1. Standardize roles[i].title to RoleTitle constants
2. Ensure only 1 councilmember Role per term
Parameters
----------
person_name: str
Sanitization target Person.name
roles: Optional[List[Role]] = None
target Person's Roles to sanitize
static_data: Optional[ScraperStaticData]
Static data defining primary council bodies and predefined Person.seat.roles.
See Notes.
council_pres_patterns: List[str]
Set roles[i].title as "Council President" if match
and roles[i].body is a primary body like City Council
chair_patterns: List[str]
Set roles[i].title as "Chair" if match
and roles[i].body is not a primary body
Notes
-----
Remove roles[#] if roles[#].body in static_data.primary_bodies.
Use static_data.persons[#].seat.roles instead.
If roles[i].body not in static_data.primary_bodies,
roles[i].title cannot be "Councilmember" or "Council President".
Use "City Council" and "Council Briefing"
if static_data.primary_bodies is empty.
"""
if roles is None:
roles = []
if not static_data or not static_data.primary_bodies:
# Primary/full council not defined in static data file.
# these are reasonably good defaults for most municipalities.
primary_body_names = ["city council", "council briefing"]
else:
primary_body_names = [
body_name.lower() for body_name in static_data.primary_bodies.keys()
]
try:
have_primary_roles = len(static_data.persons[person_name].seat.roles) > 0
except (KeyError, AttributeError, TypeError):
have_primary_roles = False
def _is_role_period_ok(role: Role) -> bool:
"""
Test that role.[start | end]_datetime is acceptable
"""
if role.start_datetime is None or role.end_datetime is None:
return False
if not have_primary_roles:
# no roles in static data; accept if this this role is current
return role.start_datetime.astimezone(
pytz.utc
) <= datetime.today().astimezone(pytz.utc) and datetime.today().astimezone(
pytz.utc
) <= role.end_datetime.astimezone(
pytz.utc
)
# accept if role coincides with one given in static data
for static_role in static_data.persons[person_name].seat.roles:
if (
static_role.start_datetime <= role.start_datetime
and role.end_datetime <= static_role.end_datetime
):
return True
return False
def _is_primary_body(role: Role) -> bool:
"""
Is role.body one of primary_bodies in static data file
"""
return (
role.body is not None
and role.body.name is not None
and str_simplified(role.body.name).lower() in primary_body_names
)
def _fix_primary_title(role: Role) -> str:
"""
Council president or Councilmember
"""
if (
role.title is None
or re.search(
"|".join(council_pres_patterns), str_simplified(role.title), re.I
)
is None
):
return RoleTitle.COUNCILMEMBER
return RoleTitle.COUNCILPRESIDENT
def _fix_nonprimary_title(role: Role) -> str:
"""
Not council president or councilmember
"""
if role.title is None:
return RoleTitle.MEMBER
role_title = str_simplified(role.title).lower()
# Role is not for a primary/full council
# Role.title cannot be Councilmember or Council President
if "vice" in role_title:
return RoleTitle.VICE_CHAIR
if "alt" in role_title:
return RoleTitle.ALTERNATE
if "super" in role_title:
return RoleTitle.SUPERVISOR
if re.search("|".join(chair_patterns), role_title, re.I) is not None:
return RoleTitle.CHAIR
return RoleTitle.MEMBER
def _is_councilmember_term(role: Role) -> bool:
return (
role.title == RoleTitle.COUNCILMEMBER
and role.start_datetime is not None
and role.end_datetime is not None
)
roles = list(
# drop dynamically scraped primary roles
# if primary roles are given in static data
filterfalse(
lambda role: have_primary_roles and _is_primary_body(role),
# filter out bad start_datetime, end_datetime
filter(_is_role_period_ok, roles),
)
)
# standardize titles
for role in filter(_is_primary_body, roles):
role.title = _fix_primary_title(role)
for role in filterfalse(_is_primary_body, roles):
role.title = _fix_nonprimary_title(role)
class CouncilMemberTerm(NamedTuple):
start_datetime: datetime
end_datetime: datetime
index_in_roles: int
# when checking for overlapping terms, we should do so per body.
# e.g. simultaneous councilmember roles in city council and in council briefing
# are completely acceptable and common.
scraped_member_roles_by_body: List[List[Role]] = [
list(roles_for_body)
for body_name, roles_for_body in groupby(
sorted(
filter(
# get all dynamically scraped councilmember terms
lambda role: not have_primary_roles
and _is_councilmember_term(role),
roles,
),
# sort from old to new role
key=lambda role: (
role.body.name,
role.start_datetime,
role.end_datetime,
),
),
# group by body
key=lambda role: role.body.name,
)
]
if have_primary_roles:
# don't forget to include info from the static data file
roles.extend(static_data.persons[person_name].seat.roles)
if len(scraped_member_roles_by_body) == 0:
# no Councilmember roles dynamically scraped
# nothing more to | |
L 1.364,6.52\
3 C 1.335,6.738 \
1.15,6.898 0.933\
,6.896 H -0.933 \
C -1.148,6.898 -\
1.331,6.74 -1.36\
1,6.526 L -1.524\
,5.403 C -1.746,\
5.301 -1.958,5.1\
78 -2.157,5.034 \
L -3.214,5.46 C \
-3.415,5.539 -3.\
643,5.457 -3.748\
,5.269 L -4.68,3\
.657 C -4.79,3.4\
7 -4.745,3.23 -4\
.574,3.095 l 0.8\
95,-0.7 c -0.028\
,-0.244 -0.028,-\
0.49 0,-0.735 L \
-4.575,0.961 C -\
4.743,0.826 -4.7\
88,0.588 -4.681,\
0.401 l 0.93,-1.\
61 c 0.107,-0.18\
8 0.336,-0.27 0.\
538,-0.193 l 1.0\
55,0.424 c 0.199\
,-0.144 0.411,-0\
.267 0.634,-0.36\
8 l 0.163,-1.12 \
c 0.028,-0.215 0\
.212,-0.375 0.42\
8,-0.374 h 1.866\
c 0.216,-0.001 \
0.399,0.157 0.43\
,0.37 l 0.162,1.\
124 c 0.222,0.10\
2 0.434,0.225 0.\
633,0.368 l 1.05\
9,-0.425 c 0.2,-\
0.079 0.428,0.00\
2 0.534,0.19 L 4\
.685,0.406 C 4.7\
9,0.593 4.744,0.\
828 4.577,0.961\x22\
\x0a st\
yle=\x22fill:#d2d2d\
2;fill-opacity:1\
;fill-rule:nonze\
ro;stroke:none\x22\x0a\
id=\
\x22path263\x22 />\x0a \
</g>\x0a \
<g\x0a \
id=\x22g265\x22\x0a \
transform=\x22\
translate(20.488\
9,22.5272)\x22>\x0a \
<path\x0a \
d=\x22m 0\
,0 c -1.12,0 -2.\
028,0.908 -2.028\
,2.028 0,1.121 0\
.908,2.029 2.028\
,2.029 1.12,0 2.\
028,-0.908 2.028\
,-2.029 C 2.027,\
0.909 1.12,0.001\
0,0 Z m 4.577,0\
.961 -0.895,0.7 \
c 0.028,0.244 0.\
028,0.491 0,0.73\
5 l 0.896,0.7 C \
4.745,3.232 4.79\
,3.469 4.683,3.6\
56 L 3.752,5.265\
C 3.645,5.454 3\
.417,5.536 3.214\
,5.459 L 2.159,5\
.035 C 1.961,5.1\
79 1.749,5.302 1\
.526,5.403 L 1.3\
64,6.523 C 1.335\
,6.738 1.15,6.89\
8 0.933,6.896 H \
-0.933 C -1.148,\
6.898 -1.331,6.7\
4 -1.361,6.526 L\
-1.524,5.403 C \
-1.746,5.301 -1.\
958,5.178 -2.157\
,5.034 L -3.214,\
5.46 C -3.415,5.\
539 -3.643,5.457\
-3.748,5.269 L \
-4.68,3.657 C -4\
.79,3.47 -4.745,\
3.23 -4.574,3.09\
5 l 0.895,-0.7 c\
-0.028,-0.244 -\
0.028,-0.49 0,-0\
.735 L -4.575,0.\
961 C -4.743,0.8\
26 -4.788,0.588 \
-4.681,0.401 l 0\
.93,-1.61 c 0.10\
7,-0.188 0.336,-\
0.27 0.538,-0.19\
3 l 1.055,0.424 \
c 0.199,-0.144 0\
.411,-0.267 0.63\
4,-0.368 l 0.163\
,-1.12 c 0.028,-\
0.215 0.212,-0.3\
75 0.428,-0.374 \
h 1.866 c 0.216,\
-0.001 0.399,0.1\
57 0.43,0.37 l 0\
.162,1.124 c 0.2\
22,0.102 0.434,0\
.225 0.633,0.368\
l 1.059,-0.425 \
c 0.2,-0.079 0.4\
28,0.002 0.534,0\
.19 L 4.685,0.40\
6 C 4.79,0.593 4\
.744,0.828 4.577\
,0.961 Z\x22\x0a \
style=\x22fi\
ll:none;stroke:#\
000000;stroke-wi\
dth:0.25;stroke-\
linecap:round;st\
roke-linejoin:ro\
und;stroke-miter\
limit:10;stroke-\
dasharray:none;s\
troke-opacity:1\x22\
\x0a id\
=\x22path267\x22 />\x0a \
</g>\x0a \
</g>\x0a </g>\x0a \
</g>\x0a <g\x0a \
id=\x22layer4\x22\x0a \
style=\x22display:\
none\x22\x0a trans\
form=\x22matrix(0.9\
8947512,0,0,0.98\
947512,0.0445553\
2,0.04455532)\x22>\x0a\
<g\x0a id\
=\x22g269\x22\x0a t\
ransform=\x22matrix\
(0.35277777,0,0,\
-0.35277777,-1.4\
110049,9.8776365\
)\x22>\x0a <g\x0a \
id=\x22g271\x22\x0a\
clip-pa\
th=\x22url(#clipPat\
h275)\x22>\x0a \
<g\x0a id\
=\x22g277\x22\x0a \
transform=\x22tr\
anslate(15.6063,\
5.5026)\x22>\x0a \
<path\x0a \
d=\x22m 0,0 \
c -2.493,0 -4.77\
1,0.92 -6.485,2.\
633 -2.106,2.107\
-2.818,4.478 -3\
.044,6.095 -0.11\
3,0.806 0.449,1.\
55 1.254,1.663 0\
.806,0.116 1.55,\
-0.449 1.664,-1.\
255 0.238,-1.704\
0.981,-3.191 2.\
21,-4.419 1.283,\
-1.284 3.058,-1.\
904 5.004,-1.746\
1.855,0.151 3.6\
55,0.974 4.938,2\
.258 1.182,1.182\
1.854,2.868 1.8\
43,4.626 -0.011,\
1.957 -0.843,3.8\
41 -2.342,5.304 \
-2.354,2.297 -5.\
588,2.244 -8.1,0\
.882 h 1.541 c 0\
.814,0 1.473,-0.\
659 1.473,-1.473\
0,-0.814 -0.659\
,-1.473 -1.473,-\
1.473 h -4.952 c\
-0.814,0 -1.473\
,0.659 -1.473,1.\
473 v 4.953 c 0,\
0.814 0.659,1.47\
3 1.473,1.473 0.\
813,0 1.472,-0.6\
59 1.472,-1.473 \
V 18.319 C -1.39\
8,20.536 3.524,2\
0.756 7.099,17.2\
67 9.166,15.25 1\
0.314,12.624 10.\
33,9.873 10.346,\
7.333 9.36,4.881\
7.624,3.145 5.8\
55,1.376 3.383,0\
.242 0.843,0.035\
0.56,0.012 0.27\
8,0 0,0\x22\x0a \
style=\x22fil\
l:#d2d2d2;fill-o\
pacity:1;fill-ru\
le:nonzero;strok\
e:none\x22\x0a \
id=\x22path279\
\x22 />\x0a </g\
>\x0a <g\x0a \
id=\x22g281\
\x22\x0a tra\
nsform=\x22translat\
e(15.6063,5.5026\
)\x22>\x0a <p\
ath\x0a \
d=\x22m 0,0 c -2.4\
93,0 -4.771,0.92\
-6.485,2.633 -2\
.106,2.107 -2.81\
8,4.478 -3.044,6\
.095 -0.113,0.80\
6 0.449,1.55 1.2\
54,1.663 0.806,0\
.116 1.55,-0.449\
1.664,-1.255 0.\
238,-1.704 0.981\
,-3.191 2.21,-4.\
419 1.283,-1.284\
3.058,-1.904 5.\
004,-1.746 1.855\
,0.151 3.655,0.9\
74 4.938,2.258 1\
.182,1.182 1.854\
,2.868 1.843,4.6\
26 -0.011,1.957 \
-0.843,3.841 -2.\
342,5.304 -2.354\
,2.297 -5.588,2.\
244 -8.1,0.882 h\
1.541 c 0.814,0\
1.473,-0.659 1.\
473,-1.473 0,-0.\
814 -0.659,-1.47\
3 -1.473,-1.473 \
h -4.952 c -0.81\
4,0 -1.473,0.659\
-1.473,1.473 v \
4.953 c 0,0.814 \
0.659,1.473 1.47\
3,1.473 0.813,0 \
1.472,-0.659 1.4\
72,-1.473 V 18.3\
19 C -1.398,20.5\
36 3.524,20.756 \
7.099,17.267 9.1\
66,15.25 10.314,\
12.624 10.33,9.8\
73 10.346,7.333 \
9.36,4.881 7.624\
,3.145 5.855,1.3\
76 3.383,0.242 0\
.843,0.035 0.56,\
0.012 0.278,0 0,\
0 Z\x22\x0a \
style=\x22fill:no\
ne;stroke:#00000\
0;stroke-width:0\
.25;stroke-linec\
ap:butt;stroke-l\
inejoin:miter;st\
roke-miterlimit:\
10;stroke-dashar\
ray:none;stroke-\
opacity:1\x22\x0a \
id=\x22path\
283\x22 />\x0a \
</g>\x0a </g>\x0a\
</g>\x0a </g>\x0a\
<g\x0a id=\x22la\
yer3\x22\x0a style\
=\x22display:none\x22\x0a\
transform=\x22\
matrix(0.9894723\
6,0,0,0.98947236\
,0.04456703,0.04\
456703)\x22>\x0a <g\
\x0a id=\x22g285\
\x22\x0a transfo\
rm=\x22matrix(0.352\
77777,0,0,-0.352\
77777,-1.5115564\
,9.8777054)\x22>\x0a \
<g\x0a \
id=\x22g287\x22\x0a \
clip-path=\x22ur\
l(#clipPath291)\x22\
>\x0a <g\x0a \
id=\x22g293\
\x22\x0a tra\
nsform=\x22translat\
e(16.2831,11.625\
8)\x22>\x0a <\
path\x0a \
d=\x22m 0,0 c -2.\
416,0 -4.374,1.9\
58 -4.374,4.374 \
0,2.415 1.958,4.\
373 4.374,4.373 \
2.416,0 4.374,-1\
.958 4.374,-4.37\
3 C 4.371,1.959 \
2.414,0.003 0,0 \
m 9.869,2.073 -1\
.93,1.509 c 0.06\
,0.527 0.06,1.05\
8 0,1.585 l 1.93\
2,1.51 c 0.362,0\
.291 0.458,0.803\
0.228,1.206 l -\
2.008,3.47 C 7.8\
6,11.759 7.368,1\
1.937 6.931,11.7\
71 L 4.656,10.85\
6 C 4.229,11.166\
3.771,11.432 3.\
29,11.65 L 2.941\
,14.066 C 2.878,\
14.53 2.48,14.87\
4 2.012,14.871 h\
-4.024 c -0.464\
,0.002 -0.859,-0\
.339 -0.924,-0.7\
98 L -3.285,11.6\
5 c -0.48,-0.22 \
-0.937,-0.486 -1\
.365,-0.794 l -2\
.281,0.916 c -0.\
432,0.171 -0.924\
,-0.004 -1.152,-\
0.41 l -2.009,-3\
.476 c -0.236,-0\
.404 -0.139,-0.9\
21 0.228,-1.212 \
l 1.93,-1.509 c \
-0.06,-0.526 -0.\
06,-1.058 0,-1.5\
85 L -9.865,2.07\
1 c -0.363,-0.29\
1 -0.46,-0.802 -\
0.23,-1.206 l 2.\
007,-3.471 c 0.2\
31,-0.406 0.724,\
-0.584 1.161,-0.\
418 l 2.274,0.91\
5 c 0.428,-0.31 \
0.886,-0.576 1.3\
67,-0.794 l 0.35\
,-2.415 c 0.062,\
-0.462 0.457,-0.\
807 0.924,-0.805\
h 4.024 c 0.465\
,-0.004 0.861,0.\
337 0.926,0.798 \
l 0.35,2.423 c 0\
.48,0.219 0.937,\
0.485 1.365,0.79\
4 l 2.283,-0.918\
c 0.432,-0.17 0\
.924,0.005 1.152\
,0.41 l 2.015,3.\
491 c 0.226,0.40\
3 0.127,0.91 -0.\
234,1.198\x22\x0a \
style=\x22f\
ill:#d2d2d2;fill\
-opacity:1;fill-\
rule:nonzero;str\
oke:none\x22\x0a \
id=\x22path2\
95\x22 />\x0a <\
/g>\x0a <g\x0a \
id=\x22g2\
97\x22\x0a t\
ransform=\x22transl\
ate(16.2831,11.6\
258)\x22>\x0a \
<path\x0a \
d=\x22m 0,0 c -\
2.416,0 -4.374,1\
.958 -4.374,4.37\
4 0,2.415 1.958,\
4.373 4.374,4.37\
3 2.416,0 4.374,\
-1.958 4.374,-4.\
373 C 4.371,1.95\
9 2.414,0.003 0,\
0 Z m 9.869,2.07\
3 -1.93,1.509 c \
0.06,0.527 0.06,\
1.058 0,1.585 l \
1.932,1.51 c 0.3\
62,0.291 0.458,0\
.803 0.228,1.206\
l -2.008,3.47 C\
7.86,11.759 7.3\
68,11.937 6.931,\
11.771 L 4.656,1\
0.856 C 4.229,11\
.166 3.771,11.43\
2 3.29,11.65 L 2\
.941,14.066 C 2.\
878,14.53 2.48,1\
4.874 2.012,14.8\
71 h -4.024 c -0\
.464,0.002 -0.85\
9,-0.339 -0.924,\
-0.798 L -3.285,\
11.65 c -0.48,-0\
.22 -0.937,-0.48\
6 -1.365,-0.794 \
l -2.281,0.916 c\
-0.432,0.171 -0\
.924,-0.004 -1.1\
52,-0.41 l -2.00\
9,-3.476 c -0.23\
6,-0.404 -0.139,\
-0.921 0.228,-1.\
212 l 1.93,-1.50\
9 c -0.06,-0.526\
-0.06,-1.058 0,\
-1.585 L -9.865,\
2.071 c -0.363,-\
0.291 -0.46,-0.8\
02 -0.23,-1.206 \
l 2.007,-3.471 c\
0.231,-0.406 0.\
724,-0.584 1.161\
,-0.418 l 2.274,\
0.915 c 0.428,-0\
.31 0.886,-0.576\
1.367,-0.794 l \
0.35,-2.415 c 0.\
062,-0.462 0.457\
,-0.807 0.924,-0\
.805 h 4.024 c 0\
.465,-0.004 0.86\
1,0.337 0.926,0.\
798 l 0.35,2.423\
c 0.48,0.219 0.\
937,0.485 1.365,\
0.794 l 2.283,-0\
.918 c 0.432,-0.\
17 0.924,0.005 1\
.152,0.41 l 2.01\
5,3.491 c 0.226,\
0.403 0.127,0.91\
-0.234,1.198 z\x22\
\x0a st\
yle=\x22fill:none;s\
troke:#000000;st\
roke-width:0.25;\
stroke-linecap:r\
ound;stroke-line\
join:round;strok\
e-miterlimit:10;\
stroke-dasharray\
:none;stroke-opa\
city:1\x22\x0a \
id=\x22path299\
\x22 />\x0a </g\
>\x0a </g>\x0a \
</g>\x0a </g>\x0a <\
g\x0a id=\x22layer\
2\x22\x0a style=\x22d\
isplay:none\x22\x0a \
transform=\x22mat\
rix(0.98033062,0\
,0,0.98033062,0.\
08326703,0.08326\
703)\x22>\x0a <g\x0a \
style=\x22disp\
lay:inline\x22\x0a \
id=\x22g301\x22\x0a \
transform=\x22m\
atrix(0.35277777\
,0,0,-0.35277777\
,-1.411111,9.701\
3888)\x22>\x0a <g\
\x0a id=\x22g3\
03\x22\x0a cli\
p-path=\x22url(#cli\
pPath307)\x22>\x0a \
<g\x0a \
id=\x22g309\x22\x0a \
transform\
=\x22translate(16,2\
4.0835)\x22>\x0a \
<path\x0a \
d=\x22m 0,0 \
c -4.733,0 -8.58\
3,-3.851 -8.583,\
-8.583 0,-4.733 \
3.85,-8.584 8.58\
3,-8.584 4.732,0\
8.583,3.851 8.5\
83,8.584 C 8.583\
,-3.851 4.732,0 \
0,0 m 0,-19.167 \
c -5.836,0 -10.5\
83,4.748 -10.583\
,10.584 C -10.58\
3,-2.748 -5.836,\
2 0,2 5.836,2 10\
.583,-2.748 10.5\
83,-8.583 10.583\
,-14.419 5.836,-\
19.167 0,-19.167\
\x22\x0a s\
tyle=\x22fill:#d2d2\
d2;fill-opacity:\
1;fill-rule:nonz\
ero;stroke:none\x22\
\x0a id\
=\x22path311\x22 />\x0a \
</g>\x0a \
<g\x0a \
id=\x22g313\x22\x0a \
transform=\
\x22translate(16,24\
.0835)\x22\x0a \
style=\x22displa\
y:inline\x22>\x0a \
<path\x0a \
d=\x22m 0,0\
c -4.733,0 -8.5\
83,-3.851 -8.583\
,-8.583 0,-4.733\
3.85,-8.584 8.5\
83,-8.584 4.732,\
0 8.583,3.851 8.\
583,8.584 C 8.58\
3,-3.851 4.732,0\
0,0 Z m 0,-19.1\
67 c -5.836,0 -1\
0.583,4.748 -10.\
583,10.584 C -10\
.583,-2.748 -5.8\
36,2 0,2 5.836,2\
10.583,-2.748 1\
0.583,-8.583 10.\
583,-14.419 5.83\
6,-19.167 0,-19.\
167 Z\x22\x0a \
style=\x22fill:\
none;stroke:#000\
000;stroke-width\
:0.25;stroke-lin\
ecap:butt;stroke\
-linejoin:miter;\
stroke-miterlimi\
t:10;stroke-dash\
array:none;strok\
e-opacity:1\x22\x0a \
id=\x22pa\
th315\x22 />\x0a \
</g>\x0a <\
g\x0a id=\
\x22g317\x22\x0a \
transform=\x22tra\
nslate(16,8.4795\
)\x22>\x0a <p\
ath\x0a \
d=\x22m 0,0 c -0.5\
52,0 -1,0.447 -1\
,1 v 8.437 c 0,0\
.552 0.448,1 1,1\
0.552,0 1,-0.44\
8 1,-1 V 1 C 1,0\
.447 0.552,0 0,0\
\x22\x0a s\
tyle=\x22fill:#d2d2\
d2;fill-opacity:\
1;fill-rule:nonz\
ero;stroke:none\x22\
\x0a id\
=\x22path319\x22 />\x0a \
</g>\x0a \
<g\x0a \
id=\x22g321\x22\x0a \
transform=\
\x22translate(16,8.\
4795)\x22\x0a \
style=\x22display\
:inline\x22>\x0a \
<path\x0a \
d=\x22m 0,0 \
c -0.552,0 -1,0.\
447 -1,1 v 8.437\
c 0,0.552 0.448\
,1 1,1 0.552,0 1\
,-0.448 1,-1 V 1\
C 1,0.447 0.552\
,0 0,0 Z\x22\x0a \
style=\x22fi\
ll:none;stroke:#\
000000;stroke-wi\
dth:0.25;stroke-\
linecap:butt;str\
oke-linejoin:mit\
er;stroke-miterl\
imit:10;stroke-d\
asharray:none;st\
roke-opacity:1\x22\x0a\
id=\
\x22path323\x22 />\x0a \
</g>\x0a \
<g\x0a \
id=\x22g325\x22\x0a \
transform=\x22\
translate(14.937\
5,21.2445)\x22\x0a \
style=\x22di\
splay:inline\x22>\x0a \
<path\x0a \
d=\x22M\
0,0 C 0,0.587 0\
.476,1.062 1.062\
,1.062 1.649,1.0\
62 2.125,0.587 2\
.125,0 2.125,-0.\
587 1.649,-1.062\
1.062,-1.062 0.\
476,-1.062 0,-0.\
587 0,0\x22\x0a \
style=\x22fil\
l:#d2d2d2;fill-o\
pacity:1;fill-ru\
le:nonzero;strok\
e:none\x22\x0a \
id=\x22path327\
\x22 />\x0a </g\
>\x0a <g\x0a \
id=\x22g329\
\x22\x0a tra\
nsform=\x22translat\
e(14.9375,21.244\
5)\x22>\x0a <\
path\x0a \
d=\x22M 0,0 C 0,0\
.587 0.476,1.062\
1.062,1.062 1.6\
49,1.062 2.125,0\
.587 2.125,0 2.1\
25,-0.587 1.649,\
-1.062 1.062,-1.\
062 0.476,-1.062\
0,-0.587 0,0 Z\x22\
\x0a st\
yle=\x22fill:none;s\
troke:#000000;st\
roke-width:0.25;\
stroke-linecap:b\
utt;stroke-linej\
oin:miter;stroke\
-miterlimit:10;s\
troke-dasharray:\
none;stroke-opac\
ity:1\x22\x0a \
id=\x22path331\x22\
/>\x0a </g>\
\x0a </g>\x0a \
</g>\x0a </g>\x0a</sv\
g>\x0a\
\x00\x00Ld\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22 standalone=\x22\
no\x22?>\x0a<svg\x0a xm\
lns:dc=\x22http://p\
url.org/dc/eleme\
nts/1.1/\x22\x0a xml\
ns:cc=\x22http://cr\
eativecommons.or\
g/ns#\x22\x0a xmlns:\
rdf=\x22http://www.\
w3.org/1999/02/2\
2-rdf-syntax-ns#\
\x22\x0a xmlns:svg=\x22\
http://www.w3.or\
g/2000/svg\x22\x0a x\
mlns=\x22http://www\
.w3.org/2000/svg\
\x22\x0a width=\x2232\x22\x0a\
height=\x2232\x22\x0a \
viewBox=\x220 0 8\
.4666668 8.46666\
66\x22\x0a version=\x22\
1.1\x22\x0a id=\x22svg8\
\x22>\x0a <defs\x0a \
id=\x22defs2\x22>\x0a \
<clipPath\x0a \
id=\x22clipPath203\
\x22\x0a clipPat\
hUnits=\x22userSpac\
eOnUse\x22>\x0a <\
path\x0a id\
=\x22path201\x22\x0a \
d=\x22M 0,32 H \
32 V 0 H 0 Z\x22 />\
\x0a </clipPath>\
\x0a <clipPath\x0a \
id=\x22clipPa\
th239\x22\x0a cl\
ipPathUnits=\x22use\
rSpaceOnUse\x22>\x0a \
<path\x0a \
id=\x22path237\x22\x0a\
d=\x22M 0,\
32 H 32 V 0 H 0 \
Z\x22 />\x0a </clip\
Path>\x0a <clipP\
ath\x0a id=\x22c\
lipPath275\x22\x0a \
clipPathUnits\
=\x22userSpaceOnUse\
\x22>\x0a <path\x0a \
id=\x22path\
273\x22\x0a d=\
\x22M 0,32 H 32 V 0\
H 0 Z\x22 />\x0a <\
/clipPath>\x0a <\
clipPath\x0a \
id=\x22clipPath291\x22\
\x0a clipPath\
Units=\x22userSpace\
OnUse\x22>\x0a <p\
ath\x0a id=\
\x22path289\x22\x0a \
d=\x22M 0,32 H 3\
2 V 0 H 0 Z\x22 />\x0a\
</clipPath>\x0a\
<clipPath\x0a \
id=\x22clipPat\
h307\x22\x0a cli\
pPathUnits=\x22user\
SpaceOnUse\x22>\x0a \
<path\x0a \
id=\x22path305\x22\x0a \
d=\x22M 0,3\
2 H 32 V 0 H 0 Z\
\x22 />\x0a </clipP\
ath>\x0a </defs>\x0a \
<metadata\x0a \
id=\x22metadata5\x22>\x0a\
<rdf:RDF>\x0a \
<cc:Work\x0a \
rdf:about=\
\x22\x22>\x0a <dc:\
format>image/svg\
+xml</dc:format>\
\x0a <dc:typ\
e\x0a rdf\
:resource=\x22http:\
//purl.org/dc/dc\
mitype/StillImag\
e\x22 />\x0a <d\
c:title></dc:tit\
le>\x0a </cc:W\
ork>\x0a </rdf:R\
DF>\x0a </metadata\
>\x0a <g\x0a id=\x22\
layer1\x22\x0a sty\
le=\x22display:none\
\x22\x0a transform\
=\x22matrix(0.78228\
577,0,0,0.782285\
77,-106.86137,-1\
00.39763)\x22>\x0a \
<g\x0a id=\x22g1\
97\x22\x0a trans\
form=\x22matrix(0.3\
5277777,0,0,-0.3\
5277777,136.3684\
5,139.43964)\x22>\x0a \
<g\x0a \
id=\x22g199\x22\x0a \
clip-path=\x22u\
rl(#clipPath203)\
\x22>\x0a <g\x0a \
id=\x22g20\
5\x22\x0a tr\
ansform=\x22transla\
te(6.7698,17.319\
)\x22>\x0a <p\
ath\x0a \
d=\x22m 0,0 v -8.9\
72 c 0,-0.924 0.\
868,-1.365 0.868\
,-1.365 0,0 6.82\
9,-3.849 7.055,-\
3.965 0.226,-0.1\
17 0.433,-0.2 0.\
922,-0.2 h 0.762\
c 0.5,0 0.69,0.\
068 0.923,0.2 0.\
233,0.131 7.054,\
3.964 7.054,3.96\
4 0,0 0.848,0.48\
2 0.868,1.366 0.\
019,0.884 0,8.97\
2 0,8.972 0,0 0,\
0.61 -0.609,0.55\
2 C 17.195,0.491\
13.717,-0.347 9\
.226,-0.347 4.73\
4,-0.347 1.403,0\
.418 0.83,0.528 \
0.363,0.618 0,0.\
447 0,0\x22\x0a \
style=\x22fil\
l:#bdbdbd;fill-o\
pacity:1;fill-ru\
le:nonzero;strok\
e:none\x22\x0a \
id=\x22path207\
\x22 />\x0a </g\
>\x0a <g\x0a \
id=\x22g209\
\x22\x0a tra\
nsform=\x22translat\
e(6.7698,17.319)\
\x22>\x0a <pa\
th\x0a \
d=\x22m 0,0 v -8.97\
2 c 0,-0.924 0.8\
68,-1.365 0.868,\
-1.365 0,0 6.829\
,-3.849 7.055,-3\
.965 0.226,-0.11\
7 0.433,-0.2 0.9\
22,-0.2 h 0.762 \
c 0.5,0 0.69,0.0\
68 0.923,0.2 0.2\
33,0.131 7.054,3\
.964 7.054,3.964\
0,0 0.848,0.482\
0.868,1.366 0.0\
19,0.884 0,8.972\
0,8.972 0,0 0,0\
.61 -0.609,0.552\
C 17.195,0.491 \
13.717,-0.347 9.\
226,-0.347 4.734\
,-0.347 1.403,0.\
418 0.83,0.528 0\
.363,0.618 0,0.4\
47 0,0 Z\x22\x0a \
style=\x22fi\
ll:none;stroke:#\
000000;stroke-wi\
dth:0.25;stroke-\
linecap:butt;str\
oke-linejoin:mit\
er;stroke-miterl\
imit:10;stroke-d\
asharray:none;st\
roke-opacity:1\x22\x0a\
id=\
\x22path211\x22 />\x0a \
</g>\x0a \
<g\x0a \
id=\x22g213\x22\x0a \
transform=\x22\
translate(8.9101\
,15.678)\x22>\x0a \
<path\x0a \
d=\x22M 0,0\
V -0.495\x22\x0a \
style=\x22f\
ill:none;stroke:\
#000000;stroke-w\
idth:0.5;stroke-\
linecap:round;st\
roke-linejoin:ro\
und;stroke-miter\
limit:4;stroke-d\
asharray:none;st\
roke-opacity:1\x22\x0a\
id=\
\x22path215\x22 />\x0a \
</g>\x0a \
<g\x0a \
id=\x22g217\x22\x0a \
transform=\x22\
translate(8.9101\
,14.1543)\x22>\x0a \
<path\x0a \
d=\x22m 0,\
0 v -5.368 c 0,-\
0.709 0.666,-1.0\
49 0.666,-1.049 \
0,0 5.246,-2.955\
5.42,-3.045 0.1\
73,-0.09 0.333,-\
0.154 0.708,-0.1\
54 H 7.38 c 0.38\
4,0 0.529,0.053 \
0.708,0.154 0.17\
9,0.1 5.419,3.04\
5 5.419,3.045 0,\
0 0.651,0.37 0.6\
66,1.049 0.011,0\
.506 0.006,4.083\
0.002,5.875\x22\x0a \
style\
=\x22fill:none;stro\
ke:#000000;strok\
e-width:0.5;stro\
ke-linecap:round\
;stroke-linejoin\
:round;stroke-mi\
terlimit:4;strok\
e-dasharray:1.03\
, 1.03;stroke-da\
shoffset:0;strok\
e-opacity:1\x22\x0a \
id=\x22pa\
th219\x22 />\x0a \
</g>\x0a <\
g\x0a id=\
\x22g221\x22\x0a \
transform=\x22tra\
nslate(23.0846,1\
5.1807)\x22>\x0a \
| |
of our managed networks
LOG.warning("Lease for unknown network found in "
"dnsmasq.leases file: {}".format(columns))
continue
interface_name = self._find_local_interface_name(
network_type
)
self._dhcp_release(interface_name, ip_address, mac_address)
except Exception as e:
LOG.error("Failed to remove leases for %s: %s" % (mac_address,
str(e)))
def _remove_lease_for_address(self, hostname, network_type):
    """Release the DHCP lease associated with a host address.

    Looks up the address record by its formatted name and resolves the
    MAC address either from the owning interface or, for the management
    network, from the host record itself, then issues a dhcp_release.
    Failures are logged and swallowed: lease removal is best-effort.
    """
    address_name = cutils.format_address_name(hostname, network_type)
    try:
        interface_name = self._find_local_interface_name(network_type)
        if not interface_name:
            return
        address = self.dbapi.address_get_by_name(address_name)
        ip_address = address.address
        if address.interface_uuid:
            interface = self.dbapi.iinterface_get(address.interface_uuid)
            mac_address = interface.imac
        elif network_type == constants.NETWORK_TYPE_MGMT:
            # No owning interface: fall back to the host's management MAC.
            ihost = self.dbapi.ihost_get_by_hostname(hostname)
            mac_address = ihost.mgmt_mac
        else:
            return
        cid = cutils.get_dhcp_cid(hostname, network_type, mac_address)
        self._dhcp_release(interface_name, ip_address, mac_address, cid)
    except Exception as e:
        LOG.error("Failed to remove lease %s: %s" % (address_name,
                                                     str(e)))
def _unallocate_address(self, hostname, network_type):
    """Detach an allocated address from its interface, if present.

    For the management network the DHCP lease is released first.  A
    missing address record is not treated as an error.
    """
    address_name = cutils.format_address_name(hostname, network_type)
    if network_type == constants.NETWORK_TYPE_MGMT:
        self._remove_lease_for_address(hostname, network_type)
    try:
        address = self.dbapi.address_get_by_name(address_name)
        self.dbapi.address_remove_interface(address.uuid)
    except exception.AddressNotFoundByName:
        pass
def _remove_address(self, hostname, network_type):
    """Destroy the address record for a host, if one exists.

    Releases the DHCP lease first; missing records are ignored.
    """
    address_name = cutils.format_address_name(hostname, network_type)
    self._remove_lease_for_address(hostname, network_type)
    try:
        address = self.dbapi.address_get_by_name(address_name)
        self.dbapi.address_destroy(address.uuid)
    except (exception.AddressNotFoundByName, exception.AddressNotFound):
        pass
def _unallocate_addresses_for_host(self, host):
    """Unallocates management addresses for a given host.

    :param host: host object
    """
    name = host.hostname
    self._unallocate_address(name, constants.NETWORK_TYPE_MGMT)
    if host.personality == constants.CONTROLLER:
        # Controllers additionally own OAM and PXE-boot addresses.
        for net_type in (constants.NETWORK_TYPE_OAM,
                         constants.NETWORK_TYPE_PXEBOOT):
            self._unallocate_address(name, net_type)
    self._remove_leases_by_mac_address(host.mgmt_mac)
    self._generate_dnsmasq_hosts_file(deleted_host=host)
def _remove_addresses_for_host(self, host):
    """Removes management addresses for a given host.

    :param host: host object
    """
    self._remove_address(host.hostname, constants.NETWORK_TYPE_MGMT)
    self._remove_leases_by_mac_address(host.mgmt_mac)
    # Regenerate the dnsmasq hosts file without the deleted host.
    self._generate_dnsmasq_hosts_file(deleted_host=host)
def _configure_controller_host(self, context, host):
    """Configure a controller host with the supplied data.
    Does the following tasks:
    - Update the puppet hiera data configuration for host
    - Allocates management address if none exists
    - Set up PXE configuration to run installer
    - Update grub for AIO before initial unlock
    :param context: request context
    :param host: host object
    """
    # Only update the config if the host is running the same version as
    # the active controller.
    if self.host_load_matches_sw_version(host):
        if (host.administrative == constants.ADMIN_UNLOCKED or
                host.action == constants.FORCE_UNLOCK_ACTION or
                host.action == constants.UNLOCK_ACTION):
            # Update host configuration
            self._puppet.update_host_config(host)
    else:
        LOG.info("Host %s is not running active load. "
                 "Skipping manifest generation" % host.hostname)
    self._allocate_addresses_for_host(context, host)
    # Set up the PXE config file for this host so it can run the installer
    self._update_pxe_config(host)
    self._ceph_mon_create(host)
    # NOTE(review): the bootstrap flag plus an online availability is taken
    # to mean this is the initial unlock after ansible bootstrap.
    if (os.path.isfile(constants.ANSIBLE_BOOTSTRAP_FLAG) and
            host.availability == constants.AVAILABILITY_ONLINE):
        # This must be the initial controller host unlock request.
        personalities = [constants.CONTROLLER]
        if not cutils.is_aio_system(self.dbapi):
            # Standard system, touch the unlock ready flag
            cutils.touch(constants.UNLOCK_READY_FLAG)
        else:
            # AIO, must update grub before the unlock. Sysinv agent expects
            # this exact set of manifests in order to touch the unlock ready
            # flag after they have been applied.
            config_uuid = self._config_update_hosts(context, personalities,
                                                    host_uuids=[host.uuid])
            if self._config_is_reboot_required(host.config_target):
                config_uuid = self._config_set_reboot_required(config_uuid)
            config_dict = {
                "personalities": personalities,
                "host_uuids": [host.uuid],
                "classes": ['platform::compute::grub::runtime',
                            'platform::compute::config::runtime']
            }
            self._config_apply_runtime_manifest(
                context, config_uuid, config_dict, force=True)
            # Regenerate config target uuid, node is going for reboot!
            config_uuid = self._config_update_hosts(context, personalities)
            if self._config_is_reboot_required(host.config_target):
                config_uuid = self._config_set_reboot_required(config_uuid)
            self._puppet.update_host_config(host, config_uuid)
def _ceph_mon_create(self, host):
    """Create a ceph_mon DB record for a host when the ceph backend is
    configured and the host does not already have one.

    The monitor size (ceph_mon_gib) is copied from an existing monitor
    when one exists, otherwise it is left unset.
    """
    if not StorageBackendConfig.has_backend(
            self.dbapi,
            constants.CINDER_BACKEND_CEPH):
        return
    if self.dbapi.ceph_mon_get_by_ihost(host.uuid):
        return
    system = self.dbapi.isystem_get_one()
    existing_mons = self.dbapi.ceph_mon_get_list()
    ceph_mon_gib = existing_mons[0].ceph_mon_gib if existing_mons else None
    LOG.info("creating ceph_mon for host %s with ceph_mon_gib=%s."
             % (host.hostname, ceph_mon_gib))
    self.dbapi.ceph_mon_create({
        'forisystemid': system.id,
        'forihostid': host.id,
        'ceph_mon_gib': ceph_mon_gib,
        'state': constants.SB_STATE_CONFIGURED,
        'task': constants.SB_TASK_NONE,
    })
def _remove_ceph_mon(self, host):
    """Delete the ceph monitor DB record and cluster monitor for a host.

    No-op when the ceph backend is absent or the host has no monitor.
    """
    if not StorageBackendConfig.has_backend(
            self.dbapi,
            constants.CINDER_BACKEND_CEPH):
        return
    mon = self.dbapi.ceph_mon_get_by_ihost(host.uuid)
    if not mon:
        LOG.info("No ceph monitor present for host %s. "
                 "Skipping deleting ceph monitor."
                 % str(host.hostname))
        return
    LOG.info("Deleting ceph monitor for host %s"
             % str(host.hostname))
    self.dbapi.ceph_mon_destroy(mon[0].uuid)
    self._ceph.remove_ceph_monitor(host.hostname)
def update_remotelogging_config(self, context):
    """Update the remotelogging configuration"""
    all_personalities = [constants.CONTROLLER,
                         constants.WORKER,
                         constants.STORAGE]
    config_uuid = self._config_update_hosts(context, all_personalities)
    # Controllers additionally need their sysctl settings refreshed.
    self._config_apply_runtime_manifest(context, config_uuid, {
        "personalities": [constants.CONTROLLER],
        "classes": ['platform::sysctl::controller::runtime',
                    'platform::remotelogging::runtime']
    })
    self._config_apply_runtime_manifest(context, config_uuid, {
        "personalities": [constants.WORKER, constants.STORAGE],
        "classes": ['platform::remotelogging::runtime'],
    })
def docker_registry_image_list(self, context):
    """Return [{'name': <repository>}, ...] for every repository in the
    local docker registry; empty list on a bad registry response."""
    response = docker_registry.docker_registry_get("_catalog")
    if response.status_code != 200:
        LOG.error("Bad response from docker registry: %s"
                  % response.status_code)
        return []
    payload = response.json()
    # The registry answers {u'repositories': [u'ns/img', ...]}; reshape
    # that into a list of {'name': ...} dicts.
    return [{'name': image} for image in payload.get('repositories', [])]
def docker_registry_image_tags(self, context, image_name):
    """Return [{'name': image_name, 'tag': <tag>}, ...] for an image;
    empty list on a bad registry response or when no tags remain."""
    response = docker_registry.docker_registry_get(
        "%s/tags/list" % image_name)
    if response.status_code != 200:
        LOG.error("Bad response from docker registry: %s"
                  % response.status_code)
        return []
    payload = response.json()
    # When every tag of an image has been deleted but not yet garbage
    # collected the registry reports "tags": null.
    tag_list = payload.get('tags') or []
    return [{'name': image_name, 'tag': tag} for tag in tag_list]
# assumes image_name_and_tag is already error checked to contain "name:tag"
def docker_registry_image_delete(self, context, image_name_and_tag):
    """Delete an image ("name:tag") from the local docker registry.

    Resolves the manifest digest for the tag, then deletes the manifest
    by digest.  Failures are logged and the method returns silently.

    :param context: request context (unused here, kept for RPC signature)
    :param image_name_and_tag: string of the form "name:tag"
    """
    image_name_and_tag = image_name_and_tag.split(":")
    # first get the image digest for the image name and tag provided
    digest_resp = docker_registry.docker_registry_get("%s/manifests/%s"
        % (image_name_and_tag[0], image_name_and_tag[1]))
    if digest_resp.status_code != 200:
        LOG.error("Bad response from docker registry: %s"
                  % digest_resp.status_code)
        return
    image_digest = digest_resp.headers['Docker-Content-Digest']
    # now delete the image; the registry answers 202 Accepted on success
    image_delete_response = docker_registry.docker_registry_delete(
        "%s/manifests/%s" % (image_name_and_tag[0], image_digest))
    if image_delete_response.status_code != 202:
        # Bug fix: log the status of the DELETE response, not the earlier
        # (successful) digest lookup's status.
        LOG.error("Bad response from docker registry: %s"
                  % image_delete_response.status_code)
        return
def docker_registry_garbage_collect(self, context):
    """Run garbage collector"""
    active_controller = utils.HostHelper.get_active_controller(self.dbapi)
    personalities = [constants.CONTROLLER]
    config_uuid = self._config_update_hosts(context, personalities,
                                            [active_controller.uuid])
    manifest = {
        "personalities": personalities,
        "host_uuids": [active_controller.uuid],
        "classes": ['platform::dockerdistribution::garbagecollect']
    }
    self._config_apply_runtime_manifest(context, config_uuid, manifest)
def _configure_worker_host(self, context, host):
    """Configure a worker host with the supplied data.
    Does the following tasks:
    - Create or update entries in address table
    - Generate the configuration file for the host
    - Allocates management address if none exists
    - Set up PXE configuration to run installer
    :param context: request context
    :param host: host object
    """
    # Manifests are only generated when the host is running the active
    # load and is unlocked (or in the process of being unlocked).
    if self.host_load_matches_sw_version(host):
        unlocking = (host.administrative == constants.ADMIN_UNLOCKED or
                     host.action in (constants.FORCE_UNLOCK_ACTION,
                                     constants.UNLOCK_ACTION))
        if unlocking:
            self._puppet.update_host_config(host)
    else:
        LOG.info("Host %s is not running active load. "
                 "Skipping manifest generation" % host.hostname)
    self._allocate_addresses_for_host(context, host)
    # Set up the PXE config file for this host so it can run the installer
    self._update_pxe_config(host)
def _configure_storage_host(self, context, host):
    """Configure a storage ihost with the supplied data.
    Does the following tasks:
    - Update the puppet hiera data configuration for host
    - Allocates management address if none exists
    - Set up PXE configuration to run installer
    :param context: request context
    :param host: host object
    """
    # Update cluster and peers model
    self._ceph.update_ceph_cluster(host)
    # Manifests are only generated when the host runs the active load and
    # is unlocked; at that point the hostname can no longer change, so it
    # is safe to let the node boot and configure the platform services.
    if self.host_load_matches_sw_version(host):
        unlocking = (host.administrative == constants.ADMIN_UNLOCKED or
                     host.action in (constants.FORCE_UNLOCK_ACTION,
                                     constants.UNLOCK_ACTION))
        if unlocking:
            self._puppet.update_host_config(host)
    else:
        LOG.info("Host %s is not running active load. "
                 "Skipping manifest generation" % host.hostname)
    self._allocate_addresses_for_host(context, host)
    # Set up the PXE config file for this host so it can run the installer
    self._update_pxe_config(host)
    # storage-0 additionally hosts a ceph monitor
    if host['hostname'] == constants.STORAGE_0_HOSTNAME:
        self._ceph_mon_create(host)
def remove_host_config(self, context, host_uuid):
    """Remove configuration files for a host.
    :param context: an admin context.
    :param host_uuid: host uuid.
    """
    host = self.dbapi.ihost_get(host_uuid)
    self._puppet.remove_host_config(host)
def _unconfigure_controller_host(self, host):
    """Unconfigure a controller host.
    Does the following tasks:
    - Remove the puppet hiera data configuration for host
    - Remove host entry in the dnsmasq hosts file
    - Delete PXE configuration
    :param host: a host object.
    """
    self._unallocate_addresses_for_host(host)
    self._puppet.remove_host_config(host)
    self._remove_pxe_config(host)
    # Create the simplex flag on this controller because our mate has
    # been deleted.
    cutils.touch(tsc.PLATFORM_SIMPLEX_FLAG)
    posted_attr = {
        constants.CONTROLLER_0_HOSTNAME: 'controller_0_posted',
        constants.CONTROLLER_1_HOSTNAME: 'controller_1_posted',
    }.get(host.hostname)
    if posted_attr is not None:
        setattr(self, posted_attr, False)
def _unconfigure_worker_host(self, host, is_cpe=False):
"""Unconfigure a worker host.
Does the following tasks:
- Remove the puppet hiera data configuration for host
- Remove the host entry from the dnsmasq hosts file
- | |
m
t6 = t2 ** 2 % self.curve.p # e
t7 = t6 ** 2 % self.curve.p # l
t8 = t1 + t6
t8 = t8 ** 2 % self.curve.p
t8 = t8 - t4
t8 = t8 - t7
t8 = 2 * t8 # s
t9 = t5 ** 2 % self.curve.p
t9 = (t9 - 2 * t8) % self.curve.p # x(2p)
t10 = t8 - t9
t10 = t5 * t10 % self.curve.p
t11 = 8 * t7 # 8l
t10 = (t10 - t11) % self.curve.p # y(2p)
t12 = 2 * t2 % self.curve.p # z(2p)
t1 = 4 * t1
t1 = t1 * t6 % self.curve.p
t2 = 8 * t7 % self.curve.p
return (JacobianPoint(t9, t10, t12, self.curve),
JacobianPoint(t1, t2, t12, self.curve))
def scalar_multiplication(self, k, k_num_bits, p):
    """
    Montgomery ladder. Compute k * p.
    This algorithm does not work for k=0.

    :param k: scalar multiplier (must be non-zero).
    :param k_num_bits: bit length of ``k``; the loop processes one bit per
        iteration, from bit k_num_bits-2 down to bit 0.
    :param p: the point to multiply.
    """
    r0 = p
    # Initial Co-Z doubling-with-update: r1 = 2*p and r0 = p, sharing z.
    r1, r0 = self.dblu(r0)
    for pos in range(k_num_bits - 2, -1, -1):
        cur_bit = (k >> pos) & 1
        # Conditionally swap the two accumulators so the same
        # zaddc/zaddu sequence is executed regardless of the bit value
        # (regular ladder structure), then swap back.
        r1._swap_coordinates(1 - cur_bit, r0)
        r0, r1 = self.zaddc(r1, r0)
        r1, r0 = self.zaddu(r0, r1)
        r1._swap_coordinates(1 - cur_bit, r0)
    return r0
class JacobianPoint:
"""
Point representation in Jacobian coordinates. It uses Co-Z arithmetic
[1]_ to compute operations between curve points.
"""
def __init__(self, x, y, z, curve):
    """
    :py:attr:`x`, :py:attr:`y`, :py:attr:`z` are the Jacobian coordinates of
    this point, ``curve`` is the underlying/associated curve. ``curve`` must
    be a valid curve, it is the responsability of the caller to provide a
    valid and secure curve. ``curve`` is usually an instance of
    :py:class:`_Curve`.
    """
    _check_integer_type(x)
    _check_integer_type(y)
    _check_integer_type(z)
    self.x = x  # Jacobian X coordinate
    self.y = y  # Jacobian Y coordinate
    self.z = z  # Jacobian Z coordinate (z ≡ 0 mod p encodes infinity)
    self.curve = curve
    # Helper objects: Co-Z point formulas and Z/pZ modular arithmetic.
    self.cozarithmetic = _CoZArithmetic(self.curve)
    self.zarithmetic = _ZpZArithmetic.create_from_curve(self.curve)
def _swap_coordinates(self, swap, point):
    """
    Conditionally exchange this point's (x, y, z) with those of ``point``.

    ``swap`` must be exactly 1 (exchange) or 0 (leave both unchanged);
    the per-value swap is delegated to _cond_swap_values.
    """
    for name in ('x', 'y', 'z'):
        mine, theirs = _cond_swap_values(
            swap, getattr(self, name), getattr(point, name))
        setattr(self, name, mine)
        setattr(point, name, theirs)
def _to_equivalent(self, lmbda):
    """
    Map this point, in place, to the equivalent representative
    (lmbda^2 * x, lmbda^3 * y, lmbda * z) modulo the curve prime.
    """
    _check_integer_type(lmbda)
    assert not self.is_at_infinity()
    p = self.curve.p
    lmbda_sq = lmbda ** 2 % p
    lmbda_cu = lmbda_sq * lmbda % p
    self.x = self.x * lmbda_sq % p
    self.y = self.y * lmbda_cu % p
    self.z = self.z * lmbda % p
def canonicalize(self):
    """
    Transform this point to an equivalent and unique representative taking
    1 as z coordinate in ``(x : y : 1)`` when the point is not at infinity
    and taking x, y as 1 in ``(1 : 1 : 0)`` when the point is at infinity.
    This method is used for faciliting points comparisons and to convert a
    point to its affine representation. Before any transformation takes
    place this method checks that the point is on the curve.
    """
    # The point must be a valid point on curve. Otherwise it would
    # modify this point to a non-equivalent representation.
    assert self.is_on_curve()
    # Already canonicalized.
    if self.z % self.curve.p == 1:
        return
    # Point at infinity.
    if self.is_at_infinity():
        self.x = self.y = 1
        self.z = 0
    else:
        # Single inversion of z^3: t1 = (z^3)^-1 normalizes y directly,
        # and t1 * z = (z^2)^-1 normalizes x.
        # k is public so there is no worry about using bit_length() here.
        t1 = self.zarithmetic.exp(self.z, 3, _bit_length(3))
        t1 = self.zarithmetic.inverse(t1)
        self.y = t1 * self.y % self.curve.p
        t1 = t1 * self.z
        self.x = t1 * self.x % self.curve.p
        self.z = 1
def to_affine(self):
    """
    Return this point's affine coordinates (x/z^2, y/z^3).
    Must not be called on the point at infinity.
    """
    assert not self.is_at_infinity()
    self.canonicalize()
    return (self.x, self.y)
@staticmethod
def from_affine(x, y, curve):
    """
    Build a new JacobianPoint from the affine coordinates ``x, y`` on
    ``curve`` (an instance of :py:class:`_Curve`); the Jacobian z
    coordinate is fixed to 1.
    """
    point = JacobianPoint(x, y, 1, curve)
    return point
def get_affine_x(self):
    """Return the affine x coordinate of this point."""
    x, _ = self.to_affine()
    return x
def get_affine_y(self):
    """Return the affine y coordinate of this point."""
    _, y = self.to_affine()
    return y
def compression_bit_y(self):
    """
    Return the point-compression bit: the parity of the canonicalized
    :py:attr:`y` coordinate. Does not work for the point at infinity.
    See example in :py:meth:`uncompress`.
    """
    assert not self.is_at_infinity()
    self.canonicalize()
    return self.y % 2
@staticmethod
def uncompress(x, bit_y, curve):
    """
    Uncompress and construct the Jacobian point represented by :py:attr:`x`
    and :py:attr:`bit_y`. See :py:meth:`compression_bit_y` for how
    :py:attr:`bit_y` was initially obtained. curve's order must be a
    congruent of 3 mod 4.
    Example::
      curve = wcurve.secp256r1_curve()
      bit_y = curve.base_point.compression_bit_y()
      # p is a copy of the base point curve.base_point
      p = wcurve.JacobianPoint.uncompress(curve.base_point.x, bit_y, curve)
    """
    _check_integer_type(x)
    assert bit_y in (0, 1)
    assert curve.p % 4 == 3  # Required by the square root formulae.
    # y**2 = x**3 + ax + b
    t = x ** 3 % curve.p
    y2 = (t + curve.a * x + curve.b) % curve.p
    # For p ≡ 3 (mod 4) a square root of y2 is y2 ** ((p + 1) / 4) mod p.
    # y = +/- y2 ** ((p + 1) / 4)
    e = (curve.p + 1) >> 2
    y = _ZpZArithmetic.create_from_curve(curve).exp(y2, e, _bit_length(e))
    # Pick the root whose parity matches the stored compression bit.
    if (y & 1) != bit_y:
        assert y != 0
        y = -y % curve.p
    assert (y & 1) == bit_y
    return JacobianPoint.from_affine(x, y, curve)
def is_at_infinity(self):
"""
Returns ``True`` if this point is at infinity. This method is part of
the validation done by :py:meth:`is_valid`.
"""
return self.z % self.curve.p == 0
def has_valid_order(self):
    """
    Return ``True`` when this point's order equals the order of the base
    point (i.e. n * P is the point at infinity). Part of the validation
    done by :py:meth:`is_valid`.
    """
    if self.is_at_infinity():
        return False
    # With cofactor h=1 every finite curve point already has order n,
    # so the scalar multiplication can be skipped.
    if self.curve.h == 1:
        return True
    result = self._scalar_multiplication(self.curve.n,
                                         _bit_length(self.curve.n))
    return result.is_at_infinity()
def is_on_curve(self):
"""
Returns ``True`` if this point is on curve. This method is a step of the
validation performed by :py:meth:`is_valid`.
"""
t1 = self.y ** 2 % self.curve.p
t2 = self.x ** 3 % self.curve.p
t3 = self.z ** 3 % self.curve.p
t4 = self.curve.a * self.x
t4 = t4 * t3 * self.z % self.curve.p
t2 = t2 + t4
t3 = t3 ** 2 % self.curve.p
t3 = self.curve.b * t3
t2 = (t2 + t3) % self.curve.p
return t1 == t2
def is_valid(self):
    """
    Full point validation. A point ``P`` is valid when:
    1. ``P != O``
    2. ``P`` is on curve
    3. ``n * P = O``
    """
    return (not self.is_at_infinity()
            and self.is_on_curve()
            and self.has_valid_order())
def __add__(self, point):
    """
    Adds up together this point with another point and returns the result.
    Very inefficient algorithm when used for double scalar multiplication,
    the only upside in this case is that it is formed of regular operations.
    Additions with identity points are handled as special cases.
    Usually points are public elements (at least in the algorithms I know)
    therefore we're being slightly less careful in how we are manipulating
    and comparing them.
    """
    if not isinstance(point, JacobianPoint):
        raise TypeError("Invalid type %s, expected type %s." % \
                        (type(point), JacobianPoint))
    # Identity cases: O + P = P and P + O = P.
    if self.is_at_infinity():
        return copy.copy(point)
    elif point.is_at_infinity():
        return copy.copy(self)
    if self == point:
        # The formulaes forbid adding together two identical points, but we
        # can double one of them.
        return 2 * self
    # The two points must share the same z coordinates, it should be more
    # efficient to call _to_equivalent() than to_affine() which would use a
    # costly modular exponentiation with a big exponent.
    # NOTE: both points are mutated in place to a shared-z representation
    # before the Co-Z addition.
    tmpz = self.z
    self._to_equivalent(point.z)
    point._to_equivalent(tmpz)
    r, _ = self.cozarithmetic.zaddu(self, point)
    return r
def __sub__(self, point):
    """Subtract ``point`` from this point by adding its negation."""
    return self + (-point)
def _scalar_multiplication(self, scalar, scalar_num_bits):
    """
    Internal entry point to the Co-Z Montgomery ladder. Do not call this
    method directly unless you know what you're doing; use __mul__ and
    __rmul__ instead.
    """
    # Ladder expects a canonical (z=1) representative.
    self.canonicalize()
    return self.cozarithmetic.scalar_multiplication(
        scalar, scalar_num_bits, self)
def scalar_multiplication(self, scalar):
"""
This method does the scalar multiplication of the | |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sentencepiece as spm
import unicodedata
import six
import logging
from malaya.text.function import transformer_textcleaning
# Segment ids used by XLNet-style inputs (sentence A/B, paragraph/query,
# CLS/SEP markers and padding).
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_P = 0
SEG_ID_Q = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
# Fixed id assignments for XLNet special tokens.
special_symbols = {
    '<unk>': 0,
    '<s>': 1,
    '</s>': 2,
    '<cls>': 3,
    '<sep>': 4,
    '<pad>': 5,
    '<mask>': 6,
    '<eod>': 7,
    '<eop>': 8,
}
UNK_ID = special_symbols['<unk>']
CLS_ID = special_symbols['<cls>']
SEP_ID = special_symbols['<sep>']
MASK_ID = special_symbols['<mask>']
EOD_ID = special_symbols['<eod>']
# Marker SentencePiece uses for word-initial pieces.
SPIECE_UNDERLINE = '▁'
# Maximum number of subword tokens kept per text before adding the
# [CLS]/[SEP] markers (presumably sized to fit a 512-token window —
# TODO confirm against the model configs).
MAXLEN = 508
# Per-architecture names of the padding/classification/separator tokens.
SPECIAL_TOKENS = {
    'bert': {'pad': '[PAD]', 'cls': '[CLS]', 'sep': '[SEP]'},
    'xlnet': {'pad': '<pad>', 'cls': '<cls>', 'sep': '<sep>'},
}
# Map PTB bracket escapes and typographic quotes/dashes to plain ASCII.
BERT_TOKEN_MAPPING = {
    '-LRB-': '(',
    '-RRB-': ')',
    '-LCB-': '{',
    '-RCB-': '}',
    '-LSB-': '[',
    '-RSB-': ']',
    '``': '"',
    "''": '"',
    '`': "'",
    '«': '"',
    '»': '"',
    '‘': "'",
    '’': "'",
    '“': '"',
    '”': '"',
    '„': '"',
    '‹': "'",
    '›': "'",
    '\u2013': '--',  # en dash
    '\u2014': '--',  # em dash
}
# Inverse direction: escape brackets back to their PTB token names.
PTB_TOKEN_ESCAPE = {
    '(': '-LRB-',
    ')': '-RRB-',
    '{': '-LCB-',
    '}': '-RCB-',
    '[': '-LSB-',
    ']': '-RSB-',
}
class SentencePieceTokenizer:
    """Tokenizer backed by a SentencePiece model plus a TSV vocab file."""

    def __init__(self, vocab_file, spm_model_file, **kwargs):
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(spm_model_file)
        # The vocab file is tab-separated with one entry per line; the
        # trailing empty line is dropped.
        with open(vocab_file) as fopen:
            rows = [line.split('\t')
                    for line in fopen.read().split('\n')[:-1]]
        self.vocab = {row[0]: row[1] for row in rows}

    def tokenize(self, string):
        """Split ``string`` into sentencepiece subword tokens."""
        return encode_sentencepiece(
            self.sp_model, string, return_unicode=False, sample=False
        )

    def convert_tokens_to_ids(self, tokens):
        """Map subword tokens to their model piece ids."""
        return [
            self.sp_model.PieceToId(printable_text(t)) for t in tokens
        ]

    def convert_ids_to_tokens(self, ids):
        """Map piece ids back to subword tokens."""
        return [self.sp_model.IdToPiece(piece_id) for piece_id in ids]
class SentencePieceEncoder:
    """Thin encode/decode wrapper around a SentencePiece model."""

    def __init__(self, vocab_file, **kwargs):
        model = spm.SentencePieceProcessor()
        model.Load(vocab_file)
        self.sp = model
        # Reported vocab size is padded by 100 over the model's piece
        # count (presumably reserving extra ids — TODO confirm).
        self.vocab_size = model.GetPieceSize() + 100

    def encode(self, s):
        """Encode a single string to a list of subword ids."""
        return self.sp.EncodeAsIds(s)

    def decode(self, ids, strip_extraneous=False):
        """Decode ids back to a string; ``strip_extraneous`` is ignored."""
        return self.sp.DecodeIds(list(ids))
class SentencePieceBatchEncoder:
    """SentencePiece encoder that operates on batches of strings."""

    def __init__(self, vocab_file, **kwargs):
        model = spm.SentencePieceProcessor()
        model.Load(vocab_file)
        self.sp = model
        # Same +100 padding of the vocab size as SentencePieceEncoder.
        self.vocab_size = model.GetPieceSize() + 100

    def encode(self, s):
        """Encode every string in ``s``, appending sentinel id 1 to each
        sequence (presumably EOS — mirrors the original behavior)."""
        return [self.sp.EncodeAsIds(text) + [1] for text in s]

    def decode(self, ids, strip_extraneous=False):
        """Decode every id sequence in the batch back to a string."""
        return [self.sp.DecodeIds(list(seq)) for seq in ids]
class YTTMEncoder:
    """Batch encoder backed by a YouTokenToMe BPE model."""

    def __init__(self, vocab_file, id_mode=False, **kwargs):
        try:
            import youtokentome as yttm
        except BaseException:
            raise ModuleNotFoundError(
                'youtokentome not installed. Please install it by `pip install youtokentome` and try again.'
            )
        # Emit numeric ids or subword strings depending on id_mode.
        if id_mode:
            output_type = yttm.OutputType.ID
        else:
            output_type = yttm.OutputType.SUBWORD
        self.bpe = yttm.BPE(model=vocab_file)
        self.vocab_size = len(self.bpe.vocab())
        self.mode = output_type

    def encode(self, s):
        """Encode a batch of strings, appending sentinel id 1 to each
        sequence (mirrors the original behavior)."""
        encoded = self.bpe.encode(s, output_type=self.mode)
        return [seq + [1] for seq in encoded]

    def decode(self, ids, strip_extraneous=False):
        """Decode a batch of id sequences, dropping ids 0 and 1 first."""
        cleaned = [[tok for tok in seq if tok > 1] for seq in ids]
        return self.bpe.decode(list(cleaned))
class WordPieceTokenizer(object):
    """End-to-end BERT-style tokenizer: basic tokenization (cleanup,
    punctuation splitting) followed by wordpiece sub-tokenization."""

    def __init__(self, vocab_file, do_lower_case=False, **kwargs):
        self.vocab = load_vocab(vocab_file)
        self.inv_vocab = {v: k for k, v in self.vocab.items()}
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = InternalWordPieceTokenizer(vocab=self.vocab)

    def tokenize(self, text):
        """Run basic tokenization, then wordpiece each resulting token."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
        return self.convert_by_vocab(self.vocab, tokens)

    def convert_ids_to_tokens(self, ids):
        return self.convert_by_vocab(self.inv_vocab, ids)

    def convert_by_vocab(self, vocab, items):
        """Map every item through ``vocab``; unknown items raise KeyError."""
        output = []
        for item in items:
            output.append(vocab[item])
        return output

    def merge_ids_to_string(self, ids):
        """Convert ids back to a whitespace-joined string, merging '##'
        continuation pieces into their preceding token and dropping
        [CLS]/[SEP]/[PAD] markers.

        Bug fix: the original indexed past the end of the token list
        (IndexError) when the sequence ended on a '##' piece, and popped
        from an empty list when it began with one; both cases are now
        handled.
        """
        tokens = self.convert_ids_to_tokens(ids)
        n_tokens = len(tokens)
        new_tokens = []
        i = 0
        while i < n_tokens:
            current_token = tokens[i]
            if current_token.startswith('##') and new_tokens:
                merged_token = new_tokens.pop()
                # Consume the whole run of '##' pieces, guarding against
                # running off the end of the list.
                while i < n_tokens and tokens[i].startswith('##'):
                    merged_token = merged_token + tokens[i].replace('##', '')
                    i = i + 1
                new_tokens.append(merged_token)
            else:
                # A leading '##' piece with nothing to merge into is kept
                # verbatim (the original crashed here).
                new_tokens.append(current_token)
                i = i + 1
        words = [
            t
            for t in new_tokens
            if t not in ['[CLS]', '[SEP]', '[PAD]']
        ]
        return ' '.join(words)
class BasicTokenizer(object):
    """Basic text cleanup and whitespace/punctuation tokenization:
    control-char removal, optional lower-casing with accent stripping,
    CJK character isolation, and punctuation splitting."""

    def __init__(self, do_lower_case=True, **kwargs):
        # When True, tokens are lower-cased and accents are stripped.
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Clean ``text`` and split it into basic tokens."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        # Surround CJK characters with spaces so each one becomes its
        # own token.
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(' '.join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Drop combining marks (category 'Mn') after NFD decomposition."""
        text = unicodedata.normalize('NFD', text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == 'Mn':
                continue
            output.append(char)
        return ''.join(output)

    def _run_split_on_punc(self, text):
        """Split ``text`` at punctuation; each punctuation character
        becomes a token of its own."""
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return [''.join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Insert spaces around every CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(' ')
                output.append(char)
                output.append(' ')
            else:
                output.append(char)
        return ''.join(output)

    def _is_chinese_char(self, cp):
        """Return True when codepoint ``cp`` lies in a CJK ideograph block."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):
            return True
        return False

    def _clean_text(self, text):
        """Drop NUL/replacement/control characters and collapse all
        whitespace variants to a plain space."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xFFFD or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(' ')
            else:
                output.append(char)
        return ''.join(output)
class InternalWordPieceTokenizer(object):
    """Greedy longest-match-first wordpiece tokenizer over a fixed vocab."""

    def __init__(
        self, vocab, unk_token='[UNK]', max_input_chars_per_word=200
    ):
        self.vocab = vocab  # mapping of piece -> id; membership is what matters here
        self.unk_token = unk_token  # emitted for words that cannot be covered
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Split each whitespace-delimited word into the longest matching
        vocab pieces; non-initial pieces carry a '##' prefix.  A word that
        cannot be fully covered, or that is too long, becomes ``unk_token``."""
        text = convert_to_unicode(text)
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                end = len(chars)
                cur_substr = None
                # Shrink the candidate from the right until a piece is
                # found in the vocab (longest match first).
                while start < end:
                    substr = ''.join(chars[start:end])
                    if start > 0:
                        substr = '##' + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    # No prefix matched at this position: the whole word
                    # is unrepresentable.
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
def _is_whitespace(char):
if char == ' ' or char == '\t' or char == '\n' or char == '\r':
return True
cat = unicodedata.category(char)
if cat == 'Zs':
return True
return False
def _is_control(char):
if char == '\t' or char == '\n' or char == '\r':
return False
cat = unicodedata.category(char)
if cat in ('Cc', 'Cf'):
return True
return False
def _is_punctuation(char):
cp = ord(char)
if (
(cp >= 33 and cp <= 47)
or (cp >= 58 and cp <= 64)
or (cp >= 91 and cp <= 96)
or (cp >= 123 and cp <= 126)
):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False
def whitespace_tokenize(text):
    """Strip ``text`` and split it on runs of whitespace; empty or
    all-whitespace input yields an empty list."""
    stripped = text.strip()
    if not stripped:
        return []
    return stripped.split()
def convert_to_unicode(text):
    """Coerce ``text`` to unicode (``str`` on Python 3), decoding bytes
    as UTF-8 with errors ignored."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode('utf-8', 'ignore')
        raise ValueError('Unsupported string type: %s' % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode('utf-8', 'ignore')
        if isinstance(text, unicode):
            return text
        raise ValueError('Unsupported string type: %s' % (type(text)))
    raise ValueError('Not running on Python2 or Python 3?')
def padding_sequence(seq, maxlen=None, padding='post', pad_int=0):
    """Pad every sequence in ``seq`` to a common length.

    :param seq: list of lists to pad.
    :param maxlen: target length; defaults to the longest sequence.
    :param padding: 'post' appends padding, 'pre' prepends it.
    :param pad_int: fill value.
    :raises ValueError: on an unknown ``padding`` mode — the original
        silently skipped every sequence and returned an empty list.
    """
    if padding not in ('post', 'pre'):
        raise ValueError(
            "padding must be 'post' or 'pre', got %r" % (padding,))
    if not maxlen:
        maxlen = max([len(i) for i in seq])
    padded_seqs = []
    for s in seq:
        pad = [pad_int] * (maxlen - len(s))
        padded_seqs.append(s + pad if padding == 'post' else pad + s)
    return padded_seqs
def bert_tokenization(tokenizer, texts):
    """Tokenize ``texts`` for BERT: [CLS] tokens [SEP], padded to the
    batch maximum length.

    Returns (input_ids, input_masks, segment_ids, s_tokens).
    """
    input_ids, input_masks, segment_ids, s_tokens = [], [], [], []
    for text in texts:
        pieces = tokenizer.tokenize(transformer_textcleaning(text))[:MAXLEN]
        logging.debug(pieces)
        tokens = ['[CLS]'] + pieces + ['[SEP]']
        ids = tokenizer.convert_tokens_to_ids(tokens)
        input_ids.append(ids)
        input_masks.append([1] * len(ids))
        # Single-sentence input: every position belongs to segment 0.
        segment_ids.append([0] * len(tokens))
        s_tokens.append(tokens)
    batch_len = max(len(ids) for ids in input_ids)
    return (
        padding_sequence(input_ids, batch_len),
        padding_sequence(input_masks, batch_len),
        padding_sequence(segment_ids, batch_len),
        s_tokens,
    )
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def bert_tokenization_siamese(tokenizer, left, right):
    """Tokenize aligned (left[i], right[i]) text pairs for BERT pair input.

    Builds ``[CLS] left [SEP] right [SEP]`` per pair with segment ids 0 for
    the left half (including [CLS] and the first [SEP]) and 1 for the right
    half. Returns (input_ids, input_masks, segment_ids, s_tokens); the first
    three are padded to the longest sequence.
    Note: unlike bert_tokenization(), no MAXLEN truncation is applied here.
    """
    input_ids, input_masks, segment_ids, s_tokens = [], [], [], []
    a, b = [], []
    for i in range(len(left)):
        tokens_a = tokenizer.tokenize(transformer_textcleaning(left[i]))
        logging.debug(tokens_a)
        tokens_b = tokenizer.tokenize(transformer_textcleaning(right[i]))
        logging.debug(tokens_b)
        a.append(tokens_a)
        b.append(tokens_b)
    for i in range(len(left)):
        tokens_a = a[i]
        tokens_b = b[i]
        tokens = []
        segment_id = []
        tokens.append('[CLS]')
        segment_id.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_id.append(0)
        tokens.append('[SEP]')
        # snapshot here on purpose: s_tokens keeps only [CLS] left [SEP],
        # copied (tokens[:]) so the appends below do not leak into it
        s_tokens.append(tokens[:])
        segment_id.append(0)
        for token in tokens_b:
            tokens.append(token)
            segment_id.append(1)
        tokens.append('[SEP]')
        segment_id.append(1)
        input_id = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_id)
        input_ids.append(input_id)
        input_masks.append(input_mask)
        segment_ids.append(segment_id)
    maxlen = max([len(i) for i in input_ids])
    input_ids = padding_sequence(input_ids, maxlen)
    input_masks = padding_sequence(input_masks, maxlen)
    segment_ids = padding_sequence(segment_ids, maxlen)
    return input_ids, input_masks, segment_ids, s_tokens
def printable_text(text):
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, | |
#!/usr/bin/env python3
# vim: ft=python:nospell:sta:et:sw=2:ts=2:sts=2
"""
## Breaking bad
"I am the one who knocks."
.-------.
| |
-=_________=-
___ ___
|___)=(___|
|
###
#####
# = #
###
- Divide data into regions of bad and better.
- Find the least you need to do to nudge things away from bad.
```
Better
.------.
| 36| <--- a score you want to minimize
| Br |
.______.______.
| 56| <--- a larger, hence worse, score
| Ba |
.______.
Bad
```
"""
def config():
  """Set global options. To change these, at the command-line,
  type
       ./braknbad GROUP -OPTION value
  where GROUP is one of char,dist,do,etc and OPTION
  is some sub-setting in GROUP; e.g.
       ./braknbad dist -p 3
  """
  return o(
    char = o( no = "?",
              less = "<",
              more = ">",
              sep = ",",
              doomed= r'([\n\t\r ]|#.*)'),
    dist = o( p=2 ),
    do = o( run=r"." ),
    nb = o( k=1, m=2 ),
    row = o( doms = 64 ),
    two = o( better = 0.2 ),
    div = o( cohen = 0.3,
            trivial= 1.05,
            min = 0.6)
    )
#--------- --------- --------- --------- -------- ----------
import re,sys,traceback,random
from copy import deepcopy as kopy
class o:
  """A few of my favorite things; (1) fast creation
  of instance attributes; (2) hash id management;
  (3) pretty print of instances."""
  id = 0  # class-level counter used to generate ids for instances

  def __init__(i, **d):
    i.__dict__.update(**d)

  def identity(i):
    "assign a new unique integer as 'id' of this instance."
    o.id += 1
    i.id = o.id
    return i.id

  def __hash__(i):
    "Allow this instance to be used in sets or dictionary keys."
    return i.id

  def __repr__(i):
    """Print attributes in alphabetical order, skipping
    'private' attributes (those starting with '_')."""
    visible = sorted((k, v) for k, v in i.__dict__.items() if k[0] != "_")
    body = ", ".join('%s=%s' % (k, v) for k, v in visible)
    return i.__class__.__name__ + '{' + body + '}'
# 'any' deliberately shadows the builtin in this module: it picks one
# random element from a list (used when sampling rows for scoring).
any = random.choice
# p(z): fraction -> whole-number percentage, e.g. p(0.25) == 25
p=lambda z: int(round(z*100,0))
def same(x):
  """Identity function: return *x* unchanged (the default no-op key)."""
  return x
def atom(x):
  """Coerce *x* to int, else float, else return it unchanged.

  The original used bare ``except`` clauses, which also swallow
  KeyboardInterrupt/SystemExit; only conversion failures
  (ValueError/TypeError) are expected here, so catch just those.
  """
  for cast in (int, float):
    try:
      return cast(x)
    except (ValueError, TypeError):
      pass
  return x
def cli(d, args=sys.argv[1:]):
  """Allow command lines args to update fields in the THE object.
  Example usage: THE = cli(THE).

  A bare word selects a group inside *d*; '-name value' then updates a
  field of the currently selected group (booleans are flipped, other
  values must match the old value's type).
  """
  tokens = [atom(x) for x in args]
  what = {}
  groups = d.__dict__
  while tokens:
    tok = tokens.pop(0)
    if tok in groups:
      what = groups[tok].__dict__
      continue
    assert isinstance(tok, str) and tok[0] == "-", "bad flag '%s'" % tok
    key = tok[1:]
    assert key in what, "%s not one of %s" % (key, list(what.keys()))
    old = what[key]
    if isinstance(old, bool):
      what[key] = not old
    else:
      new = tokens.pop(0)
      assert type(old) == type(new), "'%s' value not of type '%s'" % (key, type(old))
      what[key] = new
  return d
# Global settings singleton; command-line overrides are applied via cli(THE).
THE=config()
class Eg(o):
  "Manage a list of demo/test functions."
  all = []    # registry of demo/test functions (filled by the @eg decorator)
  n = 0       # number of demos/tests that raised (failures)
  tell= "#Tests 0 tries 0 fails 100% passes."  # last summary report string
  y = 0       # number of demos/tests attempted
  def run():
    "Run the list of demos/tests."
    [Eg.run1(one) for one in Eg.all]
  def run1(f):
    """Run one demo/test whose name matches THE.do.run; print its name
    and docstring first. On failure, print the traceback and refresh the
    Eg.tell summary (the summary is only updated on failures)."""
    if re.match(THE.do.run, f.__name__):
      print("\n-----| %s |%s" % (f.__name__,"-"*40))
      if f.__doc__:
        print("# "+ re.sub(r'\n[ \t]*',"\n# ", f.__doc__))
      Eg.y += 1
      try:
        f()
      except Exception:
        # BUG FIX: was a bare 'except:', which also trapped
        # KeyboardInterrupt/SystemExit; narrowed so Ctrl-C propagates.
        Eg.n += 1
        y,n = Eg.y, Eg.n
        print(traceback.format_exc())
        # NOTE(review): the "%% passed" figure is y/(y+n), i.e. tries over
        # tries+fails, not a true pass rate -- kept as-is for compatibility.
        Eg.tell = "#Tests %s tries %s fails %s%% passed." % (
          y,n, round(100*(y/(y+n+0.0001)),0))
def eg(f):
  """Convenience function. Decorator that adds
  functions to the list managed by the `Eg` class."""
  Eg.all.append(f)
  return f
class Row(o):
  "Holder for lists of values and their discretized ranges."
  def __init__(i,lst):
    i.cells=lst               # raw column values for this row
    i.ranges=[None]*len(lst)  # per-cell discretized range, filled in later
    i.score=0                 # set by Tbl.scoring()
  def scoring(i,j,t):
    "Check if this row 'i' is better than row 'j'."
    # Continuous-domination style comparison over the objective columns:
    # values are normalised to 0..1, each objective contributes an
    # exponential loss (c.w is the +1/-1 optimisation direction), and 'i'
    # wins when its mean loss is the smaller of the two.
    n = len(t.cols.objs)
    s1 = s2 = 0
    for c in t.cols.objs:
      x,y = i.cells[c.pos], j.cells[c.pos]
      x,y = c.norm(x), c.norm(y)
      s1 -= 10**( c.w * (x-y)/n )
      s2 -= 10**( c.w * (y-x)/n )
    return s1/n < s2/n
class Tbl(o):
  """Manage a list of Rows. Keep statistics on each column
  in `objs` or `decs` objects (which are either `Num` or
  `Sym` instances)."""
  def __init__(i,names=[], rows=None):
    # 'names' is only read here, so the mutable default is harmless
    i.rows = []
    i.cols = o(all=[], decs = [], objs=[], numdecs=[])
    if names:
      i.header(names)
      i.names = names
    if rows:
      i.read(rows)
    i.names = [ c.txt for c in i.cols.all ]
  def centroid(i):
    "Return the middle."
    return [ c.centroid() for c in i.cols.all ]
  def scoring(i):
    """Score each row (using the row's `scoring` method).
    If there are too many rows to score fully, then
    just score against THE.row.doms random opponents."""
    n = THE.row.doms
    for r1 in i.rows:
      if n < len(i.rows):
        tmp= sum(r1.scoring(any(i.rows),i) for _ in range(n))
      else:
        tmp= sum(r1.scoring(r2,i) for r2 in i.rows)
      # NOTE(review): tmp is divided by THE.row.doms even when fewer
      # comparisons were made above; scores stay monotone but are not a
      # true mean in that case.
      r1.score = tmp/n
  def cook(i):
    """Discretize each numeric decision column against the row scores.
    NOTE(review): if cols.numdecs is empty, 'd' is unbound below and this
    raises NameError; callers appear to rely on '$' columns existing."""
    i.scoring()
    r=[]
    for col in i.cols.numdecs:
      d= Div2(i.rows,
              x=lambda row: row.cells[col.pos],
              y=lambda row: row.score)
      r+= d.ranges
    return d.b4, sorted(r,key=lambda z:z.stats.mu)
  def clone(i):
    """Return an (empty) table that can read rows like
    those seen in this table."""
    return Tbl(i.names)
  def header(i,names):
    "Convert a list of names into `Num`s and `Sym`s as appropriate."
    for n,s in enumerate(names):
      w = -1 if s[0] == '<' else 1   # '<' marks a goal to minimize
      x = (Num if s[0] in "<>$" else Sym)(s,n,w)
      what = i.cols.objs if s[0] in '<>' else i.cols.decs
      what += [x]
      i.cols.all += [x]
      if s[0] == "$": i.cols.numdecs += [x]
  def read(i,src):
    """Read rows from some src. Add to this table.
    If this table does not know what is columns are yet,
    then read those from the first row."""
    for row in src:
      i + row if i.cols.all else i.header(row)
  def __add__(i,lst):
    "Add a new row. Update the column statistics."
    lst = [ c + lst[c.pos] for c in i.cols.all ]
    i.rows += [ Row(lst) ]
  def like(i,lst,ns):
    "Report the likelihood that `lst` belongs in this table (m-estimate)."
    n = len(i.rows)
    k, m = THE.nb.k, THE.nb.m
    like = prior = (n + k) / (ns + k*2)
    # BUG FIX: was 'i.all.decs' -- Tbl has no 'all' attribute; the column
    # groups live on i.cols (see __init__), so this always raised
    # AttributeError. Iterate the decision columns instead.
    for c in i.cols.decs:
      x = lst[c.pos]
      if x == THE.char.no: continue
      f = c.bag.get(x,0)  # NOTE(review): assumes Sym columns (only Sym has .bag)
      inc = (f + m*prior) / (n + m)
      like *= inc
    return like
class Thing(o):
  """Shared behaviour for Num/Sym columns."""
  def xpect(i, j):
    """Count-weighted mean of the two columns' variety (sd or entropy)."""
    total = i.n + j.n
    return i.n / total * i.variety() + j.n / total * j.variety()
class Num(Thing):
  """Numeric column: count/mean/m2 kept with Welford's online algorithm,
  plus lo/hi bounds used for 0..1 normalisation."""
  def __init__(i,txt="",pos=0,w=1,key=same,inits=[]):
    # txt: column name; pos: column index; w: +1/-1 optimisation direction;
    # key: coercion applied to each incoming value; inits: seed values.
    i.id = i.identity()
    i.txt,i.pos = txt,pos
    i.w=w
    i.n,i.mu,i.m2 = 0,0,0
    i.lo,i.hi = 10**32, -10**32
    i.key = key
    # NOTE(review): i.sd is first set inside __add__/__sub__, so calling
    # variety() before any value is added raises AttributeError.
    [i + one for one in inits]
  def variety(i) : return i.sd
  def centroid(i): return i.mu
  def norm(i,x):
    # map x onto 0..1 within the seen lo..hi range (tiny eps avoids /0)
    return (x - i.lo) / (i.hi - i.lo + 10**-32)
  def __add__(i,x):
    # Welford update: fold one value into n/mu/m2; THE.char.no is skipped.
    x = i.key(x)
    if x == THE.char.no: return x
    x = float(x)
    if x < i.lo: i.lo = x
    if x > i.hi: i.hi = x
    i.n += 1
    d = x - i.mu
    i.mu += d/i.n
    i.m2 += d*(x - i.mu)
    i.sd = i.sd0()
    return x
  def __sub__(i, x):
    # Welford downdate: remove one value; stats reset when n drops below 2.
    if i.n < 2:
      i.n, i.mu, i.m2 = 0, 0, 0
    else:
      i.n -= 1
      x = i.key(x)
      d = x - i.mu
      i.mu -= d / i.n
      i.m2 -= d * (x - i.mu)
    # NOTE(review): unlike __add__, lo/hi are not adjusted here, so the
    # bounds can be stale after removals.
    i.sd = i.sd0()
    return x
  def sd0(i):
    # standard deviation from m2, guarded against n<2 and rounding below 0
    if i.m2 < 0: return 0
    if i.n < 2: return 0
    return (i.m2/(i.n - 1 + 10**-32))**0.5
class Sym(Thing):
  """Symbolic column: tracks value counts, the mode, and entropy."""
  def __init__(i,txt="",pos=0,w=1, key=same, inits=[]):
    # txt: column name; pos: column index; w: optimisation direction;
    # key: coercion applied to each incoming value; inits: seed values.
    i.id = i.identity()
    i.txt,i.pos = txt,pos
    i.w=w
    i.n,i.most,i.mode,i.bag = 0,0,None,{}
    i.key=key
    i._ent=None   # cached entropy; invalidated on every add
    [i + one for one in inits]
  def variety(i): return i.ent()
  def centroid(i): return i.mode
  def __add__(i,x):
    # count one value (after i.key); THE.char.no placeholders are skipped
    x = i.key(x)
    if x == THE.char.no: return x
    i._ent= None
    i.n += 1
    c = i.bag[x] = i.bag.get(x,0) + 1
    if c > i.most:
      i.most, i.mode = c, x
    return x
  def ent(i):
    """Shannon entropy (bits) of the counts, cached in i._ent.
    NOTE(review): a cached entropy of exactly 0 (single-valued column)
    fails the 'if not i._ent' test and is recomputed; harmless."""
    # BUG FIX: 'math' is never imported at the top of this file (only
    # re, sys, traceback, random are), so this method raised NameError
    # on first use; import it locally to keep the fix self-contained.
    import math
    if not i._ent:
      i._ent = 0
      for _,v in i.bag.items():
        p = v/i.n
        i._ent -= p*math.log(p,2)
    return i._ent
class Two(o):
"""Stores two tables: one for `_bad` things
and one for `_better` things."""
def __init__(i,t, lst):
i.bad, i.better = t.clone(), t.clone()
lst = sorted(lst, key=lambda z:z.count)
n = int(len(lst)*THE.two.better)
for m,one in enumerate(lst):
a = one.cells
(i.bad + a) if m | |
err == ''
mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_add_valid(capsys):
    """Test --ch-add with valid channel name, and that channel name does not already exist"""
    # Happy path: name is unused and a disabled (free) channel slot exists.
    sys.argv = ['', '--ch-add', 'testing']
    Globals.getInstance().set_args(sys.argv)
    mocked_channel = MagicMock(autospec=Channel)
    # TODO: figure out how to get it to print the channel name instead of MagicMock
    mocked_node = MagicMock(autospec=Node)
    # set it up so we do not already have a channel named this
    mocked_node.getChannelByName.return_value = False
    # set it up so we have free channels
    mocked_node.getDisabledChannel.return_value = mocked_channel
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        main()
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Writing modified channels to device', out, re.MULTILINE)
        assert err == ''
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_add_invalid_name_too_long(capsys):
    """Test --ch-add with invalid channel name, name too long"""
    # 'testingtestingtesting' exceeds the allowed channel-name length,
    # so main() warns and exits with code 1.
    sys.argv = ['', '--ch-add', 'testingtestingtesting']
    Globals.getInstance().set_args(sys.argv)
    mocked_channel = MagicMock(autospec=Channel)
    # TODO: figure out how to get it to print the channel name instead of MagicMock
    mocked_node = MagicMock(autospec=Node)
    # set it up so we do not already have a channel named this
    mocked_node.getChannelByName.return_value = False
    # set it up so we have free channels
    mocked_node.getDisabledChannel.return_value = mocked_channel
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Warning: Channel name must be shorter', out, re.MULTILINE)
        assert err == ''
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_add_but_name_already_exists(capsys):
    """Test --ch-add with a channel name that already exists"""
    sys.argv = ['', '--ch-add', 'testing']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    # set it up so a channel with this name ALREADY exists
    # (the old comment said the opposite of what the mock does)
    mocked_node.getChannelByName.return_value = True
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Warning: This node already has', out, re.MULTILINE)
        assert err == ''
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_add_but_no_more_channels(capsys):
    """Test --ch-add with but there are no more channels"""
    sys.argv = ['', '--ch-add', 'testing']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    # set it up so we do not already have a channel named this
    mocked_node.getChannelByName.return_value = False
    # set it up so there are NO free (disabled) channel slots left
    # (the old comment said the opposite of what the mock does)
    mocked_node.getDisabledChannel.return_value = None
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Warning: No free channels were found', out, re.MULTILINE)
        assert err == ''
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_del(capsys):
    """Test --ch-del with valid secondary channel to be deleted"""
    # Deleting a secondary channel (index 1) needs no extra node setup.
    sys.argv = ['', '--ch-del', '--ch-index', '1']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        main()
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Deleting channel', out, re.MULTILINE)
        assert err == ''
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_del_no_ch_index_specified(capsys):
    """Test --ch-del without a valid ch-index"""
    # Without --ch-index the command cannot know which channel to delete,
    # so main() warns and exits with code 1.
    sys.argv = ['', '--ch-del']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Warning: Need to specify', out, re.MULTILINE)
        assert err == ''
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_del_primary_channel(capsys):
    """Test --ch-del on ch-index=0"""
    # The primary channel (index 0) must never be deleted.
    sys.argv = ['', '--ch-del', '--ch-index', '0']
    Globals.getInstance().set_args(sys.argv)
    # NOTE(review): pre-seeding the index to 1 checks that the --ch-index 0
    # argument takes precedence -- confirm against main()'s arg handling.
    Globals.getInstance().set_channel_index(1)
    mocked_node = MagicMock(autospec=Node)
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Warning: Cannot delete primary channel', out, re.MULTILINE)
        assert err == ''
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_enable_valid_secondary_channel(capsys):
    """Test --ch-enable with --ch-index"""
    # Enabling a secondary channel writes the modified channel list and
    # records the chosen index in Globals.
    sys.argv = ['', '--ch-enable', '--ch-index', '1']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        main()
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Writing modified channels', out, re.MULTILINE)
        assert err == ''
        assert Globals.getInstance().get_channel_index() == 1
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_disable_valid_secondary_channel(capsys):
    """Test --ch-disable with --ch-index"""
    # Mirror of the enable test: disabling a secondary channel also writes
    # the modified channel list and records the chosen index.
    sys.argv = ['', '--ch-disable', '--ch-index', '1']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        main()
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Writing modified channels', out, re.MULTILINE)
        assert err == ''
        assert Globals.getInstance().get_channel_index() == 1
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_enable_without_a_ch_index(capsys):
    """Test --ch-enable without --ch-index"""
    # Without --ch-index the command cannot know which channel to enable,
    # so main() warns, exits 1, and the channel index stays unset.
    sys.argv = ['', '--ch-enable']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Warning: Need to specify', out, re.MULTILINE)
        assert err == ''
        assert Globals.getInstance().get_channel_index() is None
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_enable_primary_channel(capsys):
    """Test --ch-enable with --ch-index = 0"""
    # The PRIMARY channel (index 0) can never be enabled or disabled;
    # the index is still recorded before main() bails out.
    sys.argv = ['', '--ch-enable', '--ch-index', '0']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Warning: Cannot enable/disable PRIMARY', out, re.MULTILINE)
        assert err == ''
        assert Globals.getInstance().get_channel_index() == 0
        mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_range_options(capsys):
    """Test changing the various range options."""
    # Each preset is exercised in its own iteration with fresh mocks;
    # every one should write modified channels to the device.
    range_options = ['--ch-vlongslow', '--ch-longslow', '--ch-longfast', '--ch-midslow',
                     '--ch-midfast', '--ch-shortslow', '--ch-shortfast']
    for range_option in range_options:
        sys.argv = ['', f"{range_option}" ]
        Globals.getInstance().set_args(sys.argv)
        mocked_node = MagicMock(autospec=Node)
        iface = MagicMock(autospec=SerialInterface)
        iface.getNode.return_value = mocked_node
        with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
            main()
            out, err = capsys.readouterr()
            assert re.search(r'Connected to radio', out, re.MULTILINE)
            assert re.search(r'Writing modified channels', out, re.MULTILINE)
            assert err == ''
            mo.assert_called()
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_ch_longsfast_on_non_primary_channel(capsys):
    """Test --ch-longfast --ch-index 1"""
    # Standard range presets only apply to the primary channel, so using
    # one on a secondary channel warns and exits 1.
    sys.argv = ['', '--ch-longfast', '--ch-index', '1']
    Globals.getInstance().set_args(sys.argv)
    mocked_node = MagicMock(autospec=Node)
    iface = MagicMock(autospec=SerialInterface)
    iface.getNode.return_value = mocked_node
    with patch('meshtastic.serial_interface.SerialInterface', return_value=iface) as mo:
        with pytest.raises(SystemExit) as pytest_wrapped_e:
            main()
        assert pytest_wrapped_e.type == SystemExit
        assert pytest_wrapped_e.value.code == 1
        out, err = capsys.readouterr()
        assert re.search(r'Connected to radio', out, re.MULTILINE)
        assert re.search(r'Warning: Standard channel settings', out, re.MULTILINE)
        assert err == ''
        mo.assert_called()
# PositionFlags:
# Misc info that might be helpful (this info will grow stale, just
# a snapshot of the values.) The radioconfig_pb2.PositionFlags.Name and bit values are:
# POS_UNDEFINED 0
# POS_ALTITUDE 1
# POS_ALT_MSL 2
# POS_GEO_SEP 4
# POS_DOP 8
# POS_HVDOP 16
# POS_BATTERY 32
# POS_SATINVIEW 64
# POS_SEQ_NOS 128
# POS_TIMESTAMP 256
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_pos_fields_no_args(capsys):
    """Test --pos-fields no args (which shows settings)"""
    # position_flags = 35 = 1 + 2 + 32, i.e. POS_ALTITUDE | POS_ALT_MSL |
    # POS_BATTERY per the bit table above, hence the three Name() results.
    sys.argv = ['', '--pos-fields']
    Globals.getInstance().set_args(sys.argv)
    pos_flags = MagicMock(autospec=meshtastic.radioconfig_pb2.PositionFlags)
    with patch('meshtastic.serial_interface.SerialInterface') as mo:
        mo().getNode().radioConfig.preferences.position_flags = 35
        with patch('meshtastic.radioconfig_pb2.PositionFlags', return_value=pos_flags) as mrc:
            mrc.values.return_value = [0, 1, 2, 4, 8, 16, 32, 64, 128, 256]
            # Note: When you use side_effect and a list, each call will use a value from the front of the list then
            # remove that value from the list. If there are three values in the list, we expect it to be called
            # three times.
            mrc.Name.side_effect = ['POS_ALTITUDE', 'POS_ALT_MSL', 'POS_BATTERY']
            main()
            mrc.Name.assert_called()
            mrc.values.assert_called()
            mo.assert_called()
            out, err = capsys.readouterr()
            assert re.search(r'Connected to radio', out, re.MULTILINE)
            assert re.search(r'POS_ALTITUDE POS_ALT_MSL POS_BATTERY', out, re.MULTILINE)
            assert err == ''
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_pos_fields_arg_of_zero(capsys):
    """Test --pos-fields an arg of 0 (which shows list)"""
    sys.argv = ['', '--pos-fields', '0']
    Globals.getInstance().set_args(sys.argv)
    pos_flags = MagicMock(autospec=meshtastic.radioconfig_pb2.PositionFlags)
    with patch('meshtastic.serial_interface.SerialInterface') as mo:
        with patch('meshtastic.radioconfig_pb2.PositionFlags', return_value=pos_flags) as mrc:
            def throw_value_error_exception(exc):
                raise ValueError()
            # '0' is not a known flag name, so Value() raises and main()
            # falls back to listing the supported names from keys().
            mrc.Value.side_effect = throw_value_error_exception
            mrc.keys.return_value = [ 'POS_UNDEFINED', 'POS_ALTITUDE', 'POS_ALT_MSL',
                                      'POS_GEO_SEP', 'POS_DOP', 'POS_HVDOP', 'POS_BATTERY',
                                      'POS_SATINVIEW', 'POS_SEQ_NOS', 'POS_TIMESTAMP']
            main()
            mrc.Value.assert_called()
            mrc.keys.assert_called()
            mo.assert_called()
            out, err = capsys.readouterr()
            assert re.search(r'Connected to radio', out, re.MULTILINE)
            assert re.search(r'ERROR: supported position fields are:', out, re.MULTILINE)
            # BUG FIX: the old check passed the literal list text to
            # re.search(), where '[...]' is a regex character class -- it
            # matched any single listed character, so the assertion was
            # vacuously true (and the implicitly concatenated pattern was
            # missing spaces between items anyway). Assert on the exact
            # printed list instead.
            expected = ("['POS_UNDEFINED', 'POS_ALTITUDE', 'POS_ALT_MSL', "
                        "'POS_GEO_SEP', 'POS_DOP', 'POS_HVDOP', 'POS_BATTERY', "
                        "'POS_SATINVIEW', 'POS_SEQ_NOS', 'POS_TIMESTAMP']")
            assert expected in out
            assert err == ''
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_pos_fields_valid_values(capsys):
    """Test --pos-fields with valid values"""
    # POS_GEO_SEP (4) | POS_ALT_MSL (2) == 6, which all three output
    # messages below should report.
    sys.argv = ['', '--pos-fields', 'POS_GEO_SEP', 'POS_ALT_MSL']
    Globals.getInstance().set_args(sys.argv)
    pos_flags = MagicMock(autospec=meshtastic.radioconfig_pb2.PositionFlags)
    with patch('meshtastic.serial_interface.SerialInterface') as mo:
        with patch('meshtastic.radioconfig_pb2.PositionFlags', return_value=pos_flags) as mrc:
            mrc.Value.side_effect = [ 4, 2 ]
            main()
            mrc.Value.assert_called()
            mo.assert_called()
            out, err = capsys.readouterr()
            assert re.search(r'Connected to radio', out, re.MULTILINE)
            assert re.search(r'Setting position fields to 6', out, re.MULTILINE)
            assert re.search(r'Set position_flags to 6', out, re.MULTILINE)
            assert re.search(r'Writing modified preferences to device', out, re.MULTILINE)
            assert err == ''
@pytest.mark.unit
@pytest.mark.usefixtures("reset_globals")
def test_main_get_with_valid_values(capsys):
"""Test --get with valid values (with string, number, boolean)"""
sys.argv = ['', '--get', 'ls_secs', '--get', 'wifi_ssid', '--get', 'fixed_position']
Globals.getInstance().set_args(sys.argv)
with patch('meshtastic.serial_interface.SerialInterface') as mo:
mo().getNode().radioConfig.preferences.wifi_ssid | |
<reponame>nikmagini/pilot
# Mover.py
# Used by runJob and pilot to transfer input and output files from and to the local SE
import os
import sys
import commands
import re
import urllib
from xml.dom import minidom
from time import time, sleep
from timed_command import timed_command
from pUtil import createPoolFileCatalog, tolog, addToSkipped, removeDuplicates, dumpOrderedItems,\
hasBeenTransferred, getLFN, makeTransRegReport, readpar, getMaxInputSize, headPilotErrorDiag, getCopysetup,\
getCopyprefixLists, getExperiment, getSiteInformation, stripDQ2FromLFN, extractPattern, dumpFile, updateInputFileWithTURLs
from FileHandling import getExtension, getTracingReportFilename, readJSON, getHashedBucketEndpoint, getDirectAccess, useDirectAccessWAN
from FileStateClient import updateFileState, dumpFileStates
from RunJobUtilities import updateCopysetups
from SysLog import sysLog, dumpSysLogTail
# Note: DEFAULT_TIMEOUT and MAX_RETRY are reset in get_data()
MAX_RETRY = 1
MAX_NUMBER_OF_RETRIES = 3
# 5 hour total limit on rucio download/upload per attempt with MAX_RETRY=1
# (the old "1h40' if 3 retries" note only holds when MAX_RETRY is 3)
DEFAULT_TIMEOUT = 5*3600/MAX_RETRY
from PilotErrors import PilotErrors
from futil import *
import SiteMoverFarm
from configSiteMover import config_sm
PERMISSIONS_DIR = config_sm.PERMISSIONS_DIR
PERMISSIONS_FILE = config_sm.PERMISSIONS_FILE
CMD_CHECKSUM = config_sm.COMMAND_MD5
# Default archival type
ARCH_DEFAULT = config_sm.ARCH_DEFAULT
class replica:
    """ Replica """
    # Plain data holder for one file replica; every field defaults to None
    # and is filled in by the catalog/replica-lookup callers (not visible
    # in this chunk). NOTE(review): field meanings below are inferred from
    # the names -- confirm against the code that populates them.
    sfn = None        # presumably the physical file name / SURL
    setname = None    # presumably the dataset/set this replica belongs to
    fs = None
    filesize = None
    csumvalue = None  # presumably the checksum value
    rse = None        # presumably the Rucio storage element name
    filetype = None
def createZippedDictionary(list1, list2):
    """ Created a zipped dictionary from input lists """
    # list1 = [a1, a2, ..]
    # list2 = [b1, b2, ..]
    # -> dict = {a1:b1, a2:b2, ..}
    # Returns None (with a warning logged) when the lists differ in length
    # or when dict construction fails (e.g. unhashable keys in list1).
    d = None
    if len(list1) == len(list2):
        try:
            d = dict(zip(list1, list2))
        except Exception,e:
            tolog("Warning: Dictionary creation failed: %s" % str(e))
        else:
            tolog("Created dictionary: %s" % str(d))
    else:
        tolog("Warning: Cannot create zipped dictionary using: list1=%s, list2=%s (different lengths)" % (str(list1), str(list2)))
    return d
def getProperDatasetNames(realDatasetsIn, prodDBlocks, inFiles):
    """ Get all proper dataset names """
    # Returns a 3-tuple:
    #   dsname: the single real input dataset name, or the last one seen
    #           when there are several, or "" if none
    #   dsdict: { real dataset name : [input files] }
    #   rucio_dataset_dictionary: { prodDBlock name : [input files] },
    #           used for the rucio traces
    dsname = ""
    dsdict = {}
    rucio_dataset_dictionary = {}
    # fill the dataset dictionary
    if realDatasetsIn and len(realDatasetsIn) == 1 and realDatasetsIn[0] != 'NULL':
        # single dataset: it covers (at least) the first input file
        dsname = realDatasetsIn[0]
        if not dsdict.has_key(dsname): dsdict[dsname] = []
        dsdict[dsname].append(inFiles[0])
    elif realDatasetsIn and len(realDatasetsIn) > 1:
        # one dataset name per input file: group files by dataset
        for i in range(len(inFiles)):
            inFile = inFiles[i]
            dsname = realDatasetsIn[i]
            if not dsdict.has_key(dsname):
                dsdict[dsname] = []
            dsdict[dsname].append(inFile)
    # finally fill the proper dataset/container dictionary to be used for rucio traces
    for i in range(len(inFiles)):
        inFile = inFiles[i]
        proper_dsname = prodDBlocks[i]
        if not rucio_dataset_dictionary.has_key(proper_dsname):
            rucio_dataset_dictionary[proper_dsname] = []
        rucio_dataset_dictionary[proper_dsname].append(inFile)
    return dsname, dsdict, rucio_dataset_dictionary
# new mover implementation
def put_data_new(job, jobSite, stageoutTries, log_transfer=False, special_log_transfer=False, workDir=None):
"""
Do jobmover.stageout_outfiles or jobmover.stageout_logfiles (if log_transfer=True)
or jobmover.stageout_logfiles_os (if special_log_transfer=True)
:backward compatible return: (rc, pilotErrorDiag, rf, "", filesNormalStageOut, filesAltStageOut)
"""
tolog("Mover put data started [new implementation]")
from PilotErrors import PilotException
from movers import JobMover
from movers.trace_report import TraceReport
si = getSiteInformation(job.experiment)
si.setQueueName(jobSite.computingElement) # WARNING: SiteInformation is singleton: may be used in other functions! FIX me later
workDir = workDir or os.path.dirname(job.workdir)
mover = JobMover(job, si, workDir=workDir, stageoutretry=stageoutTries)
eventType = "put_sm"
if log_transfer:
eventType += '_logs'
if special_log_transfer:
eventType += '_logs_os'
if job.isAnalysisJob():
eventType += "_a"
mover.trace_report = TraceReport(pq=jobSite.sitename, localSite=jobSite.sitename, remoteSite=jobSite.sitename, dataset="", eventType=eventType)
mover.trace_report.init(job)
error = None
try:
do_stageout_func = mover.stageout_logfiles if log_transfer else mover.stageout_outfiles
if special_log_transfer:
do_stageout_func = mover.stageout_logfiles_os
transferred_files, failed_transfers = do_stageout_func()
except PilotException, e:
error = e
except Exception, e:
tolog("ERROR: Mover put data failed [stageout]: exception caught: %s" % e)
import traceback
tolog(traceback.format_exc())
error = PilotException('STAGEOUT FAILED, exception=%s' % e, code=PilotErrors.ERR_STAGEOUTFAILED, state='STAGEOUT_FAILED')
if error:
## send trace
mover.trace_report.update(clientState=error.state or 'STAGEOUT_FAILED', stateReason=error.message, timeEnd=time())
mover.sendTrace(mover.trace_report)
return error.code, error.message, [], "", 0, 0
tolog("Mover put data finished")
# prepare compatible output
# keep track of which files have been copied
fields = [''] * 7 # file info field used by job recovery in OLD compatible format
#errors = []
#for is_success, success_transfers, failed_transfers, exception in output:
# for fdata in success_transfers: # keep track of which files have been copied
# for i,v in enumerate(['surl', 'lfn', 'guid', 'filesize', 'checksum', 'farch', 'pfn']): # farch is not used
# value = fdata.get(v, '')
# if fields[i]:
# fields[i] += '+'
# fields[i] += '%s' % str(value)
# if exception:
# errors.append(str(exception))
# for err in failed_transfers:
# errors.append(str(err))
files = job.outData if not log_transfer else job.logData
if special_log_transfer:
files = job.logSpecialData
not_transferred = [e.lfn for e in files if e.status not in ['transferred']]
if not_transferred:
err_msg = 'STAGEOUT FAILED: not all output files have been copied: remain files=%s, errors=%s' % ('\n'.join(not_transferred), ';'.join([str(ee) for ee in failed_transfers]))
tolog("Mover put data finished: error_msg=%s" % err_msg)
return PilotErrors.ERR_STAGEOUTFAILED, err_msg, [], "", 0, 0
return 0, "", fields, "", len(transferred_files), 0
# new mover implementation
def put_data_es(job, jobSite, stageoutTries, files, workDir=None, activity=None):
"""
Do jobmover.stageout_outfiles or jobmover.stageout_logfiles (if log_transfer=True)
or jobmover.stageout_logfiles_os (if special_log_transfer=True)
:backward compatible return: (rc, pilotErrorDiag, rf, "", filesNormalStageOut, filesAltStageOut)
"""
tolog("Mover put data started [new implementation]")
from PilotErrors import PilotException
from movers import JobMover
from movers.trace_report import TraceReport
si = getSiteInformation(job.experiment)
si.setQueueName(jobSite.computingElement) # WARNING: SiteInformation is singleton: may be used in other functions! FIX me later
workDir = workDir or os.path.dirname(job.workdir)
mover = JobMover(job, si, workDir=workDir, stageoutretry=stageoutTries)
eventType = "put_es"
mover.trace_report = TraceReport(pq=jobSite.sitename, localSite=jobSite.sitename, remoteSite=jobSite.sitename, dataset="", eventType=eventType)
mover.trace_report.init(job)
error = None
storageId = None
try:
if not activity:
activity = "es_events"
file = files[0]
if file.storageId and file.storageId != -1:
storageId = file.storageId
copytools = [('objectstore', {'setup': ''})]
else:
copytools = None
transferred_files, failed_transfers = mover.stageout(activity=activity, files=files, copytools=copytools)
except PilotException, e:
error = e
except Exception, e:
tolog("ERROR: Mover put data failed [stageout]: exception caught: %s" % e)
import traceback
tolog(traceback.format_exc())
error = PilotException('STAGEOUT FAILED, exception=%s' % e, code=PilotErrors.ERR_STAGEOUTFAILED, state='STAGEOUT_FAILED')
if error:
## send trace
mover.trace_report.update(clientState=error.state or 'STAGEOUT_FAILED', stateReason=error.message, timeEnd=time())
mover.sendTrace(mover.trace_report)
return error.code, error.message, None
tolog("Mover put data finished")
# prepare compatible output
# keep track of which files have been copied
not_transferred = [e.lfn for e in files if e.status not in ['transferred']]
if not_transferred:
err_msg = 'STAGEOUT FAILED: not all output files have been copied: remain files=%s, errors=%s' % ('\n'.join(not_transferred), ';'.join([str(ee) for ee in failed_transfers]))
tolog("Mover put data finished: error_msg=%s" % err_msg)
return PilotErrors.ERR_STAGEOUTFAILED, err_msg, None
return 0, "", storageId
# new mover implementation:
# keep the list of input arguments as is for smooth migration
def get_data_new(job,
                 jobSite,
                 ins=None,  # ignored, not used anymore, use job.inData instead
                 stageinTries=2,
                 analysisJob=False,  # ignored, not used anymore (use job.isAnalysisJob instead)
                 usect=True,  # ignored, not used anymore
                 pinitdir="",  # not used??
                 proxycheck=True,  # TODO
                 inputDir="",  # for mv mover?? not used??
                 workDir="",  # pilot work dir used to check/update file states
                 files=None,  # input files to stagein
                 pfc_name="PoolFileCatalog.xml"
                 ):
    """Call the mover and stage-in the input files of the job.

    :backward compatible return: (ec, pilotErrorDiag, None (statusPFCTurl), FAX_dictionary)
    On success ec is 0 and pilotErrorDiag is "".
    """
    tolog("Mover get data started [new implementation]")
    # new implementation
    from PilotErrors import PilotException
    from movers import JobMover
    from movers.trace_report import TraceReport
    si = getSiteInformation(job.experiment)
    si.setQueueName(jobSite.computingElement)  # WARNING: SiteInformation is singleton: may be used in other functions! FIX me later
    mover = JobMover(job, si, workDir=workDir, stageinretry=stageinTries)
    # Trace events for analysis jobs are tagged with an "_a" suffix.
    eventType = "get_sm"
    if job.isAnalysisJob():
        eventType += "_a"
    mover.trace_report = TraceReport(pq=jobSite.sitename, localSite=jobSite.sitename, remoteSite=jobSite.sitename, dataset="", eventType=eventType)
    mover.trace_report.init(job)
    error = None
    try:
        output = mover.stagein(files=files, analyjob=job.isAnalysisJob())
    except PilotException, e:
        error = e
        tolog("!!WARNING!!4545!! Caught exception: %s" % (e))
    except Exception, e:
        tolog("ERROR: Mover get data failed [stagein]: exception caught: %s" % e)
        # Wrap unexpected errors into a PilotException so the caller sees a uniform error contract.
        error = PilotException('STAGEIN FAILED, exception=%s' % e, code=PilotErrors.ERR_STAGEINFAILED, state='STAGEIN_FAILED')
        import traceback
        tolog(traceback.format_exc())
    if error:
        ## send trace
        mover.trace_report.update(clientState=error.state or 'STAGEIN_FAILED', stateReason=error.message, timeEnd=time())
        mover.sendTrace(mover.trace_report)
        return error.code, error.message, None, {}
    tolog("Mover get data finished")
    # prepare compatible output
    # Any input file left in a state other than these three means the stage-in failed as a whole.
    not_transferred = [e.lfn for e in job.inData if e.status not in ['transferred', 'remote_io', 'no_transfer']]
    if not_transferred:
        return PilotErrors.ERR_STAGEINFAILED, 'STAGEIN FAILED: not all input files have been copied: remain=%s' % '\n'.join(not_transferred), None, {}
    # FAX accounting: the new mover never uses FAX here, so those counters stay 0.
    tfiles = [e for e in job.inData if e.status == 'transferred']
    job.bytesWithoutFAX = reduce(lambda x, y: x + y.filesize, tfiles, 0)
    job.filesWithoutFAX = len(tfiles)
    job.filesWithFAX = 0
    job.bytesWithFAX = 0
    # backward compatible dict
    FAX_dictionary = dict(N_filesWithFAX=job.filesWithFAX, bytesWithFAX=job.bytesWithFAX,
                          N_filesWithoutFAX=job.filesWithoutFAX, bytesWithoutFAX=job.bytesWithoutFAX)
    #FAX_dictionary['usedFAXandDirectIO'] = False
    ### reuse usedFAXandDirectIO variable as special meaning attribute to form command option list later
    ### FIX ME LATER
    FAX_dictionary['usedFAXandDirectIO'] = 'newmover'
    used_direct_access = [e for e in job.inData if e.status == 'remote_io']
    if used_direct_access:
        FAX_dictionary['usedFAXandDirectIO'] = 'newmover-directaccess'
    # create PoolFileCatalog.xml / PFC.xml
    # (turl based for Prefetcher)
    # remote_io files are referenced by their TURL; everything else by LFN.
    files, lfns = {}, []
    for fspec in job.inData:
        pfn = fspec.lfn
        if fspec.status == 'remote_io':
            pfn = fspec.turl
        files[fspec.guid] = pfn or ''
        lfns.append(fspec.lfn)
    createPoolFileCatalog(files, lfns, pfc_name)
| |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
import logging
from gevent import pool, monkey
from api import datamanage_api
from common.mixins.meta_cache_mixin import MetaCacheMixin
from common.redis import connections
from utils.time import strtotime
from dmonitor.settings import META_CACHE_CONFIGS, GEOG_AREA_CODE
monkey.patch_all()
class DmonitorMetaCacheMixin(MetaCacheMixin):
    """Data-monitoring metadata refresh mixin.

    Mainly used to sync the latest metadata (data sets, data operations,
    flows, alert configs, ...) from Redis and the datamanage API.
    """
    # Size of the gevent greenlet pool used for concurrent Redis fetches.
    SYNC_POOL_SIZE = 100
def __init__(self, *args, **kwargs):
    """Pure pass-through to the MetaCacheMixin initializer."""
    super(DmonitorMetaCacheMixin, self).__init__(*args, **kwargs)
def fetch_from_redis(self, redis_conn, key, result_id, result):
    """Read one metadata entry from the cache and store it into *result*.

    :param redis_conn: cache connection (anything exposing ``get``)
    :param key: cache key to read
    :param result_id: key under which the decoded value is stored
    :param result: output mapping collecting decoded entries
    """
    payload = redis_conn.get(key)
    if not payload:
        return
    try:
        result[result_id] = json.loads(payload)
    except Exception as exc:  # malformed cache content must not kill the pool
        logging.error(exc, exc_info=True)
def fetch_data_set_infos_from_redis(self):
    """Concurrently fetch every cached data-set entry from Redis.

    :return: mapping of data_set_id -> decoded metadata dict
    """
    results = {}
    greenlets = pool.Pool(self.SYNC_POOL_SIZE)
    conn = connections['default']
    cfg = META_CACHE_CONFIGS['data_set']
    for ds_id in conn.smembers(cfg['key_sets']):
        greenlets.spawn(self.fetch_from_redis, conn, cfg['key_template'].format(ds_id), ds_id, results)
    greenlets.join()
    return results
def fetch_data_set_by_id_from_redis(self, data_set_id):
    """Fetch a single data-set entry from Redis by its id.

    :param data_set_id: identifier of the data set
    :return: decoded metadata dict, or {} when missing or undecodable
    """
    conn = connections['default']
    cache_key = META_CACHE_CONFIGS['data_set']['key_template'].format(data_set_id)
    try:
        return json.loads(conn.get(cache_key))
    except Exception as exc:  # a missing key yields None -> TypeError, logged like bad JSON
        logging.error(exc, exc_info=True)
        return {}
def fetch_data_operations_from_redis(self):
    """Concurrently fetch every cached data-operation entry from Redis.

    :return: mapping of data_operation_id -> decoded metadata dict
    """
    results = {}
    greenlets = pool.Pool(self.SYNC_POOL_SIZE)
    conn = connections['default']
    cfg = META_CACHE_CONFIGS['data_operation']
    for op_id in conn.smembers(cfg['key_sets']):
        greenlets.spawn(self.fetch_from_redis, conn, cfg['key_template'].format(op_id), op_id, results)
    greenlets.join()
    return results
def fetch_flow_infos_from_redis(self):
    """Concurrently fetch every cached flow entry from Redis.

    :return: mapping of flow_id -> decoded flow info dict
    """
    results = {}
    greenlets = pool.Pool(self.SYNC_POOL_SIZE)
    conn = connections['default']
    cfg = META_CACHE_CONFIGS['flow']
    for flow_id in conn.smembers(cfg['key_sets']):
        greenlets.spawn(self.fetch_from_redis, conn, cfg['key_template'].format(flow_id), flow_id, results)
    greenlets.join()
    return results
def fetch_dataflow_infos_from_redis(self):
    """Concurrently fetch every cached dataflow entry from Redis.

    Ids that are not numeric are skipped.

    :return: mapping of int dataflow_id -> decoded dataflow info dict
    """
    results = {}
    greenlets = pool.Pool(self.SYNC_POOL_SIZE)
    conn = connections['default']
    cfg = META_CACHE_CONFIGS['dataflow']
    for raw_id in conn.smembers(cfg['key_sets']):
        try:
            dataflow_id = int(raw_id)
        except ValueError:
            continue  # non-numeric id: ignore
        greenlets.spawn(self.fetch_from_redis, conn, cfg['key_template'].format(dataflow_id), dataflow_id, results)
    greenlets.join()
    return results
def fetch_alert_configs(self):
    """Fetch the active alert configurations for the current geog area.

    :return: list of alert configs, [] on API error or failure
    """
    query = {
        'tags': [GEOG_AREA_CODE],
        'active': True
    }
    try:
        response = datamanage_api.alert_configs.list(query)
        return response.data if response.is_success() else []
    except Exception as exc:
        logging.error(exc, exc_info=True)
        return []
def fetch_disabled_alert_configs(self, recent_updated=300):
    """Fetch alert configs disabled within the last *recent_updated* seconds.

    :param recent_updated: look-back window in seconds
    :return: list of alert configs, [] on API error or failure
    """
    query = {
        'tags': [GEOG_AREA_CODE],
        'active': False,
        'recent_updated': recent_updated,
    }
    try:
        response = datamanage_api.alert_configs.list(query)
        return response.data if response.is_success() else []
    except Exception as exc:
        logging.error(exc, exc_info=True)
        return []
def fetch_alert_shields(self):
    """Fetch the in-effect alert shield configs.

    start_time / end_time are converted to timestamps via strtotime.

    :return: list of shield configs, [] on API error or failure
    """
    try:
        response = datamanage_api.alert_shields.in_effect()
        shields = response.data if response.is_success() else []
        for shield in shields:
            shield['start_time'] = strtotime(shield['start_time'])
            shield['end_time'] = strtotime(shield['end_time'])
        return shields
    except Exception as exc:
        logging.error(exc, exc_info=True)
        return []
def gen_logical_key(self, tags):
    """Build the logical key of a data flow from its dimension tags.

    :param tags: dimension dict with 'module', 'component' and 'logical_tag'
    :return: logical identifier string
    """
    return '%s_%s_%s' % (tags['module'], tags['component'], tags['logical_tag'])
def gen_unique_key(self, tags):
    """Join all tag values (ordered by tag name) into one unique key.

    :param tags: dict of all dimensions
    :return: unique key string
    """
    return '_'.join(str(tags[name]) for name in sorted(tags))
def gen_receivers(self, alert_config):
"""从告警配置中的接收者配置生成告警接收者列表
:param alert_config: 告警配置
:return: 告警接收者列表
"""
receivers = []
for receiver_config in alert_config.get('receivers', []):
receivers.append(self.gen_receiver(receiver_config))
return receivers
def gen_receiver(self, receiver_config):
    """Render a single receiver config into a receiver string.

    'user' -> the username, 'list' -> comma-joined user list,
    any other type -> the raw config serialized as JSON.
    """
    kind = receiver_config['receiver_type']
    if kind == 'user':
        return receiver_config['username']
    if kind == 'list':
        return ','.join(receiver_config['userlist'])
    return json.dumps(receiver_config)
def gen_notify_ways(self, alert_config):
    """Return the notify-way list configured on an alert config.

    :param alert_config: alert configuration dict
    :return: the 'notify_config' value, or [] when the key is missing
    """
    try:
        return alert_config['notify_config']
    except KeyError:
        return []
def get_flow_node_by_target(self, target):
    """Extract (flow_id, node_id) from an alert target config.

    dataflow targets yield integer ids (0/missing becomes None); rawdata
    targets yield a synthetic 'rawdata<id>' flow id plus the data set id.
    """
    target_type = target['target_type']
    if target_type == 'dataflow':
        return (int(target.get('flow_id') or 0) or None,
                int(target.get('node_id') or 0) or None)
    if target_type == 'rawdata':
        return 'rawdata%s' % target.get('raw_data_id'), target.get('data_set_id')
    return None, None
def get_flow_display(self, flow_info, language='zh-cn'):
    """Return (type display, flow name) for a flow in the given language.

    :param flow_info: flow metadata dict
    :param language: 'zh-cn' or 'en'
    :return: (localized flow-type label, flow display name)
    """
    type_labels = {
        'rawdata': {'zh-cn': '数据源', 'en': 'Raw Data'},
        'task': {'zh-cn': '数据开发任务', 'en': 'DataFlow Task'},
    }
    if flow_info.get('flow_type') == 'rawdata':
        return type_labels['rawdata'][language], flow_info.get('raw_data_alias')
    return type_labels['task'][language], flow_info.get('flow_name')
def get_logical_tag_display(self, logical_tag, tags=None, flow_info=None):
    """Build (zh, en) display strings for a data-flow logical tag.

    :param logical_tag: logical tag; an all-digit tag denotes a raw data source
    :param tags: optional dimension dict ('module', 'node_id', ...)
    :param flow_info: optional flow metadata dict
    :return: tuple (display_zh, display_en)
    """
    # BUGFIX: the original declared a mutable default argument (flow_info={});
    # normalize a None default here instead so calls cannot share state.
    if flow_info is None:
        flow_info = {}
    if logical_tag.isdigit():
        entity_display = '数据源'
        entity_display_en = 'RawData'
        if 'flow_name' in flow_info:
            entity_display = '{}[{}]'.format(entity_display, flow_info['flow_name'])
            entity_display_en = '{}[{}]'.format(entity_display_en, flow_info['flow_name'])
    else:
        flow_type_display, flow_name = self.get_flow_display(flow_info)
        flow_type_display_en, flow_name_en = self.get_flow_display(flow_info, 'en')
        flow_display = '{}[{}]'.format(flow_type_display, flow_name)
        flow_display_en = '{}[{}]'.format(flow_type_display_en, flow_name_en)
        TASK_DISPLAYS = {
            'clean': {
                'zh-cn': '清洗任务',
                'en': 'Clean task',
            },
            'stream': {
                'zh-cn': '实时节点',
                'en': 'Stream task',
            },
            'realtime': {
                'zh-cn': '实时节点',
                'en': 'Stream task',
            },
            'batch': {
                'zh-cn': '离线节点',
                'en': 'Batch task',
            },
            'shipper': {
                'zh-cn': '入库任务',
                'en': 'Shipper task',
            }
        }
        if flow_info.get('flow_type') == 'rawdata':
            node_info = flow_info.get('nodes', {}).get(logical_tag, {})
            node_name = node_info.get('clean_config_name')
        else:
            # BUGFIX: guard against tags being None (the original raised
            # AttributeError on tags.get here for non-rawdata flows).
            node_id = str(tags.get('node_id')) if tags else str(None)
            node_info = flow_info.get('nodes', {}).get(node_id, {})
            node_name = node_info.get('node_name')
        if tags is not None:
            module = tags.get('module')
            entity_display = '{}[{}]'.format(TASK_DISPLAYS.get(module, {}).get('zh-cn', '任务'), node_name)
            entity_display_en = '{}[{}]'.format(TASK_DISPLAYS.get(module, {}).get('en', 'Task'), node_name)
            if module == "shipper":
                # Shipper tasks also carry the target storage cluster type.
                entity_display = "{}({})".format(
                    entity_display, tags.get("storage_cluster_type")
                )
                entity_display_en = "{}({})".format(
                    entity_display_en, tags.get("storage_cluster_type")
                )
        else:
            entity_display = '任务[{}]'.format(node_name)
            entity_display_en = 'Task[{}]'.format(node_name)
        entity_display = '{}-{}'.format(flow_display, entity_display)
        entity_display_en = '{}-{}'.format(flow_display_en, entity_display_en)
    return entity_display, entity_display_en
def get_flow_node_display(self, flow_info, node_info):
    """Build (zh, en) display strings for a flow and, when known, its node.

    :param flow_info: flow metadata dict
    :param node_info: flow-node metadata dict (may be empty)
    :return: tuple (display_zh, display_en)
    """
    flow_name = flow_info.get('flow_name')
    display = '任务({flow_name})'.format(flow_name=flow_name)
    display_en = 'Task({flow_name})'.format(flow_name=flow_name)
    if node_info:
        node_name = node_info.get('node_name')
        display = '{flow}节点({node_name})'.format(flow=display, node_name=node_name)
        display_en = '{flow} Node({node_name})'.format(flow=display_en, node_name=node_name)
    return display, display_en
def get_data_operation_display(self, data_operation):
    """Build (zh, en) display strings for a data processing/transferring task.

    :param data_operation: data operation config dict
    :return: tuple (display_zh, display_en); implicitly None for unknown types
    """
    op_type = data_operation.get('data_operation_type')
    if op_type == 'data_processing':
        pid = data_operation.get('processing_id')
        return ('数据处理任务({processing_id})'.format(processing_id=pid),
                'Data Processing Task({processing_id})'.format(processing_id=pid))
    if op_type == 'data_transferring':
        tid = data_operation.get('transferring_id')
        return ('数据传输任务({transferring_id})'.format(transferring_id=tid),
                'Data Transferring Task({transferring_id})'.format(transferring_id=tid))
def gen_target_id(self, target):
"""生成告警对象唯一ID
:param target: 告警对象配置
:return: 告警对象唯一ID
"""
target_items = [target.get('target_type')]
flow_id, node_id = self.get_flow_node_by_target(target)
if flow_id:
target_items.append('%s=%s' % ('flow_id', str(flow_id)))
if 'dimensions' in target:
for key, value in target['dimensions'].items():
target_items.append('%s=%s' % (key, value))
return '&'.join(target_items)
def convert_display_time(self, seconds, target_unit=None, precision='minute'):
    """Convert a duration in seconds into (zh, en) display strings.

    :param seconds: duration in seconds
    :param target_unit: force a unit ('second'/'minute'/'hour'); None = auto
    :param precision: smallest unit rendered ('second'/'minute'/'hour')
    :return: tuple (display_zh, display_en)
    """
    seconds = int(seconds)
    # Remainders carried down to the format_as_* helpers.
    remainder = {
        'seconds': 0,
        'minutes': 0,
        'hours': 0,
    }
    if target_unit == 'second' or (target_unit is None and seconds < 60):
        return '{seconds}秒'.format(seconds=seconds), '{seconds}s'.format(seconds=seconds)
    minutes, remainder['seconds'] = divmod(seconds, 60)
    if target_unit == 'minute' or (target_unit is None and minutes < 60):
        return self.format_as_minute(minutes, precision, remainder)
    hours, remainder['minutes'] = divmod(minutes, 60)
    if target_unit == 'hour' or (target_unit is None and hours < 24):
        return self.format_as_hour(hours, precision, remainder)
    days, remainder['hours'] = divmod(hours, 24)
    return self.format_as_day(days, precision, remainder)
def format_as_minute(self, minutes, precision, last_times):
    """Render minutes, appending leftover seconds when precision == 'second'."""
    display = '' if minutes == 0 else '{minutes}分钟'.format(minutes=minutes)
    display_en = '' if minutes == 0 else '{minutes}m'.format(minutes=minutes)
    if precision == 'second':
        sec_zh, sec_en = self.convert_display_time(last_times['seconds'], 'second', 'second')
        display += sec_zh
        display_en += sec_en
    return display, display_en
def format_as_hour(self, hours, precision, last_times):
    """Render hours, then leftover minutes/seconds down to *precision*."""
    display = '' if hours == 0 else '{hours}小时'.format(hours=hours)
    display_en = '' if hours == 0 else '{hours}h'.format(hours=hours)
    if precision in ('minute', 'second'):
        min_zh, min_en = self.convert_display_time(last_times['minutes'] * 60, 'minute', 'minute')
        display += min_zh
        display_en += min_en
        if precision == 'second':
            sec_zh, sec_en = self.convert_display_time(last_times['seconds'], 'second', 'second')
            display += sec_zh
            display_en += sec_en
    return display, display_en
def format_as_day(self, days, precision, last_times):
time_display = '{days}天'.format(days=days) if days != 0 else ''
time_display_en = '{days}d'.format(days=days) if days != 0 else ''
if precision in ('second', 'minute', 'hour'):
hour_display, hour_display_en = self.convert_display_time(last_times['hours'] * 3600, 'hour', 'hour')
time_display = time_display + | |
<reponame>gonzalezf/TopicVisExplorer
# -*- coding: utf-8 -*-
import datetime
import random, threading, webbrowser
import gensim, pickle, random
import gensim_helpers
import numpy as np
import sklearn
import os
import pandas as pd
import pickle #Descomentar segun la version de python
#import pickle5 as pickle
import json as js
import time
from gensim.corpora import Dictionary
from flask import Flask, render_template, request, json, Response, render_template_string, jsonify, redirect
from flask_classful import FlaskView,route
from _display import *
from _prepare import prepare, js_PCoA, PreparedData, _pcoa
from _topic_similarity_matrix import *
from _topic_similarity_matrix_metric_baseline import *
from _get_new_circle_positions import *
from _topic_splitting_helpers import *
from os import path, walk
from gensim.models.keyedvectors import KeyedVectors
from scipy.spatial import procrustes
from random import sample
from utils import get_id, write_ipynb_local_js, NumPyEncoder
from _prepare import PreparedData
from copy import deepcopy
from pandarallel import pandarallel
pandarallel.initialize()
class MaxClientQueue:
    """FIFO bookkeeping of recent client IPs, bounded to *n* entries.

    When the bound is exceeded the oldest client is evicted and its
    per-client dataset caches (module-level dicts) are dropped.
    """

    def __init__(self, n) -> None:
        self.max_n = n
        self.client_list = []

    def purge(self, client_ip):
        """Drop every cached dataset belonging to *client_ip*."""
        global single_corpus_datasets
        global multi_corpora_datasets
        global previous_single_corpus_datasets
        for cache in (single_corpus_datasets, multi_corpora_datasets, previous_single_corpus_datasets):
            cache.pop(client_ip, None)
        print(f"Purged {client_ip}", flush=True)

    def enqueue(self, client_ip: str) -> None:
        """Record *client_ip*; evict the oldest client when over capacity."""
        if client_ip in self.client_list:
            return
        self.client_list.append(client_ip)
        if len(self.client_list) > self.max_n:
            self.purge(self.client_list.pop(0))
# Registry of demo scenarios (name -> {"path": pickle path, "multi": bool});
# populated via TopicVisExplorer.load_scenarios().
scenarios = {
}
# Per-client (keyed by client IP) caches of visualization state.
single_corpus_datasets = {}
multi_corpora_datasets = {}
previous_single_corpus_datasets = {}
# Bound the number of concurrently cached clients.
client_queue = MaxClientQueue(5)
class TopicVisExplorer:
    """Flask application wrapper for the topic-model visualization tool.

    Computes topic-similarity matrices for single- and multi-corpora
    scenarios and caches per-client state (keyed by client IP) in the
    module-level dataset dictionaries.
    """

    app = None

    def __init__(self, name):
        self.app = Flask(name)
        TestView.register(self.app, route_base='/')

    def run(self):
        """Start the (blocking) Flask server on all interfaces, port 5000."""
        self.app.run(debug=False, host="0.0.0.0", port=5000)

    def load_scenarios(self, scenarios_dict):
        """Install the scenario registry consumed by TestView.load_corpus_data."""
        global scenarios
        scenarios = scenarios_dict

    def calculate_topic_similarity_on_single_corpus_for_topic_splitting(self, current_number_of_topics, word_embedding_model, lda_model, corpus, id2word, matrix_documents_topic_contribution, topn_terms, topk_documents, relevance_lambda):
        """Preview the similarity matrix as if the model had one extra topic.

        Used by the topic-splitting workflow; the caller's model is left
        untouched (only a deep copy is mutated).
        """
        temp_lda = deepcopy(lda_model)
        temp_lda.num_topics = current_number_of_topics + 1
        return self.calculate_topic_similarity_on_single_corpus(word_embedding_model, temp_lda, corpus, id2word, matrix_documents_topic_contribution, topn_terms, topk_documents, relevance_lambda)

    def calculate_topic_similarity_on_single_corpus(self, word_embedding_model, lda_model, corpus, id2word, matrix_documents_topic_contribution, topn_terms, topk_documents, relevance_lambda):
        """Compute the topic-similarity matrix of one corpus against itself.

        Expensive intermediates (prepared data, relevant documents) are
        cached in the per-client dict and reused on later calls.
        """
        global single_corpus_datasets
        ip = request.environ.get("HTTP_X_REAL_IP")
        single_corpus_data = single_corpus_datasets[ip]
        print("we are calculating a new topic similarity matirx")
        if 'data_dict' not in single_corpus_data:
            single_corpus_data['data_dict'] = gensim_helpers.prepare(lda_model, corpus, id2word)
        if 'PreparedDataObtained' not in single_corpus_data:
            # BUGFIX: the original referenced the local name `data_dict`, which
            # is unbound whenever 'data_dict' was already cached (NameError).
            temp = prepare(**single_corpus_data['data_dict'])
            single_corpus_data['PreparedDataObtained'] = temp.to_dict()
        if 'relevantDocumentsDict' not in single_corpus_data:
            single_corpus_data['relevantDocumentsDict'] = matrix_documents_topic_contribution.to_dict(orient='records')
        # update word embedding model
        single_corpus_data['word_embedding_model'] = word_embedding_model
        # Keyword relevance = lambda * log p(w|t) + (1 - lambda) * log lift.
        tinfo_collection_1 = pd.DataFrame.from_dict(single_corpus_data['PreparedDataObtained']['tinfo'])
        tinfo_collection_1['relevance'] = relevance_lambda * tinfo_collection_1['logprob'] + (1.00 - relevance_lambda) * tinfo_collection_1['loglift']
        # Vectors of the top keywords / most relevant documents per topic.
        topkeywords_vectors_dict_1, relevantdocuments_vectors_dict_1 = get_topkeywords_relevantdocuments_vectors(word_embedding_model, lda_model, pd.DataFrame(single_corpus_data['relevantDocumentsDict']), topn_terms, tinfo_collection_1, topk_documents)
        # save data (reused by merge/split operations)
        single_corpus_data['tinfo_collection'] = tinfo_collection_1
        single_corpus_data['topkeywords_vectors_dict'] = topkeywords_vectors_dict_1
        single_corpus_data['relevantdocuments_vectors_dict'] = relevantdocuments_vectors_dict_1
        return get_dict_topic_similarity_matrix(word_embedding_model, lda_model, matrix_documents_topic_contribution, lda_model, matrix_documents_topic_contribution, topn_terms, single_corpus_data['PreparedDataObtained'], single_corpus_data['PreparedDataObtained'], topk_documents, relevance_lambda, tinfo_collection_1, tinfo_collection_1, topkeywords_vectors_dict_1, topkeywords_vectors_dict_1, relevantdocuments_vectors_dict_1, relevantdocuments_vectors_dict_1)

    def calculate_topic_similarity_on_multi_corpora(self, word_embedding_model, lda_model_1, lda_model_2, corpus_1, corpus_2, id2word_1, id2word_2, matrix_documents_topic_contribution_1, matrix_documents_topic_contribution_2, topn_terms, topk_documents, relevance_lambda):
        """Compute the cross-corpora topic-similarity matrix of two models."""
        global multi_corpora_datasets
        global single_corpus_datasets
        ip = request.environ.get("HTTP_X_REAL_IP")
        multi_corpora_data = multi_corpora_datasets[ip]
        single_corpus_data = single_corpus_datasets[ip]
        if 'data_dict_1' not in multi_corpora_data:
            multi_corpora_data['data_dict_1'] = gensim_helpers.prepare(lda_model_1, corpus_1, id2word_1)
        if 'PreparedDataObtained_collection_1' not in multi_corpora_data:
            # BUGFIX (here and for collection 2): read the cached dict instead
            # of a local that is unbound when the cache entry already exists.
            temp_1 = prepare(**multi_corpora_data['data_dict_1'])
            multi_corpora_data['PreparedDataObtained_collection_1'] = temp_1.to_dict()
        if 'relevantDocumentsDict_collection_1' not in multi_corpora_data:
            multi_corpora_data['relevantDocumentsDict_collection_1'] = matrix_documents_topic_contribution_1.to_dict(orient='records')
        if 'data_dict_2' not in multi_corpora_data:
            multi_corpora_data['data_dict_2'] = gensim_helpers.prepare(lda_model_2, corpus_2, id2word_2)
        if 'PreparedDataObtained_collection_2' not in multi_corpora_data:
            temp_2 = prepare(**multi_corpora_data['data_dict_2'])
            multi_corpora_data['PreparedDataObtained_collection_2'] = temp_2.to_dict()
        if 'relevantDocumentsDict_collection_2' not in multi_corpora_data:
            multi_corpora_data['relevantDocumentsDict_collection_2'] = matrix_documents_topic_contribution_2.to_dict(orient='records')
        # Keyword relevance = lambda * log p(w|t) + (1 - lambda) * log lift.
        tinfo_collection_1 = pd.DataFrame.from_dict(multi_corpora_data['PreparedDataObtained_collection_1']['tinfo'])
        tinfo_collection_1['relevance'] = relevance_lambda * tinfo_collection_1['logprob'] + (1.00 - relevance_lambda) * tinfo_collection_1['loglift']
        tinfo_collection_2 = pd.DataFrame.from_dict(multi_corpora_data['PreparedDataObtained_collection_2']['tinfo'])
        tinfo_collection_2['relevance'] = relevance_lambda * tinfo_collection_2['logprob'] + (1.00 - relevance_lambda) * tinfo_collection_2['loglift']
        # Vectors of the top keywords / most relevant documents per topic.
        topkeywords_vectors_dict_1, relevantdocuments_vectors_dict_1 = get_topkeywords_relevantdocuments_vectors(word_embedding_model, lda_model_1, pd.DataFrame(multi_corpora_data['relevantDocumentsDict_collection_1']), topn_terms, tinfo_collection_1, topk_documents)
        topkeywords_vectors_dict_2, relevantdocuments_vectors_dict_2 = get_topkeywords_relevantdocuments_vectors(word_embedding_model, lda_model_2, pd.DataFrame(multi_corpora_data['relevantDocumentsDict_collection_2']), topn_terms, tinfo_collection_2, topk_documents)
        # save data (consumed by later single-corpus operations)
        single_corpus_data['tinfo_collection_1'] = tinfo_collection_1
        single_corpus_data['tinfo_collection_2'] = tinfo_collection_2
        single_corpus_data['topkeywords_vectors_dict_1'] = topkeywords_vectors_dict_1
        single_corpus_data['topkeywords_vectors_dict_2'] = topkeywords_vectors_dict_2
        single_corpus_data['relevantdocuments_vectors_dict_1'] = relevantdocuments_vectors_dict_1
        single_corpus_data['relevantdocuments_vectors_dict_2'] = relevantdocuments_vectors_dict_2
        return get_dict_topic_similarity_matrix(word_embedding_model, lda_model_1, matrix_documents_topic_contribution_1, lda_model_2, matrix_documents_topic_contribution_2, topn_terms, multi_corpora_data['PreparedDataObtained_collection_1'], multi_corpora_data['PreparedDataObtained_collection_2'], topk_documents, relevance_lambda, tinfo_collection_1, tinfo_collection_2, topkeywords_vectors_dict_1, topkeywords_vectors_dict_2, relevantdocuments_vectors_dict_1, relevantdocuments_vectors_dict_2)

    def calculate_topic_similarity_on_multi_corpora_metric_baseline(self, word_embedding_model, lda_model_1, lda_model_2, corpus_1, corpus_2, id2word_1, id2word_2, relevance_lambda=0.6, topn_terms=20):
        """Baseline-metric similarity matrix between two models (no caching)."""
        data_dict_1 = gensim_helpers.prepare(lda_model_1, corpus_1, id2word_1)
        prepared_data_topic_1 = prepare(**data_dict_1)
        data_dict_2 = gensim_helpers.prepare(lda_model_2, corpus_2, id2word_2)
        prepared_data_topic_2 = prepare(**data_dict_2)
        return generar_matrix_baseline_metric(word_embedding_model, prepared_data_topic_1, prepared_data_topic_2, relevance_lambda, topn_terms)

    def prepare_single_corpus(self, lda_model, corpus, id2word, matrix_documents_topic_contribution, topic_similarity_matrix):
        """Populate the current client's single-corpus visualization cache."""
        global single_corpus_datasets
        ip = request.environ.get("HTTP_X_REAL_IP")
        single_corpus_data = single_corpus_datasets[ip]
        if 'data_dict' not in single_corpus_data:
            single_corpus_data['data_dict'] = gensim_helpers.prepare(lda_model, corpus, id2word)
        if 'PreparedDataObtained' not in single_corpus_data:
            print("A NEW PREPARED DATA HA SIDO CREADO!!")
            temp = prepare(**single_corpus_data['data_dict'])
            single_corpus_data['PreparedDataObtained'] = temp.to_dict()
        if 'relevantDocumentsDict' not in single_corpus_data:
            single_corpus_data['relevantDocumentsDict'] = matrix_documents_topic_contribution.to_dict(orient='records')
        # 2-D layout positions for the topic circles.
        new_circle_positions = get_circle_positions(topic_similarity_matrix)
        single_corpus_data['lda_model'] = lda_model
        single_corpus_data['corpus'] = corpus
        single_corpus_data['id2word'] = id2word
        single_corpus_data['topic_similarity_matrix'] = topic_similarity_matrix
        single_corpus_data['topic.order'] = single_corpus_data['PreparedDataObtained']['topic.order']
        single_corpus_data['new_circle_positions'] = new_circle_positions

    def prepare_multi_corpora(self, lda_model_1, lda_model_2, corpus_1, corpus_2, id2word_1, id2word_2, matrix_documents_topic_contribution_1, matrix_documents_topic_contribution_2, topic_similarity_matrix):
        """Build the multi-corpora visualization state dictionary.

        NOTE(review): the dict built here is local and is never stored in
        `multi_corpora_datasets` — confirm whether callers rely on that.
        """
        multi_corpora_data = {}
        multi_corpora_data['data_dict_1'] = gensim_helpers.prepare(lda_model_1, corpus_1, id2word_1)
        temp_1 = prepare(**multi_corpora_data['data_dict_1'])
        multi_corpora_data['PreparedDataObtained_collection_1'] = temp_1.to_dict()
        multi_corpora_data['relevantDocumentsDict_collection_1'] = matrix_documents_topic_contribution_1.to_dict(orient='records')
        multi_corpora_data['data_dict_2'] = gensim_helpers.prepare(lda_model_2, corpus_2, id2word_2)
        temp_2 = prepare(**multi_corpora_data['data_dict_2'])
        multi_corpora_data['PreparedDataObtained_collection_2'] = temp_2.to_dict()
        multi_corpora_data['relevantDocumentsDict_collection_2'] = matrix_documents_topic_contribution_2.to_dict(orient='records')
        # BUGFIX: the original assigned `lda_model_1,` / `lda_model_2,` with a
        # trailing comma, storing 1-tuples instead of the models themselves.
        multi_corpora_data['lda_model_1'] = lda_model_1
        multi_corpora_data['lda_model_2'] = lda_model_2
        multi_corpora_data['corpus_1'] = corpus_1
        multi_corpora_data['corpus_2'] = corpus_2
        multi_corpora_data['id2word_1'] = id2word_1
        multi_corpora_data['id2word_2'] = id2word_2
        multi_corpora_data['topic_similarity_matrix'] = topic_similarity_matrix
        multi_corpora_data['topic_order_collection_1'] = multi_corpora_data['PreparedDataObtained_collection_1']['topic.order']
        multi_corpora_data['topic_order_collection_2'] = multi_corpora_data['PreparedDataObtained_collection_2']['topic.order']

    # save the visualization data to a file
    def save_single_corpus_data(self, route_file):
        """Pickle the current client's single-corpus state to *route_file*.

        Refuses to save (and reports every missing key) unless the cached
        state is complete.
        """
        save = True
        single_corpus_data_keys = ['lda_model', 'corpus',
                                   'id2word', 'topic_similarity_matrix', 'topic.order',
                                   'new_circle_positions', 'relevantDocumentsDict',
                                   'PreparedDataObtained', 'data_dict']
        global single_corpus_datasets
        ip = request.environ.get("HTTP_X_REAL_IP")
        single_corpus_data = single_corpus_datasets[ip]
        for key in single_corpus_data_keys:
            if key not in single_corpus_data.keys():
                save = False
                print("Error. Data it is incomplete. It is necessary to get", key)
        if save:
            with open(route_file, 'wb') as handle:
                pickle.dump(single_corpus_data, handle, protocol=4)  # protocol 4 is compatible with python 3.6+
            print("Single corpus data saved sucessfully")

    def save_multi_corpora_data(self, route_file):
        """Pickle the current client's multi-corpora state to *route_file*."""
        save = True
        global multi_corpora_datasets
        ip = request.environ.get("HTTP_X_REAL_IP")
        # BUGFIX: the original read from `single_corpus_datasets` here and
        # would have pickled the wrong per-client state.
        multi_corpora_data = multi_corpora_datasets[ip]
        multi_corpora_data_keys = ['lda_model_1', 'lda_model_2',
                                   'corpus_1', 'corpus_2', 'id2word_1', 'id2word_2',
                                   'relevantDocumentsDict_collection_1', 'relevantDocumentsDict_collection_2',
                                   'PreparedDataObtained_collection_1', 'PreparedDataObtained_collection_2',
                                   'data_dict_1', 'data_dict_2', 'topic_order_collection_1', 'topic_order_collection_2',
                                   'topic_similarity_matrix']
        for key in multi_corpora_data_keys:
            if key not in multi_corpora_data.keys():
                save = False
                print("Error. Data it is incomplete. It is necessary to get", key)
        if save:
            with open(route_file, 'wb') as handle:
                pickle.dump(multi_corpora_data, handle, protocol=4)
            print("Multi corpora data saved sucessfully")
class TestView(FlaskView):
def load_corpus_data(self, scenario_name):#human_in_the_loop=True):
print(scenario_name)
global scenarios
#global multi_corpora_data
#global single_corpus_data
#multi_corpora_data = None
#single_corpus_data = None
scenario = scenarios[scenario_name]
with open(scenario["path"], 'rb') as handle:
loaded_scenario = pickle.load(handle)
loaded_scenario["multi"] = scenario["multi"]
#single_corpus_data['human_in_the_loop'] = human_in_the_loop
print("Data loaded sucessfully")
return loaded_scenario
    @route('/')
    def index(self):
        """Serve the landing page that asks for the user-study access code."""
        # http://localhost:5000/
        return render_template("user_study_code.html")
def find_url(self, user_code):
f = open('user_study_codes_and_urls.json',)
user_codes_and_urls_file = js.load(f)
for url in user_codes_and_urls_file:
current_list = user_codes_and_urls_file[url]['codes']
if int(user_code) in current_list:
return url
return 'error'
def number_minutes(self, data1, data2):
diff = data2 - data1
total_mins = (diff.days*1440 + diff.seconds/60)
return total_mins
def is_a_space_for_a_user(self, max_number_users, min_minutes): #max_number_users: 5, min_minutes: 120
df = pd.read_csv('previous_users.txt', delimiter = "-", header=None)
df.columns = ['user_code', 'user_ip', 'timestamp', 'returned_link']
df.drop_duplicates(subset=['user_code'], keep='last', inplace=True)
df['timestamp'] = df.apply(lambda row : datetime.datetime.strptime(row['timestamp'].strip(), "%a %b %d %H:%M:%S %Y"), axis = 1)
current_time = datetime.datetime.strptime(time.ctime(), "%a %b %d %H:%M:%S %Y")
current_users = 0
for index, row in df.tail(max_number_users).iterrows():
if (self.number_minutes(row['timestamp'],current_time)) < min_minutes:
print(row['timestamp'])
current_users+=1
print(current_users)
if current_users >=max_number_users:
return False
else:
return True
@route('/redirect_with_user_study_code', methods=['POST'])
def redirect_users(self):
max_number_users = 5 # maximum naumber of users
min_minutes = 120 #we cant get more than 5 users in two houts
json_file = request.get_json()
user_code = json_file['user_code']
ip = request.environ.get("HTTP_X_REAL_IP")
local_time = time.ctime()
if ip == None:
ip = 'ip_not_found'
if(self.is_a_space_for_a_user(max_number_users, min_minutes)==True):
url = self.find_url(user_code)
if(url!='error'):
file_object = open('previous_users.txt', 'a')
file_object.write('user_code_'+user_code+' - '+ip+' - '+local_time+' - '+url+'\n')
file_object.close()
else:
file_object = open('codes_with_errors.txt', 'a')
file_object.write('user_code_'+user_code+' - '+ip+' - '+local_time+' - '+url+'\n')
file_object.close()
return self.find_url(user_code)
else:
url = self.find_url(user_code)
file_object = open('max_users_reached.txt', 'a')
file_object.write('user_code_'+user_code+' - '+ip+' - '+local_time+' - '+url+'\n')
file_object.close()
return 'max_users_reached'
@route('/MultiCorpora_documents_1')
def get_documents_data_multicorpus_1(self):
global multi_corpora_datasets
ip = request.environ.get("HTTP_X_REAL_IP")
multi_corpora_data = multi_corpora_datasets[ip]
#return Response(js.dumps(random.sample(multi_corpora_data['relevantDocumentsDict_collection_1'],10)), mimetype='application/json')
return Response(js.dumps(multi_corpora_data['relevantDocumentsDict_collection_1']), mimetype='application/json')
@route('/MultiCorpora_documents_2')
def get_documents_data_multicorpus_2(self):
global multi_corpora_datasets
ip = request.environ.get("HTTP_X_REAL_IP")
multi_corpora_data = multi_corpora_datasets[ip]
#return Response(js.dumps(random.sample(multi_corpora_data['relevantDocumentsDict_collection_2'],10)), mimetype='application/json')
return Response(js.dumps(multi_corpora_data['relevantDocumentsDict_collection_2']), mimetype='application/json')
@route('/SingleCorpus_documents')
| |
or is_cutting):
def move_to():
temp_cursor = get_cursor(handle)[0]
handle.insert(temp_cursor, '\n')
handle.mark_set(tk.INSERT, temp_cursor)
apply_function()
window.after(0, move_to)
# The delays are necessary to solve complications for text modified by the key after this function fires
if not just_function_key:
if handle is comments_text_box:
window.after(0, lambda: (apply_comment_changes(), highlight_stuff(event.widget)))
else:
window.after(0, lambda: (highlight_stuff(event.widget)))
# Route every keystroke in the three editable text boxes through
# keyboard_events, which applies the per-box line-length limit and,
# where given, an undo buffer. The hack box additionally gets
# hack_function=True so edits are treated as instruction changes.
base_file_text_box.bind('<Key>', lambda event:
    keyboard_events(base_file_text_box, disassembly_max_chars, event, buffer=False))
hack_file_text_box.bind('<Key>', lambda event:
    keyboard_events(hack_file_text_box, disassembly_max_chars, event, buffer=hack_buffer, hack_function=True))
comments_text_box.bind('<Key>', lambda event:
    keyboard_events(comments_text_box, comments_max_chars, event, buffer=comments_buffer))

# The button is destroyed and remade every time the user scrolls within it's view
change_rom_name_button = tk.Button()
def change_rom_name():
    """Prompt for a new internal ROM name and write it into the hack file.

    The name field is exactly 20 bytes: shorter input is space-padded,
    longer input is truncated. The stored name is mirrored into the
    comments dict and the view is refreshed in place.
    """
    if not disassembler_loaded():
        return
    new_name = simpledialog.askstring('Change rom name', '20 Characters maximum')
    if not new_name:
        return
    # Truncate-then-pad makes the name exactly 20 characters.
    new_name = new_name[:20].ljust(20)
    name_offset = disasm.header_items['Rom Name'][0]
    for i, character in enumerate(new_name):
        disasm.hack_file[name_offset + i] = ord(character)
    disasm.comments['9'] = new_name
    navigate_to(navigation)
def destroy_change_rom_name_button():
    """Remove the floating 'Change' rom-name button if it is displayed.

    The button is recreated by navigate_to() whenever the Rom Name header
    word scrolls into view, so it must be torn down before each redraw.
    """
    global change_rom_name_button
    if change_rom_name_button:
        change_rom_name_button.destroy()
        # Clear the handle so a stale widget is never destroyed twice.
        change_rom_name_button = None
def navigate_to(index, center=False, widget=None, region_treatment=False, region_override=False):
    """Redraw all four text boxes so the view starts at word *index*.

    index: word (instruction) offset into the file to display.
    center: place *index* mid-view and remember it as the cursor row.
    widget: text box whose cursor/highlight should be preserved.
    region_treatment / region_override: translate *index* through the
        custom memory-region mapping before navigating.
    Side effects: updates the global `navigation`, rebuilds the floating
    rom-name button, and repopulates every text box.
    """
    global navigation, change_rom_name_button, prev_cursor_location
    if not disassembler_loaded():
        return
    destroy_change_rom_name_button()
    if (region_treatment and disasm.game_address_mode) or region_override:
        # Convert a game-address word index back to a rom-file word index.
        index = disasm.region_unalign(index << 2, invert=False, game_offset=True) >> 2
    indexed = index
    if center:
        index -= max_lines >> 1
    shift_amount = navigation
    # Correct the navigation if traveling out of bounds, also calculate limits for file samples to display
    amount_words = disasm.file_length >> 2
    navigation = index if index + max_lines < amount_words else amount_words - max_lines
    if navigation < 0:
        navigation = 0
    limit = navigation + max_lines if navigation + max_lines < amount_words else amount_words
    lines = limit - navigation
    if center:
        prev_cursor_location = indexed
    # shift_amount is now (old navigation - new navigation): how far the
    # displayed content moved; used to translate cursor rows below.
    shift_amount -= navigation
    # Sample bytes out of files
    file_nav = navigation * 4
    base_sample = disasm.base_file[file_nav:file_nav + ((limit if limit < max_lines else max_lines) * 4)]
    hack_sample = disasm.hack_file[file_nav:file_nav + ((limit if limit < max_lines else max_lines) * 4)]
    # Translate each 4 lot of bytes into separate integers
    ints_in_base_sample = ints_of_4_byte_aligned_region(base_sample)
    ints_in_hack_sample = ints_of_4_byte_aligned_region(hack_sample)
    # Create blank comments box, then fill with any comments that have been made (we don't store any blank comments)
    sample_comments = [''] * lines
    for i in range(lines):
        string_key = '{}'.format(navigation + i)
        if string_key in disasm.comments.keys():
            sample_comments[i] = disasm.comments[string_key]
    # Calculate what addresses to display in the address box
    addresses = [i * 4 for i in range(navigation, limit)]
    if disasm.game_address_mode:
        for i, address in enumerate(addresses):
            in_region = False
            for reg in disasm.memory_regions:
                # reg looks like (start, length, offset) — TODO confirm
                # against the memory_regions producer.
                rom_start = reg[0] - reg[2]
                rom_end = (reg[0] + reg[1]) - reg[2]
                if address in range(rom_start, rom_end):
                    addresses[i] += reg[2]
                    in_region = True
                    break
            if not in_region:
                addresses[i] += disasm.game_offset
    address_range = [extend_zeroes(hexi(i), 8) for i in addresses]
    hex_or_bin = app_config['hex_mode'] or app_config['bin_mode']
    # Disassemble ints into instructions, or display as hex or bin.
    # Words below index 16 are always rendered as hex (presumably the
    # rom header area — confirm).
    base_disassembled = []
    hack_disassembled = []
    for i in range(len(ints_in_base_sample)):
        if navigation + i < 16 or app_config['hex_mode']:
            base_disassembled.append(hex_space(extend_zeroes(hexi(ints_in_base_sample[i]), 8)))
            hack_disassembled.append(hex_space(extend_zeroes(hexi(ints_in_hack_sample[i]), 8)))
        elif app_config['bin_mode']:
            base_disassembled.append(space_bindies(extend_zeroes(bindies(ints_in_base_sample[i]), 32)))
            hack_disassembled.append(space_bindies(extend_zeroes(bindies(ints_in_hack_sample[i]), 32)))
        else:
            base_disassembled.append(disasm.decode(ints_in_base_sample[i], navigation + i))
            hack_disassembled.append(disasm.decode(ints_in_hack_sample[i], navigation + i))
    # Adjust branches or jumps to offset to custom regions
    for disassembly in [base_disassembled, hack_disassembled] if not hex_or_bin else []:
        for i, text in enumerate(disassembly):
            try:
                space_in = text.find(' ')
                immid_in = text.rfind(app_config['immediate_identifier'])
                if space_in >= 0 and immid_in >= 0:
                    if text[:space_in] in BRANCH_FUNCTIONS + ['J', 'JAL']:
                        address = deci(text[immid_in + 1:])
                        if text[:space_in] in ['J', 'JAL'] and not disasm.game_address_mode:
                            address = disasm.region_unalign(address, game_offset=True)
                        elif text[:space_in] in BRANCH_FUNCTIONS and disasm.game_address_mode:
                            address = disasm.region_align(address, game_offset=True)
                        new_text = text[:immid_in + 1] + extend_zeroes(hexi(address), 8)
                        disassembly[i] = new_text
            except Exception as e:
                print(e)
    # Replace disassembled data in the hack file with any errors the user has made
    for i in range(len(hack_disassembled)):
        if not hack_disassembled[i]:
            hack_disassembled[i] = 'UNKNOWN/NOT AN INSTRUCTION'
        if not base_disassembled[i]:
            base_disassembled[i] = 'UNKNOWN/NOT AN INSTRUCTION'
        string_key = '{}'.format(navigation + i)
        if string_key in user_errors.keys():
            hack_disassembled[i] = user_errors[string_key][1]
    # Display floating Rom Name Change button
    if disasm.header_items['Rom Name'][0] // 4 in range(navigation, limit):
        bg, fg = app_config['text_bg_colour'], app_config['text_fg_colour']
        change_rom_name_button = tk.Button(window, text = 'Change', command = change_rom_name,
                                           bg=bg, fg=fg,
                                           activebackground=bg, activeforeground=fg)
        c_w, c_h, c_x, c_y = geometry(comments_text_box.winfo_geometry())
        font_w, font_h = font_dimension(main_font_size)
        # Place the button next to the Rom Name row in the comments column.
        x_offset = (font_w * 22) + 5
        y_offset = ((disasm.header_items['Rom Name'][0] // 4) - navigation) * font_h
        change_rom_name_button.place(x = c_x + x_offset, y = 36 + y_offset, height = 20)
    # Update all 4 text boxes
    def update_text_box(handle, text):
        # Replace the box's whole contents, then restore the cursor row
        # shifted by how far the view moved (clamped to the visible range).
        text = '\n'.join(text)
        cursor, line, column = get_cursor(handle)
        handle.delete('1.0', tk.END)
        handle.insert('1.0', text)
        new_line = line + shift_amount
        if new_line < 1 or new_line > max_lines:
            new_cursor_loc = cursor_value(keep_within(new_line, 1, max_lines), 0)
        else:
            new_cursor_loc, _, __ = modify_cursor(cursor, shift_amount, 0, text)
        handle.mark_set(tk.INSERT, new_cursor_loc)
    params = [[address_text_box, address_range],
              [base_file_text_box, base_disassembled],
              [hack_file_text_box, hack_disassembled],
              [comments_text_box, sample_comments]]
    [update_text_box(param[0], param[1]) for param in params]
    if center:
        widgey = widget if widget else hack_file_text_box
        new_cursor = modify_cursor('1.0', max_lines >> 1, 'max', get_text_content(widgey))[0]
        widgey.mark_set(tk.INSERT, new_cursor)
    elif prev_cursor_location in range(navigation, limit):
        line = prev_cursor_location - navigation
        new_cursor, _, __ = modify_cursor('1.0', line, 'max', get_text_content(hack_file_text_box))
        hack_file_text_box.mark_set(tk.INSERT, new_cursor)
    highlight_stuff(widget, skip_moving_cursor=center)
    updateWindowScrollbarPos()
def navigation_callback(address):
    """Parse a user-typed address string and jump the view to it.

    address: hex string from the navigation entry; converted to a word
        index (game offset removed when game-address mode is on).
    Invalid input silently restores focus and aborts the jump.
    """
    widget = check_widget(window.focus_get())
    if not address and widget:
        widget.focus_force()
    try:
        address = deci(address)
        if disasm.game_address_mode:
            address -= disasm.game_offset
        address >>= 2
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Unparsable input just aborts.
        if widget:
            widget.focus_force()
        return
    apply_hack_changes()
    apply_comment_changes()
    reset_target()
    navigate_to(address, center=True, widget=widget, region_treatment=True)
    if widget:
        widget.focus_force()
def navigation_prompt(root=window):
    """Ask the user for an address and jump the disassembly view to it."""
    if not disassembler_loaded():
        return
    address = simpledialog.askstring('Navigate to address', '', parent=root)
    if address:
        navigation_callback(address)
def scroll_callback(event, numUnits=1):
    """Scroll the disassembly view by scroll_amount lines per wheel unit.

    Pending hack/comment edits are committed before the view moves so
    nothing typed is lost on redraw.
    """
    if not disassembler_loaded():
        return
    apply_hack_changes()
    apply_comment_changes()
    step = app_config['scroll_amount']
    direction = -step if event.delta > 0 else step
    navigate_to(navigation + direction * numUnits, widget=check_widget(window.focus_get()))
def save_changes_to_file(save_as=False):
    """Write the hacked rom (plus comments and jumps sidecar files) to disk.

    save_as: prompt for a new path and re-home every per-file config entry
        (CRC setting, memory regions, jump tables, ...) before writing.
    Returns True on success, False when saving is refused or cancelled.
    """
    if not disassembler_loaded():
        return False
    apply_hack_changes()
    apply_comment_changes()
    # Do not save changes if there are errors
    # NOTE(review): this returns on the first iteration, so it navigates to
    # one (dict-order) error and refuses — confirm that is intended rather
    # than a missing condition.
    for key in user_errors:
        navigate_to(int(key), widget=hack_file_text_box, center=True)
        return False
    if app_config['calc_crc'][disasm.hack_file_name]:
        status_text.set('Calculating checksum...')
        window.update()
        sum1, sum2 = disasm.calc_checksum()
    else:
        sum1 = sum2 = 0
    # Refresh the view if the CRC words are currently displayed, since the
    # checksum calculation may have rewritten them.
    if app_config['calc_crc'][disasm.hack_file_name] and navigation <= disasm.header_items['CRC2'][0] >> 2:
        navigate_to(navigation)
    if save_as:
        new_file_name = filedialog.asksaveasfilename(initialdir = app_config['previous_hack_location'],
                                                     title = 'Save as...')
        if not new_file_name:
            return False
        new_file_path = os.path.realpath(new_file_name)
        if new_file_path == disasm.base_folder + disasm.base_file_name:
            simpledialog.messagebox._show('Wait a sec', 'You shouldn\'t select the base file')
            return False
        # Inherit the extension from the current hack file if none given.
        new_file_name = new_file_path[new_file_path.rfind('\\') + 1:]
        if not '.' in new_file_name:
            dot = disasm.hack_file_name.rfind('.')
            new_file_name += disasm.hack_file_name[dot:]
        new_dir = new_file_path[:new_file_path.rfind('\\') + 1]
        new_file_path = new_dir + new_file_name
        if exists(new_file_path):
            simpledialog.messagebox._show('Sorry', 'That file already exists.')
            return False
        # Copy every per-file config entry over to the new file name.
        app_config['previous_hack_location'] = new_dir
        app_config['previous_hack_opened'] = new_file_path
        app_config['hack_of_base'][new_file_name] = app_config['hack_of_base'][disasm.hack_file_name]
        app_config['calc_crc'][new_file_name] = app_config['calc_crc'][disasm.hack_file_name]
        app_config['memory_regions'][new_file_name] = app_config['memory_regions'][disasm.hack_file_name].copy()
        app_config['remember_batch'][new_file_name] = app_config['remember_batch'][disasm.hack_file_name]
        app_config['remember_script'][new_file_name] = app_config['remember_script'][disasm.hack_file_name]
        disasm.hack_file_name = new_file_name
        disasm.comments_file = new_file_path + ' comments.txt'
        disasm.jumps_file = new_file_path + ' jumps.data'
        window.title('ROM Disassembler - ' + disasm.hack_file_name)
        app_config['CIC'][disasm.hack_file_name] = disasm.cic
        app_config['jumps_displaying'][disasm.hack_file_name] = jumps_displaying.copy()
        app_config['game_address_mode'][disasm.hack_file_name] = disasm.game_address_mode
        save_config()
        with open(disasm.jumps_file, 'wb') as jumps_file:
            dump((disasm.jumps_to, disasm.branches_to, disasm.jalr_list), jumps_file)
    with open(disasm.hack_folder + disasm.hack_file_name, 'wb') as file:
        file.write(disasm.hack_file)
    # Back up the existing comments file to the first free
    # '(Backup N).txt' name before overwriting it.
    _filename = disasm.comments_file + '(Backup '
    if exists(disasm.comments_file):
        i = 0
        while True:
            i += 1
            if not exists(_filename + str(i) + ').txt'):
                _filename += str(i) + ').txt'
                with open(_filename, 'w') as backup_comments_file:
                    with open(disasm.comments_file, 'r') as comments_file:
                        backup_comments_file.write(comments_file.read())
                break
    try:
        with open(disasm.comments_file, 'w') as file:
            file.write(dict_to_string(disasm.comments))
        # Comments were written successfully; the backup is redundant.
        if _filename != disasm.comments_file + '(Backup ':
            os.remove(_filename)
    except Exception as e:
        simpledialog.messagebox._show('Error', 'There was trouble saving your comments file. '
                                      'A backup of your old comments can be found next to the original comments file. '
                                      'Your rom file was saved without error.'
                                      '\n\n' + str(e))
    checksum_text = ' Checksum calculated - CRC1: {} | CRC2: {}'.format(
        extend_zeroes(hexi(sum1), 8),
        extend_zeroes(hexi(sum2), 8))
    message = 'Rom Saved.'
    if app_config['calc_crc'][disasm.hack_file_name]:
        message += checksum_text
    status_text.set(message)
    window.update()
    return True
def destroy_them(not_main=False):
global colours_window, jumps_window, comments_window, dimension_window, manual_cic_win
global changes_win, opcodes_win, script_win, phrases_win, mem_regions_win, hex_win
if changes_win:
changes_win.destroy()
changes_win = None
if jumps_window:
jumps_window.destroy()
jumps_window = None
if comments_window:
comments_window.destroy()
comments_window = None
if dimension_window:
dimension_window.destroy()
dimension_window = None
if manual_cic_win:
manual_cic_win.destroy()
manual_cic_win = None
if script_win:
script_win.destroy()
script_win | |
#coding:utf_8
import os
import json
import requests
import hashlib
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import PKCS1_OAEP, PKCS1_v1_5
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
from binascii import b2a_hex, a2b_hex
from Cryptodome.Cipher import AES
from Cryptodome import Random
import base64
from threading import Timer
from sql_operation import db_operation,token,server_operation,msg_operation,token_consume_operation,msg_check_consume_operation
import time
import rsa
from flask import request
from binascii import a2b_base64, b2a_base64
# Module-level database-helper singletons shared by every class below,
# one per table/concern (see sql_operation).
op=db_operation()                  # per-URL token tables
server=server_operation()          # middleman/backend server registry
msg=msg_operation()                # per-request RSA key-pair storage
tc=token_consume_operation()       # token-usage accounting
mcc=msg_check_consume_operation()  # msg-check-usage accounting
class server_opr:
    """Lookups over the server registry and the per-URL consumption tables."""

    def search_by_kid(self, kid):
        """Return the server-registry record for key id *kid*."""
        res = server.search_by_kid(kid)
        print(res)
        return res

    def serach_token_num(self, url):
        """Return the token-consumption record for *url*.
        (Method-name typo kept for caller compatibility.)"""
        res = tc.search_by_url(url)
        print(res)
        return res

    def serach_msg_num(self, url):
        """Return the msg-check-consumption record for *url*.
        (Method-name typo kept for caller compatibility.)"""
        res = mcc.search_by_url(url)
        print(res)
        return res

    def insert_token_msg(self, url):
        """Create token and msg-check consumption records for *url* if absent."""
        if tc.checkhave(url) == 1:
            print('已有记录。')  # record already exists
            return {'code': 200, 'msg': '已有记录。'}
        tc.insert(url)
        mcc.insert(url)
        # BUG FIX: previously replied '已有记录。' ("record already exists")
        # right after inserting a brand-new record.
        return {'code': 200, 'msg': '插入成功。'}
class token_consume:
    """Accounting for token usage on the per-URL record (backed by `tc`)."""

    def search_url_msg(self, url):
        """Return the stored consumption counters for *url*."""
        return tc.search_num(url)

    def update_num(self, url, num, operator):
        """Apply *num* to the counter named by *operator* after a consume."""
        print(operator)
        handlers = {
            'add_free': tc.add_free,
            'add_times': tc.add_times,
            'add_newdata': tc.add_newdata,
        }
        handler = handlers.get(operator)
        if handler is not None:
            handler(url, num)
        return {'code': 200, 'result': '成功'}

    def get_type(self, url):
        """Return the current consumption type recorded for *url*."""
        record = tc.search_by_url(url)
        return {'code': 200, 'type': record['type']}

    def update_type(self, url, type):
        """Persist a new consumption type for *url*."""
        tc.set_type(url, type)

    def check_have(self, url):
        """Report whether a consumption record exists for *url*."""
        if tc.checkhave(url) == 0:
            return {'code': 10000, 'msg': '没有找个用户的记录'}
        return {'code': 200, 'msg': '存在记录'}
class msg_check_consume:
    """Accounting for message-check usage, mirroring token_consume (backed by `mcc`)."""

    def search_url_msg(self, url):
        """Return the stored consumption counters for *url*."""
        return mcc.search_num(url)

    def update_num(self, url, num, operator):
        """Apply *num* to the counter named by *operator* after a consume."""
        print(operator)
        if operator == 'add_free':
            mcc.add_free(url, num)
        elif operator == 'add_times':
            # BUG FIX: this branch previously repeated 'add_free', which can
            # never match after the first test, so 'add_times' updates were
            # silently dropped (token_consume has the correct branch).
            mcc.add_times(url, num)
        elif operator == 'add_newdata':
            mcc.add_newdata(url, num)
        return {'code': 200, 'result': '成功'}

    def get_type(self, url):
        """Return the current consumption type recorded for *url*."""
        result = mcc.search_by_url(url)
        return {'code': 200, 'type': result['type']}

    def update_type(self, url, type):
        """Persist a new consumption type for *url*."""
        mcc.set_type(url, type)

    def check_have(self, url):
        """Report whether a consumption record exists for *url*."""
        print('1111111144444444444444')  # leftover debug marker, kept as-is
        if mcc.checkhave(url) == 0:
            return {'code': 10000, 'msg': '没有找个用户的记录'}
        else:
            return {'code': 200, 'msg': '存在记录'}
class token_check:
    """Issue and look up per-user login tokens, one DB table per backend URL.

    Persistence goes through the module-level helpers: `op` (token table),
    `server` (server registry) and `token_consume` (usage accounting).
    """
    def __init__(self):
        # Key material for the middleman <-> backend channel; initialised
        # empty and never populated in this class.
        self.server_private_key=''
        self.server_public_key=''
    def check_have(self,kid):
        """Report whether the server registry has a record for key id *kid*."""
        print(server.checkhave2(kid))
        if server.checkhave2(kid)==0:
            return {'code':10000,'msg':'没有找个用户的记录'}
        else:
            return {'code':200,'msg':'存在记录'}
    def insert_token(self,url,user_id1,time_code1,token1):
        """Insert a (user, timestamp, token) row into the table for *url*."""
        op.init(url)
        op.insert(user_id1,time_code1,token1)
        return {'code':200,'result':'操作成功'}
    def dele_by_uid(self,url,user_id):
        """Delete the token row of *user_id* from the table for *url*."""
        op.init(url)
        op.deleteis(user_id)
        return {'code': 200,'result':'操作成功'}
    def update_by_uid(self,url,uer_id,time_code,token):
        """Replace the timestamp/token stored for *uer_id* in *url*'s table."""
        op.init(url)
        op.update(uer_id,time_code,token)
        return {'code': 200,'result':'操作成功'}
    def search_by_uid(self,url,user_id):
        """Fetch the token row of *user_id* from the table for *url*."""
        op.init(url)
        return {'code': 200,'result':op.search_by_user_id(user_id)}
    def search_all(self,url):
        """Fetch every token row from the table for *url*."""
        op.init(url)
        dict=op.search_all()
        return {'code': 200,'result':dict}
    # Timestamp generator.
    def get_time(self):
        """Return the current time as a microsecond-resolution integer."""
        t = time.time()
        print(int(round(t * 1000000)))  # microsecond timestamp
        return int(round(t * 1000000))
    # Random number used as nonce material for token generation.
    def rsa_random_num(self):
        """Return the microsecond timestamp as a string nonce."""
        return str(self.get_time())
    # Fetch the token belonging to user_id.
    def search_token_by_id(self,user_id,url):
        """Return the stored token for *user_id*, charging one 'free' use."""
        op.init(url)
        result = op.search_by_user_id(user_id)
        if result!=[]:
            # Unbound call: the class itself is passed in place of `self`.
            token_consume.update_num(token_consume,url,-1,'add_free')
            token=result['token']
            resu = {'code': 200, 'token': token,'msg':'查询成功'}
            return resu
        else:
            resu = {'code': 10000, 'msg':"token已更新,请重新登陆" }
            return resu
    # Request a token for user_id.
    def server_get_token(self,user_id,url):
        """Return the existing token for *user_id*, or mint and store a new one.

        Token = MD5(microsecond-timestamp + user_id + 'huctf'); per the
        original note, any other one-way construction would do equally.
        """
        op.init(url)
        if op.checkhave(user_id)==1:
            dict=op.search_by_user_id(user_id)
            token=dict['token']
            print('数据库已有记录')  # record already in database
            return {'code': 201, 'token': token,'msg':'数据库已有记录'}
        else:
            random_num = self.rsa_random_num()
            plain_text = random_num + user_id + 'huctf'
            cypher_text = Encrypts.md5_encrypt( plain_text)
            token =cypher_text
            op.insert(user_id,random_num,token)
            print('申请token成功')  # token issued successfully
            resu = {'code': 200, 'token': token,'msg':'申请token成功'}
            return resu
    # Create the middleman <-> backend record and its two tables.
    def create_mid_server_key(self,kid,url):
        """Register *kid*/*url* and create its token and msg-check tables."""
        if server.checkhave(kid,url) == 1:
            print('已有记录。')  # record already exists
            resu = {'code': 200, 'msg': '已有记录。'}
            return resu
        else:
            token_tb='token_'+url
            msg_tb='msg_check_'+url
            server.insert(kid,url,token_tb,msg_tb)
            server.create_new_table(token_tb,msg_tb)
            print('初始化成功。')  # initialised successfully
            resu = {'code': 200, 'msg': '初始化成功。'}
            return resu
    # Scheduled task: intended to refresh once per day.
    def timedTask(self):
        # NOTE(review): threading.Timer fires exactly once; task() would
        # have to call timedTask() again for this to actually repeat daily
        # — confirm intended behaviour.
        Timer(86400, self.task, ()).start()
    # Timed job: expire stale keys.
    def task(self):
        """Delete registry keys older than one day (see server_operation)."""
        server.delete_task()
class msg_random_check:
    """Per-request RSA key pairs for message-integrity checks.

    Each request gets a sequence number sq = MD5('<timestamp>huctf') mapped
    to a freshly generated RSA key pair in the per-URL `msg` table. The
    front end encrypts with the public key; the backend fetches the private
    key (single use) to decrypt.
    """
    def __init__(self):
        # Most recently generated key pair and sequence number.
        self.private_key = ''
        self.public_key = ''
        self.sq=''
    def check_have(self,kid):
        """Report whether the server registry has a record for key id *kid*."""
        print(server.checkhave2(kid))
        if server.checkhave2(kid)==0:
            return {'code':10000,'msg':'没有找个用户的记录'}
        else:
            return {'code':200,'msg':'存在记录'}
    def insert_msg(self,url,sq,puk,prk,time_code):
        """Store a (sq, public key, private key, timestamp) row for *url*."""
        msg.init(url)
        msg.insert(sq,puk,prk,time_code)
        return {'code':200,'result':'操作成功'}
    def deleteis_by_sq(self,url,sq):
        """Delete the key-pair row stored under sequence number *sq*."""
        msg.init(url)
        msg.deleteis_by_sq(sq)
        return {'code': 200,'result':'操作成功'}
    def update_by_sq(self,url,sq,puk,prk,time_code):
        """Replace the key pair stored under sequence number *sq*."""
        msg.init(url)
        msg.update(sq,puk,prk,time_code)
        return {'code': 200,'result':'操作成功'}
    def search_by_sq(self,url,sq):
        """Fetch the key-pair row stored under sequence number *sq*."""
        msg.init(url)
        result=msg.search_by_sq(sq)
        return {'code':200,'result':result}
    def search_all(self,url):
        """Fetch every key-pair row for *url*."""
        msg.init(url)
        result=msg.search_all()
        return {'code':200,'result':result}
    # Timestamp generator.
    def get_time(self):
        """Return the current time as a microsecond-resolution string."""
        t = time.time()
        print(int(round(t * 1000000)))  # microsecond timestamp
        return str(int(round(t * 1000000)))
    # Encryption endpoint offered to the front end.
    def mid_front_encode(self,plain_text,sq,url):
        """Encrypt *plain_text* with the public key stored under *sq*.

        NOTE(review): Encrypts.rsa_encode is defined as
        (self, public_key_file, plaintext); this unbound two-argument call
        passes the key text where a file path is expected and omits the
        plaintext parameter, so it raises TypeError as written — confirm
        which helper was intended.
        """
        puk=self.mid_front_transport_pubKey(sq,url)
        if (puk['code'] == 200):
            puk = puk['puk']
            return {'code': 200, 'result': Encrypts.rsa_encode(puk,plain_text)}
        else:
            return {'code': 10000, 'msg': '未找到公钥'}
    def mid_sever_decode(cypher,sq,url):
        """Decrypt *cypher* with the private key stored under *sq*, then
        delete the key pair and charge one msg-check use.

        Defined without `self`: callers invoke it unbound as
        msg_random_check.mid_sever_decode(cypher, sq, url).
        """
        prk=msg_random_check.mid_server_transport_priKey2(sq,url)
        # Undo the transport escaping applied to the base64 ciphertext.
        cypher=cypher.replace(" ","")
        cypher=cypher.replace("@","+")
        print(cypher)
        prk=RSA.importKey(prk)
        prk0=Cipher_pkcs1_v1_5.new(prk)
        # NOTE(review): the second argument of PKCS#1 v1.5 decrypt() is the
        # sentinel returned on padding failure; passing the cipher object
        # itself means failures yield that object instead of an error —
        # confirm intended.
        result=prk0.decrypt(base64.b64decode(cypher),prk0)
        msg.deleteis_by_sq(sq)
        print(result)
        # Unbound call: the class itself is passed in place of `self`.
        msg_check_consume.update_num(msg_check_consume,url,-1,'add_free')
        return {'code':200,'result':str(result)}
    def check_sq(self,url,sq):
        """Return whether a key pair exists for sequence number *sq*."""
        return msg.checkhave(sq)
    # Public-key endpoint offered to the front end.
    def mid_front_transport_pubKey(self,sq,url):
        """Return the public key stored under *sq*, or an error payload."""
        msg.init(url)
        result=msg.search_by_sq(sq)
        if result!=[]:
            return {'code': 200, 'puk': result['puk']}
        else:
            result={'code':10000,'msg':'没有这个序列号'}
            return result
    # Private-key endpoint offered to the backend (single use: row deleted).
    def mid_server_transport_priKey(self,sq,url):
        """Return and delete the private key stored under *sq*."""
        print(sq)
        msg.init(url)
        result=msg.search_by_sq(sq)
        if result!=[]:
            prk=result['prk']
            print(prk)
            msg.deleteis_by_sq(sq)
            return {'code':200,'prk':prk}
        else:
            result={'code':10000,'msg':'未找到私钥'}
            return result
    # Internal private-key lookup (row kept; defined without `self`).
    def mid_server_transport_priKey2(sq,url):
        """Return the raw private key stored under *sq* without deleting it."""
        print(sq)
        msg.init(url)
        result=msg.search_by_sq(sq)
        if result!=[]:
            prk=result['prk']
            print(prk)
            return(prk)
        else:
            result={'code':10000,'msg':'未找到私钥'}
            return result
    # Mint a sequence number and its RSA key pair, persisted in the DB.
    def create_seq(self,url):
        """Create sq = MD5('<timestamp>huctf') and its key pair for *url*."""
        time_code=self.get_time()
        strr=time_code+'huctf'
        sq=Encrypts.md5_encrypt(strr)
        self.sq=sq
        return self.create_key(str(sq),url,time_code)
    # Create one key pair.
    def create_key(self,sq,url,time_code):
        """Generate and persist an RSA pair for *sq* unless one already exists."""
        msg.init(url)
        if msg.checkhave(sq) == 1:
            print('已有记录。')  # record already exists
            resu = {'code': 200,'sq':sq,'puk':self.public_key, 'msg': '数据已创建。'}
            return resu
        else:
            # Unbound call: the class itself is passed in place of `self`.
            self.public_key,self.private_key=Encrypts.generate_rsa_keys(Encrypts)
            msg.insert(sq,self.public_key,self.private_key,time_code)
            print('数据创建成功。')  # data created successfully
            # str(...)[2:-1] strips the b'...' wrapper from the exported key bytes.
            resu = {"code": 200,"sq":str(sq),"puk":str(self.public_key)[2:-1], "msg": "数据创建成功。"}
            return resu
    # Remove an expired key pair.
    def delete_key(self,url,sq):
        """Delete the key pair stored under sequence number *sq*."""
        msg.init(url)
        msg.deleteis_by_sq(sq)
class Encrypts:
    """Encryption helpers: MD5, base64, AES and RSA.

    Note: md5_encrypt and base64_encry are written without `self` and are
    invoked unbound (Encrypts.md5_encrypt(...)) elsewhere in this module;
    their signatures are kept as-is for those callers.
    """

    def __init__(self):
        self.aes_mode = AES.MODE_ECB   # AES block-cipher mode
        self.aes_key_size = 256        # AES key length in bits (random-value key)
        self.rsa_count = 1024          # RSA modulus length in bits

    def md5_encrypt(plaintext):
        """Return the hex MD5 digest of *plaintext* (a str)."""
        return hashlib.md5(plaintext.encode(encoding='utf-8')).hexdigest()

    def base64_encry(plaintext):
        """Return *plaintext* base64-encoded, as bytes."""
        return base64.b64encode(plaintext.encode('utf-8'))

    def generate_aes_key(self):
        """Generate a random base64-encoded AES key of self.aes_key_size bits."""
        urandom_len = int(self.aes_key_size / 8 / 4 * 3)
        return base64.b64encode(os.urandom(urandom_len))

    def aes_encrypt(self, message, aes_key):
        """AES-encrypt *message* under *aes_key*; returns the hex ciphertext.

        Both key and message are space-padded to a multiple of the 16-byte
        block size, exactly as the original padding loops did.
        """
        mode = self.aes_mode
        if isinstance(message, str):
            message = bytes(message, 'utf-8')
        if isinstance(aes_key, str):
            aes_key = bytes(aes_key, 'utf-8')
        # Space-pad to the next multiple of 16 bytes.
        aes_key += b' ' * (-len(aes_key) % 16)
        message += b' ' * (-len(message) % 16)
        cipher = AES.new(key=aes_key, mode=mode)
        return b2a_hex(cipher.encrypt(plaintext=message))

    def generate_rsa_keys(self):
        """Create a 1024-bit RSA pair; returns (public, private) exported bytes."""
        random_source = Random.new().read
        key_pair = RSA.generate(1024, random_source)
        return key_pair.publickey().exportKey(), key_pair.exportKey()

    def rsa_encrypt(self, message, rsa_public_key):
        """RSA/PKCS#1 v1.5-encrypt *message* under an exported public key.

        Returns the ciphertext base64-encoded as bytes.
        """
        pub_key = RSA.importKey(rsa_public_key)
        cipher = Cipher_pkcs1_v1_5.new(pub_key)
        return base64.b64encode(cipher.encrypt(message=message.encode('utf-8')))

    def create_rsa_key(self, private_key='server_private_rsa_key.bin', public_key='server_rsa_public.pem'):
        """Generate an RSA pair and write both halves to disk.

        The private key is passphrase-protected (scrypt + AES-128-CBC).
        """
        key = RSA.generate(1024)
        encrypted_key = key.exportKey(passphrase='<PASSWORD>', pkcs=8, protection="scryptAndAES128-CBC")
        with open(private_key, "wb") as f:
            f.write(encrypted_key)
        with open(public_key, "wb") as f:
            f.write(key.publickey().exportKey())

    def rsa_encode(self, public_key_file, plaintext):
        """Encrypt *plaintext* with the public key stored in *public_key_file*."""
        encoded = plaintext.encode(encoding='utf-8')
        recipient_key = RSA.import_key(
            open(public_key_file).read()
        )
        cipher_rsa = PKCS1_v1_5.new(recipient_key)
        en_data = cipher_rsa.encrypt(encoded)
        print(len(en_data), en_data)
        return en_data
class Decrypts:
"""base64 AES RSA | |
from numpy import array
def scigrid_2011_01_04_03():
ppc = {"version": '2'}
ppc["baseMVA"] = 100.0
ppc["bus"] = array([
[586, 3, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[589, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[590, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[593, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[594, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[595, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[598, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[599, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[601, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[602, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[603, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[607, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[608, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[609, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[612, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[613, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[614, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[616, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[617, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[618, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[619, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[621, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[624, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[629, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[632, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[637, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[638, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[640, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[641, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[642, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[643, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[647, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[650, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[652, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[655, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[661, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[663, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[666, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[668, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[670, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[672, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[676, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[681, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[683, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[687, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[691, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[693, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[694, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[695, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[696, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[697, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[698, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[702, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[704, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[705, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[707, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[713, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[714, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[716, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[717, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[719, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[722, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[724, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[727, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[728, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[730, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[732, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[735, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[738, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[741, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[742, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[743, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[746, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[747, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[748, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[749, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[750, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[753, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[758, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[760, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[762, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[763, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[765, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[767, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[769, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[771, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[772, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[774, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[777, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[778, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[781, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[784, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[785, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[787, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[788, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[789, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[791, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[792, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[795, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[800, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[801, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[802, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[805, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[806, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[808, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[809, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[811, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[814, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[816, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[817, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[818, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[821, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[822, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[826, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[830, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[834, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[835, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[836, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[837, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[839, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[841, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[843, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[844, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[845, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[849, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[850, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[851, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[853, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[855, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[856, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[857, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[858, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[860, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[862, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[863, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[864, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[865, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[867, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[869, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[870, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[872, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[874, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[875, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[877, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[882, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[883, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[885, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[886, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[889, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[890, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[893, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[894, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[895, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[896, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[898, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[900, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[902, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[903, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[905, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[906, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[907, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[909, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[915, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[917, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[918, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[920, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[921, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[922, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[923, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[925, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[928, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[931, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[935, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[936, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[937, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[939, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[940, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[942, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[944, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[945, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[950, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[952, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[958, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[959, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[960, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[963, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[965, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[966, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[967, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[968, 2, 0, 0, 0, 0, 0, 0.999633, 0, 220.0, 0, 1.1, 0.9 ],
[969, 2, 0, 0, 0, 0, 0, 0.999633, 0, 220.0, 0, 1.1, 0.9 ],
[971, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[973, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[976, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[978, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[981, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[982, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[983, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[984, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[985, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[986, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[987, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[988, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[993, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[994, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[995, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[997, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[999, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1000, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1002, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1003, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1007, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1008, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1010, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1011, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1012, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1014, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1026, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1027, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1028, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1029, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1030, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1031, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1032, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1033, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1034, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1035, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1036, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1037, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1038, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1039, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1041, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1042, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1044, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1046, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1047, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1048, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1049, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1050, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1051, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1052, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1053, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1054, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1055, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1056, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1057, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1058, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1059, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1060, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1061, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1062, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1063, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1064, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1065, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1066, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1069, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1072, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1073, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1074, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1077, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1078, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1079, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1080, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1081, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1082, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1083, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1084, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1085, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1086, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1087, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1088, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1089, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1090, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1091, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1092, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1093, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1094, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1095, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1096, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1097, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1098, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1099, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1101, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1102, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1103, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1104, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1105, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1106, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1107, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1108, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1109, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1110, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1111, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1112, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1113, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1115, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1116, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1117, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1118, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1119, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1120, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1121, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1122, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1123, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1124, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1125, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1126, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1127, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1128, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1129, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1130, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1131, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1132, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1133, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1134, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1135, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1136, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1137, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1138, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1139, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1140, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1141, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1142, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1143, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1144, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1145, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1146, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1147, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1148, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1149, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1150, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1151, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1152, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1153, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1154, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1155, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1156, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1157, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1158, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1159, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1160, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1161, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1162, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1163, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1164, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1165, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1166, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1167, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1168, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1169, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1170, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1173, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1174, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1175, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1176, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1177, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1178, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1179, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1180, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1181, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1182, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1183, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1184, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1185, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1186, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1187, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1188, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1189, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1192, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1193, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1194, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1196, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1197, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1198, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1199, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1200, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1202, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1204, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1206, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1207, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1211, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1212, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1213, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1214, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1216, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1217, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1218, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1219, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1220, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1221, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1222, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1224, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1225, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1226, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1228, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1229, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1230, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1231, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1233, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1235, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1236, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1239, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1240, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1241, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1243, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1244, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1245, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1246, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1248, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1249, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1250, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1251, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1252, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1253, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1254, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1255, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1256, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1257, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1258, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1259, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1260, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1261, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1264, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1265, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1266, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1267, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1274, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1275, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1276, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1277, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1278, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1282, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1283, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1284, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1286, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1287, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1288, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1289, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1290, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1291, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1292, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1293, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1294, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1295, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1300, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1301, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1302, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1303, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1306, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1307, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1308, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1312, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1317, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1319, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1323, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1324, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1325, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1326, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1327, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1328, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1329, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1330, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1331, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1336, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1337, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1339, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1340, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1341, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1346, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1347, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1348, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1349, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1356, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1357, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1359, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1360, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1361, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1362, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1363, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1364, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1365, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1366, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1372, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1373, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1374, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1375, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1376, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1377, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1378, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1379, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1380, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1381, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1382, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1383, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1384, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1385, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1386, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1387, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1388, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1389, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1390, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1391, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1392, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1393, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1394, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1395, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1397, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1398, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1399, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1401, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1402, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1403, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1404, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1405, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1406, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1407, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1408, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1409, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1410, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1411, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1412, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1413, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1414, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1418, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1419, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1422, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1424, 2, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[1425, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1426, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1427, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1428, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1431, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1432, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1433, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1434, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1435, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1436, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1437, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1438, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1439, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1440, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1442, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1443, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1444, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1445, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1446, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1447, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1448, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1449, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1450, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1451, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1452, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1454, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1455, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1456, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1457, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1458, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1459, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1460, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1461, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1462, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1463, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1465, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1466, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1467, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1468, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1469, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1470, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1471, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1472, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1473, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1474, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1475, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1476, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1477, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1482, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1483, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1484, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1485, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1486, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1489, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1490, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1491, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1492, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1493, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1494, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1495, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1497, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1498, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1500, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1503, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1504, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1510, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1511, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1512, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1513, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1517, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1518, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1519, 2, 0, 0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[1, 1, 250.821548, 50.16431, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[2, 1, 0, 0, 0, 0, 0, 1.000013, 0, 380.0, 0, 1.1, 0.9 ],
[3, 1, 43.962271, 8.792454, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[4, 1, 72.297412, 14.459482, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[5, 1, 0, 0, 0, 0, 0, 1.00004, 0, 380.0, 0, 1.1, 0.9 ],
[6, 1, 212.295172, 42.459034, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[7, 1, 159.990811, 31.998162, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[8, 1, 133.86888, 26.773776, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[9, 1, 90.533431, 18.106686, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[10, 1, 0, 0, 0, 0, 0, 0.999294, 0, 380.0, 0, 1.1, 0.9 ],
[11, 1, 79.322719, 15.864544, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[12, 1, 0, 0, 0, 0, 0, 1.000592, 0, 380.0, 0, 1.1, 0.9 ],
[13, 1, 0, 0, 0, 0, 0, 1.000289, 0, 380.0, 0, 1.1, 0.9 ],
[14, 1, 189.710845, 37.942169, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[15, 1, 0, 0, 0, 0, 0, 1.000709, 0, 380.0, 0, 1.1, 0.9 ],
[16, 1, 323.544924, 64.708985, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[17, 1, 76.203329, 15.240666, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[18, 1, 0, 0, 0, 0, 0, 1.002083, 0, 380.0, 0, 1.1, 0.9 ],
[19, 1, 188.2697, 37.65394, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[20, 1, 0, 0, 0, 0, 0, 0.998061, 0, 380.0, 0, 1.1, 0.9 ],
[21, 1, 809.588587, 161.917717, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[22, 1, 0, 0, 0, 0, 0, 1.000299, 0, 380.0, 0, 1.1, 0.9 ],
[23, 1, 106.002596, 21.200519, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[24, 1, 0, 0, 0, 0, 0, 0.999997, 0, 380.0, 0, 1.1, 0.9 ],
[25, 1, 50.70178, 10.140356, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[26, 1, 0, 0, 0, 0, 0, 1.00065, 0, 380.0, 0, 1.1, 0.9 ],
[27, 1, 62.237839, 12.447568, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[28, 1, 183.894169, 36.778834, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[29, 1, 67.548157, 13.509631, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[30, 1, 0, 0, 0, 0, 0, 0.999715, 0, 380.0, 0, 1.1, 0.9 ],
[31, 1, 132.933028, 26.586606, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[32, 1, 0, 0, 0, 0, 0, 0.999636, 0, 380.0, 0, 1.1, 0.9 ],
[33, 1, 166.673037, 33.334607, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[34, 1, 33.067148, 6.61343, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[35, 1, 2.18922, 0.437844, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[36, 1, 7.248193, 1.449639, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[37, 1, 0, 0, 0, 0, 0, 1.003324, 0, 380.0, 0, 1.1, 0.9 ],
[38, 1, 174.625144, 34.925029, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[39, 1, 57.180738, 11.436148, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[40, 1, 59.727069, 11.945414, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[41, 1, 64.193062, 12.838612, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[42, 1, 0, 0, 0, 0, 0, 1.001174, 0, 380.0, 0, 1.1, 0.9 ],
[43, 1, 98.442954, 19.688591, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[44, 1, 125.943164, 25.188633, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[45, 1, 66.853448, 13.37069, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[46, 1, 0, 0, 0, 0, 0, 1.000161, 0, 380.0, 0, 1.1, 0.9 ],
[47, 1, 290.684158, 58.136832, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[48, 1, 199.806649, 39.96133, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[49, 1, 50.541002, 10.1082, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[50, 1, 73.59452, 14.718904, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[51, 1, 95.373694, 19.074739, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[52, 1, 0, 0, 0, 0, 0, 1.000211, 0, 380.0, 0, 1.1, 0.9 ],
[53, 1, 144.714306, 28.942861, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[54, 1, 73.523293, 14.704659, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[55, 1, 72.104864, 14.420973, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[56, 1, 0, 0, 0, 0, 0, 0.999697, 0, 380.0, 0, 1.1, 0.9 ],
[57, 1, 86.070684, 17.214137, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[58, 1, 197.157992, 39.431598, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[59, 1, 56.309528, 11.261906, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[60, 1, 29.687945, 5.937589, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[61, 1, 0, 0, 0, 0, 0, 1.000747, 0, 380.0, 0, 1.1, 0.9 ],
[62, 1, 226.334343, 45.266869, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[63, 1, 133.603225, 26.720645, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[64, 1, 1417.800972, 283.560194, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[65, 1, 4.724137, 0.944827, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[66, 1, 149.891468, 29.978294, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[67, 1, 321.542448, 64.30849, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[68, 1, 0, 0, 0, 0, 0, 0.998962, 0, 380.0, 0, 1.1, 0.9 ],
[69, 1, 0, 0, 0, 0, 0, 1.000555, 0, 380.0, 0, 1.1, 0.9 ],
[70, 1, 608.284973, 121.656995, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[71, 1, 141.357593, 28.271519, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[72, 1, 231.524339, 46.304868, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[73, 1, 74.119665, 14.823933, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[74, 1, 0, 0, 0, 0, 0, 1.001231, 0, 380.0, 0, 1.1, 0.9 ],
[75, 1, 92.37919, 18.475838, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[76, 1, 89.166187, 17.833237, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[77, 1, 86.363545, 17.272709, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[78, 1, 0, 0, 0, 0, 0, 0.998723, 0, 380.0, 0, 1.1, 0.9 ],
[79, 1, 89.177017, 17.835403, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[80, 1, 94.719751, 18.94395, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[81, 1, 106.9257, 21.38514, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[82, 1, 3.55855, 0.71171, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[83, 1, 238.093241, 47.618648, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[84, 1, 23.438811, 4.687762, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[85, 1, 81.281245, 16.256249, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[86, 1, 0, 0, 0, 0, 0, 0.999988, 0, 380.0, 0, 1.1, 0.9 ],
[87, 1, 0, 0, 0, 0, 0, 0.999275, 0, 380.0, 0, 1.1, 0.9 ],
[88, 1, 65.604737, 13.120947, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[89, 1, 81.392718, 16.278544, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[90, 1, 94.004995, 18.800999, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[91, 1, 32.652655, 6.530531, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[92, 1, 35.635501, 7.1271, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[93, 1, 34.951288, 6.990258, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[94, 1, 0, 0, 0, 0, 0, 1.001316, 0, 380.0, 0, 1.1, 0.9 ],
[95, 1, 0, 0, 0, 0, 0, 1.001163, 0, 380.0, 0, 1.1, 0.9 ],
[96, 1, 0, 0, 0, 0, 0, 0.999999, 0, 380.0, 0, 1.1, 0.9 ],
[97, 1, 4.915638, 0.983128, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[98, 1, 90.378803, 18.075761, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[99, 1, 0, 0, 0, 0, 0, 1.000995, 0, 380.0, 0, 1.1, 0.9 ],
[100, 1, 0, 0, 0, 0, 0, 1.001356, 0, 380.0, 0, 1.1, 0.9 ],
[101, 1, 63.997409, 12.799482, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[102, 1, 123.869968, 24.773994, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[103, 1, 144.827962, 28.965592, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[104, 1, 0, 0, 0, 0, 0, 0.999902, 0, 380.0, 0, 1.1, 0.9 ],
[105, 1, 0, 0, 0, 0, 0, 0.999882, 0, 380.0, 0, 1.1, 0.9 ],
[106, 1, 0, 0, 0, 0, 0, 0.999769, 0, 380.0, 0, 1.1, 0.9 ],
[107, 1, 0, 0, 0, 0, 0, 0.999995, 0, 380.0, 0, 1.1, 0.9 ],
[108, 1, 102.15847, 20.431694, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[109, 1, 41.362221, 8.272444, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[110, 1, 53.689821, 10.737964, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[111, 1, 94.615973, 18.923195, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[112, 1, 47.887609, 9.577522, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[113, 1, 75.488219, 15.097644, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[114, 1, 111.175688, 22.235138, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[115, 1, 71.66843, 14.333686, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[116, 1, 119.927261, 23.985452, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[117, 1, 0, 0, 0, 0, 0, 1.000631, 0, 380.0, 0, 1.1, 0.9 ],
[118, 1, 185.690204, 37.138041, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[119, 1, 35.994387, 7.198877, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[120, 1, 0, 0, 0, 0, 0, 1.001349, 0, 380.0, 0, 1.1, 0.9 ],
[121, 1, 48.880393, 9.776079, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[122, 1, 42.794288, 8.558858, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[123, 1, 0, 0, 0, 0, 0, 1.000223, 0, 380.0, 0, 1.1, 0.9 ],
[124, 1, 0, 0, 0, 0, 0, 1.000005, 0, 380.0, 0, 1.1, 0.9 ],
[125, 1, 0, 0, 0, 0, 0, 0.999797, 0, 380.0, 0, 1.1, 0.9 ],
[126, 1, 224.371515, 44.874303, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[127, 1, 173.462786, 34.692557, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[128, 1, 0, 0, 0, 0, 0, 1.001312, 0, 380.0, 0, 1.1, 0.9 ],
[129, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[130, 1, 239.173627, 47.834725, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[131, 1, 52.80933, 10.561866, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[132, 1, 137.507511, 27.501502, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[133, 1, 46.059629, 9.211926, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[134, 1, 45.871016, 9.174203, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[135, 1, 45.931833, 9.186367, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[136, 1, 44.495522, 8.899104, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[137, 1, 35.592322, 7.118464, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[138, 1, 0, 0, 0, 0, 0, 0.999256, 0, 380.0, 0, 1.1, 0.9 ],
[139, 1, 69.721751, 13.94435, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[140, 1, 48.215576, 9.643115, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[141, 1, 57.126948, 11.42539, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[142, 1, 62.860036, 12.572007, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[143, 1, 0, 0, 0, 0, 0, 0.999975, 0, 380.0, 0, 1.1, 0.9 ],
[144, 1, 57.258993, 11.451799, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[145, 1, 166.567926, 33.313585, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[146, 1, 214.737391, 42.947478, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[147, 1, 131.621376, 26.324275, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[148, 1, 185.741924, 37.148385, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[149, 1, 119.746474, 23.949295, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[150, 1, 156.341456, 31.268291, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[151, 1, 36.841625, 7.368325, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[152, 1, 76.479393, 15.295879, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[153, 1, 136.451676, 27.290335, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[154, 1, 140.162949, 28.03259, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[155, 1, 145.992099, 29.19842, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[156, 1, 0, 0, 0, 0, 0, 0.999991, 0, 380.0, 0, 1.1, 0.9 ],
[157, 1, 0, 0, 0, 0, 0, 1.001274, 0, 380.0, 0, 1.1, 0.9 ],
[158, 1, 38.464056, 7.692811, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[159, 1, 0, 0, 0, 0, 0, 1.001411, 0, 380.0, 0, 1.1, 0.9 ],
[160, 1, 0, 0, 0, 0, 0, 1.000004, 0, 380.0, 0, 1.1, 0.9 ],
[161, 1, 119.408868, 23.881774, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[162, 1, 178.48087, 35.696174, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[163, 1, 35.694488, 7.138898, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[164, 1, 35.838037, 7.167607, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[165, 1, 0, 0, 0, 0, 0, 0.999974, 0, 380.0, 0, 1.1, 0.9 ],
[166, 1, 41.900464, 8.380093, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[167, 1, 58.943405, 11.788681, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[168, 1, 40.228122, 8.045624, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[169, 1, 137.71246, 27.542492, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[170, 1, 103.479301, 20.69586, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[171, 1, 88.319544, 17.663909, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[172, 1, 43.344826, 8.668965, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[173, 1, 41.407138, 8.281428, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[174, 1, 62.137278, 12.427456, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[175, 1, 41.38, 8.276, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[176, 1, 144.193935, 28.838787, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[177, 1, 23.512922, 4.702584, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[178, 1, 124.530203, 24.906041, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[179, 1, 45.885082, 9.177016, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[180, 1, 40.334161, 8.066832, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[181, 1, 30.443063, 6.088613, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[182, 1, 1.379085, 0.275817, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[183, 1, 412.803513, 82.560703, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[184, 1, 0, 0, 0, 0, 0, 0.999905, 0, 380.0, 0, 1.1, 0.9 ],
[185, 1, 88.275644, 17.655129, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[186, 1, 47.535976, 9.507195, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[187, 1, 27.803705, 5.560741, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[188, 1, 41.38, 8.276, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[189, 1, 151.838663, 30.367733, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[190, 1, 200.83504, 40.167008, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[191, 1, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[192, 1, 48.367161, 9.673432, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[193, 1, 41.313251, 8.26265, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[194, 1, 28.519198, 5.70384, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[195, 1, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[196, 1, 40.010773, 8.002155, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[197, 1, 63.391759, 12.678352, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[198, 1, 37.511848, 7.50237, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[199, 1, 48.295256, 9.659051, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[200, 1, 41.38096, 8.276192, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[201, 1, 0, 0, 0, 0, 0, 0.99879, 0, 380.0, 0, 1.1, 0.9 ],
[202, 1, 42.403738, 8.480748, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[203, 1, 5.587073, 1.117415, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[204, 1, 163.755979, 32.751196, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[205, 1, 81.885361, 16.377072, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[206, 1, 39.299251, 7.85985, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[207, 1, 116.859046, 23.371809, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[208, 1, 34.410381, 6.882076, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[209, 1, 47.818405, 9.563681, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[210, 1, 54.934397, 10.986879, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[211, 1, 193.051784, 38.610357, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[212, 1, 48.385707, 9.677141, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[213, 1, 226.821377, 45.364275, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[214, 1, 152.622036, 30.524407, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[215, 1, 322.726911, 64.545382, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[216, 1, 108.819233, 21.763847, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[217, 1, 34.869546, 6.973909, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[218, 1, 106.231288, 21.246258, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[219, 1, 170.726627, 34.145325, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[220, 1, 0, 0, 0, 0, 0, 0.999706, 0, 380.0, 0, 1.1, 0.9 ],
[221, 1, 97.391535, 19.478307, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[222, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[223, 1, 96.52104, 19.304208, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[224, 1, 112.240673, 22.448135, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[225, 1, 201.534567, 40.306913, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[226, 1, 70.402252, 14.08045, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[227, 1, 87.706927, 17.541385, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[228, 1, 85.993963, 17.198793, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[229, 1, 190.289974, 38.057995, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[230, 1, 45.642403, 9.128481, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[231, 1, 0, 0, 0, 0, 0, 1.000771, 0, 380.0, 0, 1.1, 0.9 ],
[232, 1, 0, 0, 0, 0, 0, 0.999972, 0, 380.0, 0, 1.1, 0.9 ],
[233, 1, 0, 0, 0, 0, 0, 0.999813, 0, 380.0, 0, 1.1, 0.9 ],
[234, 1, 162.583315, 32.516663, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[235, 1, 52.869927, 10.573985, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[236, 1, 0, 0, 0, 0, 0, 0.999976, 0, 380.0, 0, 1.1, 0.9 ],
[237, 1, 0.437559, 0.087512, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[238, 1, 59.823284, 11.964657, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[239, 1, 82.653369, 16.530674, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[240, 1, 521.361598, 104.27232, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[241, 1, 385.789472, 77.157894, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[242, 1, 140.472928, 28.094586, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[243, 1, 113.333641, 22.666728, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[244, 1, 135.028615, 27.005723, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[245, 1, 0, 0, 0, 0, 0, 1.001496, 0, 380.0, 0, 1.1, 0.9 ],
[246, 1, 0, 0, 0, 0, 0, 0.999909, 0, 380.0, 0, 1.1, 0.9 ],
[247, 1, 26.795666, 5.359133, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[248, 1, 0, 0, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[249, 1, 0, 0, 0, 0, 0, 0.999999, 0, 380.0, 0, 1.1, 0.9 ],
[250, 1, 0, 0, 0, 0, 0, 0.999999, 0, 380.0, 0, 1.1, 0.9 ],
[251, 1, 66.500764, 13.300153, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[252, 1, 170.544038, 34.108808, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[253, 1, 74.87534, 14.975068, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[254, 1, 23.906454, 4.781291, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[255, 1, 117.569947, 23.513989, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[256, 1, 134.832271, 26.966454, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[257, 1, 65.073036, 13.014607, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[258, 1, 212.065167, 42.413033, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[259, 1, 0, 0, 0, 0, 0, 0.999516, 0, 380.0, 0, 1.1, 0.9 ],
[260, 1, 131.98103, 26.396206, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[261, 1, 0, 0, 0, 0, 0, 1.002014, 0, 380.0, 0, 1.1, 0.9 ],
[262, 1, 0, 0, 0, 0, 0, 0.999907, 0, 380.0, 0, 1.1, 0.9 ],
[263, 1, 189.326615, 37.865323, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[264, 1, 245.093515, 49.018703, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[265, 1, 0, 0, 0, 0, 0, 1.000008, 0, 380.0, 0, 1.1, 0.9 ],
[266, 1, 118.118747, 23.623749, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[267, 1, 149.394588, 29.878918, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[268, 1, 51.950829, 10.390166, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[269, 1, 41.718463, 8.343693, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[270, 1, 0, 0, 0, 0, 0, 1.000003, 0, 380.0, 0, 1.1, 0.9 ],
[271, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[272, 1, 0.85121, 0.170242, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[273, 1, 116.403411, 23.280682, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[274, 1, 226.272896, 45.254579, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[275, 1, 42.359521, 8.471904, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[276, 1, 165.128183, 33.025637, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[277, 1, 0, 0, 0, 0, 0, 0.99949, 0, 380.0, 0, 1.1, 0.9 ],
[278, 1, 128.909542, 25.781908, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[279, 1, 0, 0, 0, 0, 0, 0.999578, 0, 380.0, 0, 1.1, 0.9 ],
[280, 1, 0, 0, 0, 0, 0, 0.999443, 0, 380.0, 0, 1.1, 0.9 ],
[281, 1, 170.274067, 34.054813, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[282, 1, 240.7939, 48.15878, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[283, 1, 96.520651, 19.30413, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[284, 1, 146.426298, 29.28526, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[285, 1, 65.300992, 13.060198, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[286, 1, 136.860331, 27.372066, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[287, 1, 84.117367, 16.823473, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[288, 1, 54.103705, 10.820741, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[289, 1, 85.089435, 17.017887, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[290, 1, 0, 0, 0, 0, 0, 1.004466, 0, 380.0, 0, 1.1, 0.9 ],
[291, 1, 55.996352, 11.19927, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[292, 1, 110.394242, 22.078848, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[293, 1, 97.294621, 19.458924, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[294, 1, 25.927546, 5.185509, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[295, 1, 54.249458, 10.849892, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[296, 1, 154.014337, 30.802867, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[297, 1, 161.870796, 32.374159, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[298, 1, 85.470998, 17.0942, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[299, 1, 82.778093, 16.555619, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[300, 1, 225.50994, 45.101988, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[301, 1, 0, 0, 0, 0, 0, 0.999498, 0, 380.0, 0, 1.1, 0.9 ],
[302, 1, 189.964538, 37.992908, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[303, 1, 97.571296, 19.514259, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[304, 1, 83.78454, 16.756908, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[305, 1, 0, 0, 0, 0, 0, 0.999693, 0, 380.0, 0, 1.1, 0.9 ],
[306, 1, 0, 0, 0, 0, 0, 1.001512, 0, 380.0, 0, 1.1, 0.9 ],
[307, 1, 99.376251, 19.87525, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[308, 1, 122.517677, 24.503535, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[309, 1, 200.456148, 40.09123, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[310, 1, 0, 0, 0, 0, 0, 1.000044, 0, 380.0, 0, 1.1, 0.9 ],
[311, 1, 170.269252, 34.05385, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[312, 1, 76.57482, 15.314964, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[313, 1, 0, 0, 0, 0, 0, 1.000441, 0, 380.0, 0, 1.1, 0.9 ],
[314, 1, 237.180051, 47.43601, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[315, 1, 0, 0, 0, 0, 0, 1.001494, 0, 380.0, 0, 1.1, 0.9 ],
[316, 1, 92.930227, 18.586045, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[317, 1, 125.127147, 25.025429, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[318, 1, 205.630095, 41.126019, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[319, 1, 7.366492, 1.473298, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[320, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[321, 1, 174.25721, 34.851442, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[322, 1, 22.184065, 4.436813, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[323, 1, 2.308063, 0.461613, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[324, 1, 408.009713, 81.601943, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[325, 1, 132.910922, 26.582184, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[326, 1, 10.776005, 2.155201, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[327, 1, 92.734881, 18.546976, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[328, 1, 158.03449, 31.606898, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[329, 1, 237.697962, 47.539592, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[330, 1, 0, 0, 0, 0, 0, 1.002001, 0, 380.0, 0, 1.1, 0.9 ],
[331, 1, 18.87241, 3.774482, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[332, 1, 0, 0, 0, 0, 0, 0.998455, 0, 380.0, 0, 1.1, 0.9 ],
[333, 1, 198.297407, 39.659481, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[334, 1, 0, 0, 0, 0, 0, 0.99985, 0, 380.0, 0, 1.1, 0.9 ],
[335, 1, 202.377464, 40.475493, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[336, 1, 0, 0, 0, 0, 0, 0.998855, 0, 380.0, 0, 1.1, 0.9 ],
[337, 1, 80.499821, 16.099964, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[338, 1, 218.487954, 43.697591, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[339, 1, 135.131778, 27.026356, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[340, 1, 114.251187, 22.850237, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[341, 1, 103.285355, 20.657071, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[342, 1, 179.166106, 35.833221, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[343, 1, 98.293138, 19.658628, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[344, 1, 246.444439, 49.288888, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[345, 1, 269.47729, 53.895458, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[346, 1, 267.522248, 53.50445, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[347, 1, 93.557174, 18.711435, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[348, 1, 244.564613, 48.912923, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[349, 1, 0, 0, 0, 0, 0, 1.001796, 0, 380.0, 0, 1.1, 0.9 ],
[350, 1, 128.302166, 25.660433, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[351, 1, 0, 0, 0, 0, 0, 1.001627, 0, 380.0, 0, 1.1, 0.9 ],
[352, 1, 849.269793, 169.853959, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[353, 1, 2.553189, 0.510638, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[354, 1, 17.346143, 3.469229, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[355, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[356, 1, 0.0, 0.0, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[357, 1, 0.043481, 0.008696, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[358, 1, 0, 0, 0, 0, 0, 1.001393, 0, 380.0, 0, 1.1, 0.9 ],
[359, 1, 2.53872, 0.507744, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[360, 1, 0, 0, 0, 0, 0, 1.000745, 0, 380.0, 0, 1.1, 0.9 ],
[361, 1, 64.976237, 12.995247, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[362, 1, 185.215903, 37.043181, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[363, 1, 272.697834, 54.539567, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[364, 1, 64.339299, 12.86786, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[365, 1, 57.747938, 11.549588, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[366, 1, 114.456228, 22.891246, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[367, 1, 55.323387, 11.064677, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[368, 1, 27.242145, 5.448429, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[369, 1, 22.385785, 4.477157, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[370, 1, 65.904389, 13.180878, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[371, 1, 331.60187, 66.320374, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[372, 1, 192.300689, 38.460138, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[373, 1, 129.764644, 25.952929, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[374, 1, 66.541113, 13.308223, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[375, 1, 218.277952, 43.65559, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[376, 1, 239.409804, 47.881961, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[377, 1, 171.317957, 34.263591, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[378, 1, 170.988206, 34.197641, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[379, 1, 58.932311, 11.786462, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[380, 1, 0, 0, 0, 0, 0, 1.001423, 0, 380.0, 0, 1.1, 0.9 ],
[381, 1, 197.07324, 39.414648, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[382, 1, 0, 0, 0, 0, 0, 1.001117, 0, 380.0, 0, 1.1, 0.9 ],
[383, 1, 0, 0, 0, 0, 0, 0.999331, 0, 380.0, 0, 1.1, 0.9 ],
[384, 1, 69.542251, 13.90845, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[385, 1, 87.775969, 17.555194, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[386, 1, 70.52536, 14.105072, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[387, 1, 143.627776, 28.725555, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[388, 1, 771.279055, 154.255811, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[389, 1, 0, 0, 0, 0, 0, 0.999936, 0, 380.0, 0, 1.1, 0.9 ],
[390, 1, 63.682708, 12.736542, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[391, 1, 72.540035, 14.508007, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[392, 1, 139.203598, 27.84072, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[393, 1, 173.839249, 34.76785, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[394, 1, 62.52498, 12.504996, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[395, 1, 86.655759, 17.331152, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[396, 1, 61.377387, 12.275477, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[397, 1, 492.179039, 98.435808, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[398, 1, 213.173373, 42.634675, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[399, 1, 90.827383, 18.165477, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[400, 1, 48.391308, 9.678262, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[401, 1, 0, 0, 0, 0, 0, 1.000643, 0, 380.0, 0, 1.1, 0.9 ],
[402, 1, 0, 0, 0, 0, 0, 1.000433, 0, 380.0, 0, 1.1, 0.9 ],
[403, 1, 24.02741, 4.805482, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[404, 1, 84.650051, 16.93001, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[405, 1, 638.177697, 127.635539, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[406, 1, 48.352996, 9.670599, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[407, 1, 95.715815, 19.143163, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[408, 1, 276.75646, 55.351292, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[409, 1, 0, 0, 0, 0, 0, 0.999966, 0, 380.0, 0, 1.1, 0.9 ],
[410, 1, 35.831632, 7.166326, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[411, 1, 33.880275, 6.776055, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[412, 1, 2.379719, 0.475944, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[413, 1, 118.799841, 23.759968, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[414, 1, 10.087392, 2.017478, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[415, 1, 0, 0, 0, 0, 0, 1.000348, 0, 380.0, 0, 1.1, 0.9 ],
[416, 1, 143.655073, 28.731015, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[417, 1, 5.620949, 1.12419, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[418, 1, 117.137188, 23.427438, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[419, 1, 62.608995, 12.521799, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[420, 1, 63.034534, 12.606907, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[421, 1, 90.79964, 18.159928, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[422, 1, 66.522859, 13.304572, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[423, 1, 139.712704, 27.942541, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[424, 1, 10.072926, 2.014585, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[425, 1, 82.724138, 16.544828, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[426, 1, 6.853949, 1.37079, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[427, 1, 57.600704, 11.520141, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[428, 1, 25.826368, 5.165274, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[429, 1, 291.444433, 58.288887, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[430, 1, 155.242425, 31.048485, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[431, 1, 103.812994, 20.762599, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[432, 1, 121.351021, 24.270204, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[433, 1, 62.031407, 12.406281, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[434, 1, 32.284165, 6.456833, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[435, 1, 129.116338, 25.823268, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[436, 1, 68.933046, 13.786609, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[437, 1, 15.698778, 3.139756, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[438, 1, 42.131221, 8.426244, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[439, 1, 78.442888, 15.688578, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[440, 1, 66.292256, 13.258451, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[441, 1, 50.821896, 10.164379, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[442, 1, 67.254573, 13.450915, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[443, 1, 145.814245, 29.162849, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[444, 1, 0, 0, 0, 0, 0, 0.999997, 0, 380.0, 0, 1.1, 0.9 ],
[445, 1, 66.256308, 13.251262, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[446, 1, 30.722456, 6.144491, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[447, 1, 58.409391, 11.681878, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[448, 1, 42.924971, 8.584994, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[449, 1, 216.442237, 43.288447, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[450, 1, 132.452321, 26.490464, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[451, 1, 56.59753, 11.319506, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[452, 1, 0, 0, 0, 0, 0, 0.999998, 0, 380.0, 0, 1.1, 0.9 ],
[453, 1, 37.931326, 7.586265, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[454, 1, 26.463395, 5.292679, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[455, 1, 43.146339, 8.629268, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[456, 1, 43.146339, 8.629268, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[457, 1, 132.319001, 26.4638, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[458, 1, 125.852054, 25.170411, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[459, 1, 153.166632, 30.633326, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[460, 1, 201.292511, 40.258502, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[461, 1, 209.387861, 41.877572, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[462, 1, 64.052832, 12.810566, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[463, 1, 32.821072, 6.564214, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[464, 1, 32.860745, 6.572149, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[465, 1, 53.079085, 10.615817, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[466, 1, 43.093502, 8.6187, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[467, 1, 39.768167, 7.953633, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[468, 1, 65.204075, 13.040815, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[469, 1, 40.405659, 8.081132, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[470, 1, 102.897705, 20.579541, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[471, 1, 101.312069, 20.262414, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[472, 1, 35.435908, 7.087182, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[473, 1, 65.068776, 13.013755, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[474, 1, 33.607343, 6.721469, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[475, 1, 32.980512, 6.596102, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[476, 1, 37.273405, 7.454681, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[477, 1, 60.151214, 12.030243, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[478, 1, 75.560888, 15.112178, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[479, 1, 136.93311, 27.386622, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[480, 1, 60.020263, 12.004053, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[481, 1, 52.124375, 10.424875, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[482, 1, 59.184985, 11.836997, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[483, 1, 50.332493, 10.066499, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[484, 1, 39.458226, 7.891645, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[485, 1, 58.940146, 11.788029, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[486, 1, 542.220553, 108.444111, 0, 0, 0, 0.999633, 0, 220.0, 0, 1.1, 0.9 ],
[487, 1, 137.396182, 27.479236, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[488, 1, 395.900604, 79.180121, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[489, 1, 104.199913, 20.839983, 0, 0, 0, 1.0, 0, 380.0, 0, 1.1, 0.9 ],
[490, 1, 32.423127, 6.484625, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[491, 1, 44.582216, 8.916443, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[492, 1, 69.521972, 13.904394, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[493, 1, 89.6055, 17.9211, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[494, 1, 122.466136, 24.493227, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[495, 1, 96.402737, 19.280547, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[496, 1, 6.828367, 1.365673, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[497, 1, 853.885126, 170.777025, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[498, 1, 40.046443, 8.009289, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[499, 1, 55.898273, 11.179655, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[500, 1, 30.603647, 6.120729, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[501, 1, 51.776094, 10.355219, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[502, 1, 204.349519, 40.869904, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[503, 1, 62.584286, 12.516857, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[504, 1, 40.98313, 8.196626, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[505, 1, 290.684158, 58.136832, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[506, 1, 91.24218, 18.248436, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[507, 1, 86.790622, 17.358124, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[508, 1, 126.17457, 25.234914, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[509, 1, 166.273056, 33.254611, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[510, 1, 105.044623, 21.008925, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[511, 1, 91.631004, 18.326201, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[512, 1, 60.527936, 12.105587, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[513, 1, 33.344433, 6.668887, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[514, 1, 82.991068, 16.598214, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[515, 1, 74.032963, 14.806593, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[516, 1, 82.825465, 16.565093, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[517, 1, 38.905104, 7.781021, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[518, 1, 219.116007, 43.823201, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[519, 1, 21.565027, 4.313005, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[520, 1, 87.06636, 17.413272, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[521, 1, 78.65049, 15.730098, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[522, 1, 67.341186, 13.468237, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[523, 1, 36.248995, 7.249799, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[524, 1, 105.212389, 21.042478, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[525, 1, 125.343592, 25.068718, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[526, 1, 38.00182, 7.600364, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[527, 1, 41.723327, 8.344665, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[528, 1, 91.065064, 18.213013, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[529, 1, 116.731927, 23.346385, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[530, 1, 49.466223, 9.893245, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[531, 1, 50.294079, 10.058816, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[532, 1, 48.273548, 9.65471, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[533, 1, 43.258924, 8.651785, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[534, 1, 119.332323, 23.866465, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[535, 1, 149.39641, 29.879282, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[536, 1, 117.756566, 23.551313, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[537, 1, 39.172757, 7.834551, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[538, 1, 29.282881, 5.856576, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[539, 1, 31.070937, 6.214187, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[540, 1, 27.978014, 5.595603, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[541, 1, 72.269624, 14.453925, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[542, 1, 99.276125, 19.855225, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[543, 1, 54.224131, 10.844826, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[544, 1, 100.993205, 20.198641, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[545, 1, 217.454934, 43.490987, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[546, 1, 108.991697, 21.798339, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[547, 1, 140.87893, 28.175786, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[548, 1, 45.603092, 9.120618, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[549, 1, 38.994543, 7.798909, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[550, 1, 32.17713, 6.435426, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[551, 1, 31.017976, 6.203595, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[552, 1, 154.031779, 30.806356, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[553, 1, 1.065661, 0.213132, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[554, 1, 156.050272, 31.210054, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[555, 1, 59.456881, 11.891376, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[556, 1, 91.981774, 18.396355, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[557, 1, 195.428178, 39.085636, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[558, 1, 115.235924, 23.047185, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[559, 1, 61.673157, 12.334631, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[560, 1, 96.348061, 19.269612, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[561, 1, 52.834464, 10.566893, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[562, 1, 144.339798, 28.86796, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[563, 1, 101.482642, 20.296528, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[564, 1, 200.377758, 40.075552, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[565, 1, 151.194947, 30.238989, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[566, 1, 0.242852, 0.04857, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[567, 1, 245.774168, 49.154834, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[568, 1, 227.28164, 45.456328, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[569, 1, 159.916958, 31.983392, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[570, 1, 249.659169, 49.931834, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[571, 1, 183.818078, 36.763616, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[572, 1, 324.224399, 64.84488, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[573, 1, 94.377472, 18.875494, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[574, 1, 179.825124, 35.965025, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[575, 1, 3.379237, 0.675847, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[576, 1, 218.666144, 43.733229, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[577, 1, 241.056629, 48.211326, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[578, 1, 230.152797, 46.030559, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[579, 1, 83.966022, 16.793204, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[580, 1, 17.480476, 3.496095, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[581, 1, 0.100444, 0.020089, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[582, 1, 63.244453, 12.648891, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[583, 1, 72.539063, 14.507813, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[584, 1, 41.61944, 8.323888, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ],
[585, 1, 72.256469, 14.451294, 0, 0, 0, 1.0, 0, 220.0, 0, 1.1, 0.9 ]
])
ppc["gen"] = array([
[586, 272.0, 0, 9999, -9999, 1.0, 100, 1, 272.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[589, 63.1, 0, 9999, -9999, 1.0, 100, 1, 63.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[590, 38.0, 0, 9999, -9999, 1.0, 100, 1, 38.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[593, 11.1, 0, 9999, -9999, 1.0, 100, 1, 11.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[594, 19.0, 0, 9999, -9999, 1.0, 100, 1, 19.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[595, 1417.783693, 0, 9999, -9999, 1.0, 100, 1, 4730.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[598, 12.0, 0, 9999, -9999, 1.0, 100, 1, 12.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[599, 9.3, 0, 9999, -9999, 1.0, 100, 1, 9.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[601, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[602, 24.6, 0, 9999, -9999, 1.0, 100, 1, 24.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[603, 1288.292203, 0, 9999, -9999, 1.0, 100, 1, 3455.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[607, 1800.0, 0, 9999, -9999, 1.0, 100, 1, 1800.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[608, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[609, 36.4, 0, 9999, -9999, 1.0, 100, 1, 36.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[612, 30.0, 0, 9999, -9999, 1.0, 100, 1, 30.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[613, 85.0, 0, 9999, -9999, 1.0, 100, 1, 85.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[614, 30.0, 0, 9999, -9999, 1.0, 100, 1, 30.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[616, 29.0, 0, 9999, -9999, 1.0, 100, 1, 29.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[617, 137.0, 0, 9999, -9999, 1.0, 100, 1, 137.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[618, 33.4, 0, 9999, -9999, 1.0, 100, 1, 33.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[619, 118.0, 0, 9999, -9999, 1.0, 100, 1, 118.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[621, 765.0, 0, 9999, -9999, 1.0, 100, 1, 765.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[624, 27.0, 0, 9999, -9999, 1.0, 100, 1, 27.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[629, 75.3, 0, 9999, -9999, 1.0, 100, 1, 75.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[632, 45.1, 0, 9999, -9999, 1.0, 100, 1, 45.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[637, 53.7, 0, 9999, -9999, 1.0, 100, 1, 53.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[638, 128.7, 0, 9999, -9999, 1.0, 100, 1, 128.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[640, 12.0, 0, 9999, -9999, 1.0, 100, 1, 12.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[641, 12.6, 0, 9999, -9999, 1.0, 100, 1, 12.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[642, 28.9, 0, 9999, -9999, 1.0, 100, 1, 28.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[643, 857.0, 0, 9999, -9999, 1.0, 100, 1, 857.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[647, 14.0, 0, 9999, -9999, 1.0, 100, 1, 14.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[650, 1324.5, 0, 9999, -9999, 1.0, 100, 1, 1324.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[652, 46.9, 0, 9999, -9999, 1.0, 100, 1, 46.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[655, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[661, 32.7, 0, 9999, -9999, 1.0, 100, 1, 32.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[663, 15.0, 0, 9999, -9999, 1.0, 100, 1, 15.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[666, 28.9, 0, 9999, -9999, 1.0, 100, 1, 28.9, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[668, 766.0, 0, 9999, -9999, 1.0, 100, 1, 766.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[670, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[672, 33.1, 0, 9999, -9999, 1.0, 100, 1, 33.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[676, 370.0, 0, 9999, -9999, 1.0, 100, 1, 370.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[681, 40.1, 0, 9999, -9999, 1.0, 100, 1, 40.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[683, 27.5, 0, 9999, -9999, 1.0, 100, 1, 27.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[687, 1329.0, 0, 9999, -9999, 1.0, 100, 1, 1329.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[691, 26.0, 0, 9999, -9999, 1.0, 100, 1, 26.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[693, 194.0, 0, 9999, -9999, 1.0, 100, 1, 194.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[694, 16.4, 0, 9999, -9999, 1.0, 100, 1, 16.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[695, 14.7, 0, 9999, -9999, 1.0, 100, 1, 14.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[696, 721.0, 0, 9999, -9999, 1.0, 100, 1, 721.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[697, 11.6, 0, 9999, -9999, 1.0, 100, 1, 11.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[698, 24.0, 0, 9999, -9999, 1.0, 100, 1, 24.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[702, 73.4, 0, 9999, -9999, 1.0, 100, 1, 73.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[704, 508.0, 0, 9999, -9999, 1.0, 100, 1, 508.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[705, 17.0, 0, 9999, -9999, 1.0, 100, 1, 17.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[707, 34.0, 0, 9999, -9999, 1.0, 100, 1, 34.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[713, 13.4, 0, 9999, -9999, 1.0, 100, 1, 13.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[714, 15.0, 0, 9999, -9999, 1.0, 100, 1, 15.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[716, 0.1, 0, 9999, -9999, 1.0, 100, 1, 0.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[717, 11.0, 0, 9999, -9999, 1.0, 100, 1, 11.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[719, 1267.007238, 0, 9999, -9999, 1.0, 100, 1, 1958.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[722, 20.7, 0, 9999, -9999, 1.0, 100, 1, 20.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[724, 12.1, 0, 9999, -9999, 1.0, 100, 1, 12.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[727, 61.5, 0, 9999, -9999, 1.0, 100, 1, 61.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[728, 510.0, 0, 9999, -9999, 1.0, 100, 1, 510.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[730, 633.2, 0, 9999, -9999, 1.0, 100, 1, 633.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[732, 14.6, 0, 9999, -9999, 1.0, 100, 1, 14.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[735, 84.8, 0, 9999, -9999, 1.0, 100, 1, 84.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[738, 138.5, 0, 9999, -9999, 1.0, 100, 1, 138.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[741, 214.0, 0, 9999, -9999, 1.0, 100, 1, 214.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[742, 9.0, 0, 9999, -9999, 1.0, 100, 1, 9.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[743, 941.074841, 0, 9999, -9999, 1.0, 100, 1, 1410.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[746, 100.0, 0, 9999, -9999, 1.0, 100, 1, 100.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[747, 12.5, 0, 9999, -9999, 1.0, 100, 1, 12.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[748, 110.0, 0, 9999, -9999, 1.0, 100, 1, 110.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[749, 16.0, 0, 9999, -9999, 1.0, 100, 1, 16.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[750, 90.8, 0, 9999, -9999, 1.0, 100, 1, 90.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[753, 311.8, 0, 9999, -9999, 1.0, 100, 1, 311.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[758, 18.5, 0, 9999, -9999, 1.0, 100, 1, 18.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[760, 311.879159, 0, 9999, -9999, 1.0, 100, 1, 794.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[762, 1095.053571, 0, 9999, -9999, 1.0, 100, 1, 1105.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[763, 20.3, 0, 9999, -9999, 1.0, 100, 1, 20.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[765, 59.0, 0, 9999, -9999, 1.0, 100, 1, 59.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[767, 11.2, 0, 9999, -9999, 1.0, 100, 1, 11.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[769, 43.3, 0, 9999, -9999, 1.0, 100, 1, 43.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[771, 690.0, 0, 9999, -9999, 1.0, 100, 1, 690.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[772, 18.8, 0, 9999, -9999, 1.0, 100, 1, 18.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[774, 33.5, 0, 9999, -9999, 1.0, 100, 1, 33.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[777, 79.0, 0, 9999, -9999, 1.0, 100, 1, 79.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[778, 14.7, 0, 9999, -9999, 1.0, 100, 1, 14.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[781, 953.204395, 0, 9999, -9999, 1.0, 100, 1, 1310.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[784, 769.155798, 0, 9999, -9999, 1.0, 100, 1, 1275.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[785, 3.0, 0, 9999, -9999, 1.0, 100, 1, 3.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[787, 778.0, 0, 9999, -9999, 1.0, 100, 1, 778.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[788, 875.0, 0, 9999, -9999, 1.0, 100, 1, 875.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[789, 77.4, 0, 9999, -9999, 1.0, 100, 1, 77.4, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[791, 10.0, 0, 9999, -9999, 1.0, 100, 1, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[792, 62.7, 0, 9999, -9999, 1.0, 100, 1, 62.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[795, 13.6, 0, 9999, -9999, 1.0, 100, 1, 13.6, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[800, 36.5, 0, 9999, -9999, 1.0, 100, 1, 36.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[801, 50.0, 0, 9999, -9999, 1.0, 100, 1, 50.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[802, 500.0, 0, 9999, -9999, 1.0, 100, 1, 500.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[805, 682.832189, 0, 9999, -9999, 1.0, 100, 1, 1410.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[806, 35.8, 0, 9999, -9999, 1.0, 100, 1, 35.8, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[808, 217.5, 0, 9999, -9999, 1.0, 100, 1, 217.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[809, 12.5, 0, 9999, -9999, 1.0, 100, 1, 12.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[811, 25.2, 0, 9999, -9999, 1.0, 100, 1, 25.2, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[814, 89.0, 0, 9999, -9999, 1.0, 100, 1, 89.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[816, 80.1, 0, 9999, -9999, 1.0, 100, 1, 80.1, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[817, 54.0, 0, 9999, -9999, 1.0, 100, 1, 54.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[818, 734.814817, 0, 9999, -9999, 1.0, 100, 1, 757.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[821, 82.5, 0, 9999, -9999, 1.0, 100, 1, 82.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[822, 134.0, 0, 9999, -9999, 1.0, 100, 1, 134.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[826, 58.0, 0, 9999, -9999, 1.0, 100, 1, 58.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[830, 89.0, 0, 9999, -9999, 1.0, 100, 1, 89.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[834, 23.3, 0, 9999, -9999, 1.0, 100, 1, 23.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[835, 63.7, 0, 9999, -9999, 1.0, 100, 1, 63.7, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[836, 25.5, 0, 9999, -9999, 1.0, 100, 1, 25.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[837, 472.0, 0, 9999, -9999, 1.0, 100, 1, 472.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[839, 73.3, 0, 9999, -9999, 1.0, 100, 1, 73.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[841, 23.3, 0, 9999, -9999, 1.0, 100, 1, 23.3, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[843, 333.0, 0, 9999, -9999, 1.0, 100, 1, 333.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[844, 40.0, 0, 9999, -9999, 1.0, 100, 1, 40.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[845, 318.0, 0, 9999, -9999, 1.0, 100, 1, 318.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[849, 779.0, 0, 9999, -9999, 1.0, 100, 1, 779.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[850, 16.0, 0, 9999, -9999, 1.0, 100, 1, 16.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[851, 79.5, 0, 9999, -9999, 1.0, 100, 1, 79.5, 0.0, 0, 0, 0, 0, 0, 0, 0, | |
"""Hey ACPlayGames, going through and using your comment format was a pain but I tried my best :hugging:."""
"""Functions for retriving schedule and day info of a day in the semester."""
import csv
from collections import namedtuple
from datetime import date, timedelta
from datetime import datetime as dt
from datetime import time
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
from stuy_utils import errors
# Per-day term information: whether school is in session, the letter-day
# cycle, the schedule name, any testing subjects, and special events.
Info = namedtuple("Info", ("school", "cycle", "schedule", "testing", "events"))
# Start/end pair for a single bell period.
Time = namedtuple("Time", ("start", "end"))
# Data files live in a ``data`` directory next to this module.
TERM_PATH = f"{Path(__file__).parent}/data/term-days-2022.tsv"
REGULAR_BELLS_PATH = f"{Path(__file__).parent}/data/regular.tsv"
CONFERENCE_BELLS_PATH = f"{Path(__file__).parent}/data/conference.tsv"
HOMEROOM_BELLS_PATH = f"{Path(__file__).parent}/data/homeroom.tsv"
PTC_BELLS_PATH = f"{Path(__file__).parent}/data/ptc.tsv"
def convert_12h_to_24h(hours12: str) -> str:
    """Convert a 12-hour clock time to a zero-padded 24-hour time.

    Args:
        hours12: A string representing a 12-hour time, e.g. "1:00 PM".

    Raises:
        errors.InvalidTime: Thrown if the input is not a string, is not
            of the form "<hours>:<minutes> AM|PM", has a non-numeric
            hour, or has hours > 12.

    Returns:
        str: A 24-hour time string with the hour zero-padded to two
        digits, e.g. "13:00" or "09:05".
    """
    if not isinstance(hours12, str):
        raise errors.InvalidTime(hours12)
    parts = hours12.split(" ")
    # Require exactly "<clock> AM" or "<clock> PM". The original crashed
    # with IndexError on inputs without a space (e.g. "1:00PM") instead
    # of raising the documented InvalidTime.
    if len(parts) != 2 or parts[1] not in ("AM", "PM"):
        raise errors.InvalidTime(hours12)
    clock, meridiem = parts
    if ":" not in clock:
        raise errors.InvalidTime(hours12)
    try:
        hours, minutes = clock.split(":")
        hour_value = int(hours)
    except ValueError:
        # Multiple colons or a non-numeric hour component.
        raise errors.InvalidTime(hours12) from None
    if hour_value > 12:
        raise errors.InvalidTime(hours12)
    if hour_value == 12:
        hour_value = 0  # 12:xx AM is midnight; PM adds 12 back below
    if meridiem == "PM":
        hour_value += 12
    return f"{hour_value:02d}:{minutes}"
def convert_24h_to_minutes(hours24: str) -> int:
    """Convert a 24-hour time to minutes since midnight.

    Args:
        hours24: A string representing a 24-hour time, e.g. "13:00".

    Raises:
        errors.InvalidTime: Thrown if the input is not a string, lacks a
            ":", has non-numeric components, or is out of range
            (hours > 24, minutes > 59, or negative values).

    Returns:
        int: The number of minutes since midnight.
    """
    if not isinstance(hours24, str):
        raise errors.InvalidTime(hours24)
    if ":" not in hours24:
        raise errors.InvalidTime(hours24)
    try:
        hours, minutes = hours24.split(":")
        hour_value, minute_value = int(hours), int(minutes)
    except ValueError:
        # Multiple colons or non-numeric components previously escaped
        # as a bare ValueError; report them as InvalidTime instead.
        raise errors.InvalidTime(hours24) from None
    # The original never validated minutes, silently accepting e.g. "1:75".
    if not (0 <= hour_value <= 24 and 0 <= minute_value <= 59):
        raise errors.InvalidTime(hours24)
    return hour_value * 60 + minute_value
def _load_bell_schedule(path):
    """Load one bell-schedule TSV into a dict of period name -> Time.

    The first row is a header and is skipped; remaining rows hold a
    period name followed by start/end times in 12-hour clock format.
    """
    with open(path, "r") as tsv:
        rows = list(csv.reader(tsv, delimiter="\t"))[1:]
    return {row[0]: Time(*[time.fromisoformat(convert_12h_to_24h(cell)) for cell in row[1:]])
            for row in rows}


# Per-day term info, keyed by ISO-formatted date string.
with open(TERM_PATH, "r") as term_tsv:
    TERM_DAYS = {row[0]: Info(*row[1:]) for row in list(csv.reader(term_tsv, delimiter="\t"))[1:]}

REGULAR_BELL_SCHEDULE = _load_bell_schedule(REGULAR_BELLS_PATH)
CONFERENCE_BELL_SCHEDULE = _load_bell_schedule(CONFERENCE_BELLS_PATH)
HOMEROOM_BELL_SCHEDULE = _load_bell_schedule(HOMEROOM_BELLS_PATH)
# Bug fix: this previously re-read the already-exhausted homeroom file
# handle, leaving PTC_BELL_SCHEDULE empty; PTC_BELLS_PATH was never used.
PTC_BELL_SCHEDULE = _load_bell_schedule(PTC_BELLS_PATH)
def convert_to_isoformat(day: Union[date, dt]) -> str:
    """Return *day* as an ISO 8601 date string (yyyy-mm-dd).

    Accepts either a date or a datetime object; datetimes are truncated
    to their date component before formatting.

    Args:
        day (Union[datetime.date, datetime.datetime]): A date or datetime
            object from the datetime library.

    Raises:
        errors.InvalidDate: Thrown if the input is not a date or a
            datetime object.

    Returns:
        str: A date using the ISO 8601 format (yyyy-mm-dd).
    """
    if not isinstance(day, date):
        raise errors.InvalidDate(day)
    # A datetime is also a date; drop the time-of-day portion first.
    if isinstance(day, dt):
        day = day.date()
    # NOTE(review): a TERM_DAYS membership check (raising DayNotInData)
    # was present here but deliberately commented out upstream; it is
    # kept disabled to preserve behavior. The upstream comment claiming
    # TERM_DAYS is nonexistent looks stale — confirm before re-enabling:
    # if day.isoformat() not in TERM_DAYS:
    #     raise errors.DayNotInData(day.isoformat())
    return day.isoformat()
def get_day_info(day: Union[date, dt]) -> Info:
    """Return the term information for *day* as an ``Info`` namedtuple.

    Fields stored as the literal string "None" in the data are mapped to
    ``None``; the ``school`` field is converted to a real boolean.

    Args:
        day (Union[datetime.date, datetime.datetime]): A date or datetime
            object from the datetime library.

    Raises:
        errors.InvalidDate: Thrown if the input is not a date or a
            datetime object.
        errors.DayNotInData: Thrown if the inputted day is not in the
            term data.

    Returns:
        Info: A namedtuple with fields 'school', 'cycle', 'schedule',
        'testing', and 'events'.
    """
    if not isinstance(day, date):
        raise errors.InvalidDate(day)
    if isinstance(day, dt):
        day = day.date()  # drop the time-of-day component
    iso_date = day.isoformat()
    if iso_date not in TERM_DAYS:
        raise errors.DayNotInData(iso_date)
    raw = TERM_DAYS[iso_date]

    def _optional(field):
        # The TSV stores the literal string "None" for empty fields.
        return None if field == "None" else field

    return Info(
        school=(raw[0] == "True"),
        cycle=_optional(raw[1]),
        schedule=_optional(raw[2]),
        testing=_optional(raw[3]),
        events=_optional(raw[4]),
    )
def get_next_school_day(day: Union[date, dt], always_same: bool = False) -> Optional[date]:
    """Returns when the next school day is.

    Returns a date object of the next school day from the given day. The
    given day itself is returned if it is a school day.

    Args:
        day (Union[datetime.date, datetime.datetime]): A date or datetime
            object from the datetime library.
        always_same (bool, optional): Whether or not to always return the
            given day. Defaults to False.

    Raises:
        errors.InvalidDate: Thrown if the input is not a date or datetime
            object.
        errors.DayNotInData: Thrown if the inputted day is not in the
            term data.

    Returns:
        Optional[datetime.date]: A date object with the year, month, and
        day of the next school day, or None if there are no further
        school days in the term data.
    """
    if not isinstance(day, date):
        raise errors.InvalidDate(day)
    if isinstance(day, dt):
        day = day.date()  # Converts datetime to date to remove time
    iso_date = day.isoformat()
    if iso_date not in TERM_DAYS:
        raise errors.DayNotInData(iso_date)
    if TERM_DAYS[iso_date][0] == "True":
        return day
    if always_same:
        # NOTE(review): this returns the given day even though it is NOT
        # a school day, which contradicts the docstring's "...if the given
        # day is a school day". Kept as-is to preserve behavior — confirm
        # the intended semantics of always_same.
        return day
    # Walk forward until a school day is found. The membership check must
    # come BEFORE get_day_info(): the original called get_day_info() in
    # the loop condition first, so it raised DayNotInData when walking
    # past the end of the data and the documented `return None` branch
    # was unreachable.
    next_day = day + timedelta(days=1)
    while next_day.isoformat() in TERM_DAYS:
        if get_day_info(next_day).school:
            return next_day
        next_day += timedelta(days=1)
    return None
def get_bell_schedule(day: Union[date, dt], this_day: bool = False) -> Dict[str, Time]:
    """Returns the bell periods of the next school day.

    If the given day is a school day, that day's own schedule is returned
    (even after school hours). Otherwise the next school day's schedule is
    returned, unless ``this_day`` is True, in which case None is returned.

    Args:
        day (Union[datetime.date, datetime.datetime]): A date or datetime
            object from the datetime library.
        this_day (bool, optional): If True, never look ahead to the next
            school day. Defaults to False.

    Raises:
        errors.InvalidDate: Thrown if the input is not a date or datetime
            object.
        errors.DayNotInData: Thrown if the inputted day is not in the
            term data.

    Returns:
        Dict[str, Time]: A dictionary mapping period names to Time
        namedtuples with fields 'start' and 'end', or None when no
        schedule applies.
    """
    if not isinstance(day, date):
        raise errors.InvalidDate(day)
    if isinstance(day, dt):
        day = day.date()  # Converts datetime to date to remove time
    iso_date = day.isoformat()
    if iso_date not in TERM_DAYS:
        raise errors.DayNotInData(iso_date)
    if TERM_DAYS[iso_date][0] == "True":
        schedule_name = TERM_DAYS[iso_date][2]
        if schedule_name == "None":
            # Data error: a school day should always name a schedule.
            # Fall back to the regular bells — the original comment
            # promised this fallback but the code returned None.
            return REGULAR_BELL_SCHEDULE
        schedules = {
            "Regular": REGULAR_BELL_SCHEDULE,
            "Conference": CONFERENCE_BELL_SCHEDULE,
            "Homeroom": HOMEROOM_BELL_SCHEDULE,
            "PTC": PTC_BELL_SCHEDULE,
        }
        return schedules.get(schedule_name)  # None for unrecognized names
    if this_day:
        return None
    next_day = get_next_school_day(day)
    if next_day is None:
        # No further school days in the term data; the original recursed
        # with None and crashed via errors.InvalidDate.
        return None
    return get_bell_schedule(next_day)
def get_current_class(day: dt) -> Optional[Tuple[str, Time]]:
    """Returns information of the current class.

    Looks up the bell schedule for the given datetime and returns the
    period whose start/end window contains the time of day.

    Args:
        day (datetime.datetime): A datetime object from the datetime
            library.

    Raises:
        errors.InvalidDate: Thrown if the input is not a datetime object.
        errors.DayNotInData: Thrown if the inputted day is not in the
            term data.

    Returns:
        Optional[Tuple[str, Time]]: A tuple of the period name and a Time
        namedtuple with fields 'start' and 'end', or None when no period
        is currently in session.
    """
    schedule = get_bell_schedule(day)
    moment = day.time()
    for period_name, period_time in schedule.items():
        if period_time.start <= moment <= period_time.end:
            return period_name, period_time
    return None
def get_next_class(day: dt, skip_passing: bool = False) -> Optional[Tuple[str, Time]]:
"""Returns information of the next class.
Returns a tuple of information | |
import os
import time
import datetime
import shutil
import requests
import re
import yaml
import base64
import codecs
from collections import OrderedDict
import logging
from flask import current_app, flash, render_template, abort
from flask.templating import TemplateNotFound
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.sql.functions import count
from emonitor.modules.alarms import alarmutils
from emonitor.modules.alarms.alarmtype import AlarmType
from emonitor.modules.alarmobjects.alarmobject import AlarmObject
from emonitor.modules.alarmkeys.alarmkey import Alarmkey
from emonitor.modules.alarms.alarmhistory import AlarmHistory
from emonitor.modules.alarms.alarmattribute import AlarmAttribute
from emonitor.modules.alarms.alarmfield import AlarmField, AFBasic
from emonitor.modules.maps.map import Map
from emonitor.modules.settings.settings import Settings
from emonitor.modules.streets.street import Street
from emonitor.modules.streets.city import City
from emonitor.modules.settings.department import Department
from emonitor.modules.cars.car import Car
from emonitor.extensions import babel, db, events, scheduler, signal
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
USE_NOMINATIM = 0
LASTALARM = 0.0  # timestamp in milliseconds
class Alarm(db.Model):
"""Alarm class"""
__tablename__ = 'alarms'
__table_args__ = {'extend_existing': True}
ALARMSTATES = {'0': 'created', '1': 'active', '2': 'done'}
"""
- 0: alarm *created*
- 1: alarm *active*
- 2: alarm *done*
- 3: alarm *archived* (not in list, only for admin area)
"""
ALARMTYPES = {'1': 'fax', '2': 'manual'}
"""
- 1: alarm by *fax* created
- 2: alarm *manual* created
"""
ROUTEURL = "http://www.yournavigation.org/api/1.0/gosmore.php"
"""
URL for routing webservice
"""
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DATETIME)
_key = db.Column('key', db.Text)
type = db.Column(db.Integer, default=0)
state = db.Column(db.Integer, default=0)
attributes = db.relationship("AlarmAttribute", collection_class=attribute_mapped_collection('name'), cascade="all, delete-orphan")
history = db.relationship(AlarmHistory.__name__, backref="alarms", lazy='joined', cascade="all, delete-orphan")
# additional properties defined in alarmutils
endtimestamp = property(alarmutils.get_endtimestamp)
cars1 = property(alarmutils.get_cars1)
cars2 = property(alarmutils.get_cars2)
material = property(alarmutils.get_material, alarmutils.set_material)
city = property(alarmutils.get_city, alarmutils.set_city)
key = property(alarmutils.get_key)
street = property(alarmutils.get_street, alarmutils.set_street)
street2 = property(alarmutils.get_street2)
streetno = property(alarmutils.get_streetno)
housenumber = property(alarmutils.get_housenumber)
object = property(alarmutils.get_object, alarmutils.set_object)
person = property(alarmutils.get_person)
priority = property(alarmutils.get_priority)
remark = property(alarmutils.get_remark)
lat = property(alarmutils.get_lat)
lng = property(alarmutils.get_lng)
zoom = property(alarmutils.get_zoom)
position = property(alarmutils.get_position, alarmutils.set_position)
marker = property(alarmutils.get_marker)
def __init__(self, timestamp, key, type, state):
    """Create a new alarm.

    :param timestamp: datetime of the alarm
    :param key: alarm key text (category/keyword of the operation)
    :param type: alarm origin, see ALARMTYPES
    :param state: initial state, see ALARMSTATES
    """
    self.timestamp = timestamp
    self._key = key
    self.type = type  # 1: automatic/fax, 2: manual
    self.state = state  # 0: created, 1: active, 2: done, 3: archived
def get(self, attrname, default=''):
    """Return the value of the alarm attribute *attrname*.

    :param attrname: name of the attribute to look up
    :param default: value delivered when the attribute is not stored
    :return: stored attribute value or *default*
    """
    try:
        return self.attributes[attrname].value
    except KeyError:
        return default
def set(self, attrname, value):
    """Store *value* under *attrname*, creating the attribute row on demand.

    :param attrname: attribute name
    :param value: value to store
    """
    try:
        self.attributes[attrname].value = value
    except KeyError:
        # first write of this attribute: create the backing row
        self.attributes[attrname] = AlarmAttribute(attrname, value)
def addHistory(self, name, value, dtime=None):
    """Append an :py:class:`emonitor.modules.alarms.alarmhistory.AlarmHistory`
    entry recording an action performed on this alarm.

    :param name: name of the event
    :param value: value of the history entry
    :param dtime: timestamp of the entry; defaults to the current time
    """
    entry_time = dtime or datetime.datetime.now()
    self.history.append(AlarmHistory(name, value, entry_time))
def getAdditionalLayers(self):
    """Get additional layers of the default map definition matching this alarm.

    A map-item definition is included when any of its key regex patterns
    matches the category of this alarm's key.

    :return: list of :py:class:`emonitor.modules.mapitems.mapitem.MapItem` objects
    """
    cat = self.key.category
    items = []
    for itemtype in self.getMap().getMapItemDefinitions():
        # bug fix: the old inner loop appended the definition once per
        # matching pattern, producing duplicate layers when several of its
        # key patterns matched; append each definition at most once
        if any(re.search(r, cat) for r in itemtype['key']):
            items.append(itemtype)
    return items
def updateSchedules(self, reference=0):
    """
    (Re-)register scheduler events for the current alarm:

    * *autoclose*: active fax alarms are closed after ``alarms.autoclose`` minutes
    * *autoarchive*: closed alarms are archived after ``alarms.autoarchive`` hours

    :param reference: 0 (default) = time.time()
                      1 = alarm.timestamp
    """
    # remove schedules of current alarm so they are not registered twice
    for job in scheduler.get_jobs():
        if job.name.startswith('alarms_') and job.name.endswith('_{}'.format(self.id)):
            scheduler.remove_job(job.id)
    # resolve the reference point the delays are measured from
    if reference == 0:
        reference = time.time()
    else:
        reference = time.mktime(self.timestamp.timetuple())
    # autoclose: only open (state 1) fax alarms (type 1), feature enabled
    if self.state == 1 and self.type == 1 and Settings.get('alarms.autoclose', '0') != '0':  # only for open alarms
        closingtime = reference + float(Settings.get('alarms.autoclose', 30)) * 60.0  # minutes -> seconds
        if closingtime > time.time():  # close alarm in future
            logger.debug("add close schedule in future for alarmid {}".format(self.id))
            scheduler.add_job(Alarm.changeState, run_date=datetime.datetime.fromtimestamp(closingtime), args=[self.id, 2], name="alarms_close_{}".format(self.id))
        else:  # close alarm now
            logger.debug("add close schedule now for alarmid {}".format(self.id))
            scheduler.add_job(Alarm.changeState, args=[self.id, 2], name="alarms_close_{}".format(self.id))
            # closing immediately makes the autoarchive branch below applicable
            self.state = 2
    # autoarchive: only closed (state 2) alarms, feature enabled
    if self.state == 2 and Settings.get('alarms.autoarchive', '0') != '0':  # only closed alarms
        archivetime = reference + float(Settings.get('alarms.autoarchive', 12)) * 3600.0  # hours -> seconds
        if archivetime > time.time():  # archive alarm in future
            logger.debug("add archive schedule in future for alarmid {}".format(self.id))
            scheduler.add_job(Alarm.changeState, run_date=datetime.datetime.fromtimestamp(archivetime), args=[self.id, 3], name="alarms_archive_{}".format(self.id))
        else:  # archive alarm now
            logger.debug("add archive schedule now for alarmid {}".format(self.id))
            scheduler.add_job(Alarm.changeState, args=[self.id, 3], name="alarms_archive_{}".format(self.id))
def getDepartment(self):
    """Return the department responsible for this alarm.

    Uses the department of the alarm street's city when available, otherwise
    falls back to the default department.

    :return: department object(s) as delivered by ``Department``
    """
    if self.street.city:
        return Department.getDepartments(id=self.street.city.dept)
    # bug fix: the fallback value was computed but never returned,
    # so callers received None for alarms without a city
    return Department.getDefaultDepartment()
def getAlarmFields(self):
    """Return the AlarmField definitions for this alarm's department.

    Falls back to the default department when the street has no city.
    """
    if self.street.city:
        dept_id = self.street.city.dept
    else:
        dept_id = Department.getDefaultDepartment().id
    return AlarmField.getAlarmFields(dept=dept_id)
def getFieldValue(self, field):
    """
    Resolve the display value of an export/report field for this alarm.

    :param field: field identifier; supported forms are
                  ``<fieldtype>-<name>`` (delegated to the matching AlarmField),
                  ``basic.*`` (delegated to AFBasic) and ``alarm.*``
                  (key/date/time taken from this alarm)
    :return: resolved value, or *field* itself when the name is unknown
    """
    value = field
    if '-' in field:
        # '<fieldtype>-<name>': let the field-type specific AlarmField resolve it
        value = AlarmField.getAlarmFields(fieldtype=field.split('-')[0]).getFieldValue(field.split('-')[1], self)
    elif field.startswith('basic.'):
        value = AFBasic().getFieldValue(field, self)
    elif field.startswith('alarm.'):
        if field == 'alarm.key':
            # keys stored in the database (id set) carry a category prefix
            if self.key.id:
                return "{}: {}".format(self.key.category, self.key.key)
            return self.key.key
        elif field == 'alarm.date':
            return self.timestamp.strftime("%d.%m.%Y")
        elif field == 'alarm.time':
            return self.timestamp.strftime("%H:%M")
        else:
            # unknown alarm.* field: fall through with the raw name
            value = field
    return value
@staticmethod
def getMap():
    """
    Returns default map defined in eMonitor

    :return: :py:class:`emonitor.modules.maps.map.Map`
    """
    return Map.getDefaultMap()
@staticmethod
def getAlarms(id=0, days=0, state=-1):
    """
    Get alarm objects filtered by the given parameters.

    :param id: id of a single alarm, or 0 for a (filtered) list
    :param days: restrict to alarms of the last *days* days, 0 = all days
    :param state: -1 for all states, otherwise a value from
                  :py:attr:`emonitor.modules.alarms.alarm.Alarm.ALARMSTATES`
    :return: single :py:class:`emonitor.modules.alarms.alarm.Alarm` when
             *id* is given, otherwise a list of alarms (newest first)
    """
    if id != 0:
        return Alarm.query.filter_by(id=id).first()
    # build the list query incrementally from the optional filters
    query = Alarm.query
    if days != 0:  # filter last days, 0 = all days
        since = datetime.datetime.now() - datetime.timedelta(days=days)
        query = query.filter(Alarm.timestamp > since)
    if int(state) != -1:  # -1 = all states
        query = query.filter(Alarm.state == state)
    return query.order_by(Alarm.timestamp.desc()).all()
@staticmethod
def getAlarmCount(days=0):
    """
    Get number of alarms, grouped by state

    :param optional days: 0 for all alarms, else alarms of the last *days* days
    :return: list of (state, count) rows
    """
    if days != 0:
        # NOTE(review): grouping by (timestamp, state) yields one row per
        # distinct timestamp rather than one per state -- confirm whether
        # group_by(Alarm.state) alone was intended here
        return db.get(Alarm.state, count(Alarm.id)).filter(Alarm.timestamp > (datetime.datetime.now() - datetime.timedelta(days=days))).order_by(Alarm.timestamp.desc()).group_by(Alarm.timestamp, Alarm.state).all()
    else:
        return db.get(Alarm.state, count(Alarm.id)).group_by(Alarm.state).all()
@staticmethod
def getActiveAlarms():
    """
    Get all alarms in state *active* (1), newest first.

    :return: list of :py:class:`emonitor.modules.alarms.alarm.Alarm`
    """
    from sqlalchemy.exc import OperationalError
    try:
        active = Alarm.query.filter_by(state=1)
        return active.order_by(Alarm.timestamp.desc()).all()
    except OperationalError:
        # e.g. database/table not available yet -> behave as "no alarms"
        return []
@staticmethod
def changeStates(state):
    """
    Move ALL alarms to the given state.

    :param state: target state, see
                  :py:attr:`emonitor.modules.alarms.alarm.Alarm.ALARMSTATES`
    """
    all_alarms = Alarm.getAlarms(0)
    for current in all_alarms:
        Alarm.changeState(current.id, state)
def getRouting(self):
    """Return routing data (route geometry) for this alarm.

    The route is fetched from the routing webservice on first use and cached
    as YAML in the ``routing`` attribute.

    :return: dict with routing data; carries ``error``/``errormessage`` keys
             when the webservice lookup failed
    """
    if self.get('routing', '') == "":  # load from webservice if not stored
        routingdata = alarmutils.getAlarmRoute(self)
        if len(routingdata['coordinates']) > 0:
            self.set('routing', yaml.safe_dump(routingdata, encoding="UTF-8"))
            db.session.commit()
    # bug fix: the data was written with safe_dump, so safe_load is
    # sufficient; yaml.load without an explicit Loader is unsafe on
    # untrusted input and deprecated since PyYAML 5.1
    data = yaml.safe_load(self.get('routing'))
    if 'error' in data:
        data['errormessage'] = babel.gettext(u'alarms.routingerror')
    return data
@staticmethod
def changeState(id, state):
    """
    Change state of alarm with given id. Adds an alarmhistory entry and emits
    an ``alarm`` signal; on activation also deduplicates resources against
    other active alarms and (re)registers scheduler jobs.

    :param id: id of alarm
    :param state: new state as :py:attr:`emonitor.modules.alarms.alarm.Alarm.ALARMSTATES`
    :return: on activation, the list of cars/material already bound to other
             active alarms (conflicts); otherwise an empty list or None
    """
    from emonitor.extensions import monitorserver
    global LASTALARM  # timestamp of the most recent activation
    alarm = Alarm.getAlarms(id=id)
    if not alarm:
        return []
    # distinguish a real state change from the initial activation of a
    # freshly created alarm (state 0) for the event name sent below
    if alarm.state != state and alarm.state != 0:  # only change
        _op = 'changestate'
    else:
        _op = 'added'
    if alarm.get('alarmtype', '') != '':
        _type = '.{}'.format(alarm.get('alarmtype'))
    else:
        _type = ''
    alarm.state = state
    try:
        alarm.addHistory('autochangeState', Alarm.ALARMSTATES[str(state)])
    except KeyError:
        # state 3 is not in ALARMSTATES -> record it as 'archived'
        alarm.addHistory('autochangeState', 'archived')
    db.session.commit()
    if state == 1:  # activate alarm
        # collect cars/material already used by other active alarms
        c = []
        for a in Alarm.getActiveAlarms():  # check cars
            if a.id == id:
                continue
            c.extend(set(a.cars1).intersection(set(alarm.cars1)))
            c.extend(set(a.cars2).intersection(set(alarm.cars2)))
            c.extend(set(a.material).intersection(set(alarm.material)))
        if time.time() - LASTALARM < 60.0:
            # a previous activation happened less than a minute ago: drop
            # pending 'changeLayout' jobs that reference an active alarm
            try:
                ids = [a.id for a in Alarm.getActiveAlarms()]
                for j in [job for job in scheduler.get_jobs() if job.name == 'changeLayout']:
                    for i in ids:
                        if "'alarmid', %s" % i in str(j.args):  # layout changes for given alarm
                            # NOTE(review): other methods use scheduler.remove_job;
                            # confirm unschedule_job is the correct API here
                            scheduler.unschedule_job(j)
            except:
                logger.error('%s' % [a.id for a in Alarm.getActiveAlarms()])
        LASTALARM = time.time()
        alarm.updateSchedules(reference=0)  # use current time + delta
        j = scheduler.add_job(events.raiseEvent, next_run_time=datetime.datetime.fromtimestamp(LASTALARM), args=['alarm_{}{}'.format(_op, _type)], kwargs={'alarmid': id}, name="alarms_activate_{}".format(id))
        signal.send('alarm', _op, alarmid=id, newstate=state)
        try:
            # flash() needs a request context; ignore failures outside one
            flash(babel.gettext(u'alarms.statechangeactivated'), 'alarms.activate')
        except:
            pass
        finally:
            monitorserver.sendMessage('0', 'reset')  # refresh monitor layout
        return list(set(c))
    elif state == 2:  # close alarm
        LASTALARM = 0.0
        alarm.updateSchedules(reference=1)  # use alarm.timestamp + delta
        monitorserver.sendMessage('0', 'reset')  # refresh monitor layout
        signal.send('alarm', 'changestate', newstate=2)
        return []
    elif state == 3:  # archive alarm
        alarm.updateSchedules()
        signal.send('alarm', 'changestate', newstate=3)
        return []
    # any other state: just broadcast the change (returns None)
    signal.send('alarm', 'changestate', newstate=state)
@staticmethod
def getExportData(exportformat, **params):
"""
Export alarm to given format
:param exportformat: *.html*, *.png*, *telegram*, [*mail*]
:param params:
- style: exportstyle: *alarmmap*, *routemap*, *telegram.text*, *telegram.venue*
- filename: name of exportfile
:return: alarm in correct format
"""
if params['id'] and params:
alarm = Alarm.getAlarms(id=params['id'])
logger.debug('load export data for alarm {}, style {}, exportformat {}'.format(params['id'], params['style'], exportformat))
if not alarm: # create dummy alarm
alarm = Alarm(datetime.datetime.now(), '', 2, 0)
alarm.position = dict(lat=Settings.get('defaultLat'), lng=Settings.get('defaultLng'))
alarm.set('id.key', '1')
alarm.set('id.address', '1')
alarm.set('id.city', '1')
alarm.set('remark', 'TEST TEST TEST')
if alarm:
if exportformat | |
be added to the resource
and made part of this aggregation
"""
for fl in files_to_add:
uploaded_file = UploadedFile(file=open(fl, 'rb'),
name=os.path.basename(fl))
new_res_file = add_file_to_resource(
resource, uploaded_file, folder=upload_folder, add_to_aggregation=False
)
# make each resource file we added part of the logical file
self.add_resource_file(new_res_file)
def add_resource_files_in_folder(self, resource, folder):
    """
    A helper for creating an aggregation. Makes every resource file stored
    directly in *folder* (no sub-folders) part of this aggregation.

    :param resource: an instance of CompositeResource
    :param folder: folder whose files are made part of this aggregation
    :return: the list of ResourceFile objects that were added
    """
    folder_files = ResourceFile.list_folder(resource=resource, folder=folder,
                                            sub_folders=False)
    for folder_file in folder_files:
        self.add_resource_file(folder_file)
    return folder_files
def copy_resource_files(self, resource, files_to_copy, tgt_folder):
    """
    A helper for creating an aggregation. Copies the given resource files into
    *tgt_folder* and makes each copy part of this aggregation.

    :param resource: an instance of CompositeResource for which the
        aggregation is being created
    :param files_to_copy: resource files (with irods storage paths) to copy
    :param tgt_folder: destination folder for the copies
    """
    for original in files_to_copy:
        duplicate = ResourceFile.create(resource=resource,
                                        file=None,
                                        folder=tgt_folder,
                                        source=original.storage_path)
        # make the copied file part of the aggregation/file type
        self.add_resource_file(duplicate)
def get_copy(self, copied_resource):
    """Create a copy of this logical file object, with its metadata, to
    support resource copy.

    :param copied_resource: the copy of the resource the new aggregation
        belongs to
    Note: the returned logical file has no resource file associations.
    """
    clone = type(self).create(copied_resource)
    clone.dataset_name = self.dataset_name
    clone.metadata.extra_metadata = copy.deepcopy(self.metadata.extra_metadata)
    clone.metadata.keywords = self.metadata.keywords
    clone.metadata.save()
    clone.save()
    # re-create every metadata element on the clone, stripping the
    # database identity fields so new rows are created
    for element in self.metadata.get_metadata_elements():
        element_kwargs = model_to_dict(element)
        for identity_field in ('content_type', 'id', 'object_id'):
            element_kwargs.pop(identity_field)
        clone.metadata.create_element(element.term, **element_kwargs)
    return clone
def delete(self, using=None, keep_parents=False):
    """Django model delete(); overridden so subclasses have a single hook
    to extend deletion behavior."""
    super().delete()
def logical_delete(self, user, delete_res_files=True):
    """
    Deletes the logical file as well as all resource files associated with this logical file.
    This function is primarily used by the system to delete logical file object and associated
    metadata as part of deleting a resource file object. Any time a request is made to
    delete a specific resource file object, if the requested file is part of a
    logical file then all files in the same logical file group will be deleted. If custom logic
    requires deleting the logical file object (LFO) then instead of using LFO.delete(), you must
    use LFO.logical_delete().

    :param user: user who is deleting file type/aggregation
    :param delete_res_files: if True all resource files that are part of this
        logical file will be deleted
    """
    from hs_core.hydroshare.resource import delete_resource_file
    # capture the parent before this aggregation disappears: its xml files
    # must be regenerated afterwards so references to this one are removed
    parent_aggr = self.get_parent()
    resource = self.resource
    # delete associated metadata and map xml documents
    istorage = resource.get_irods_storage()
    if istorage.exists(self.metadata_file_path):
        istorage.delete(self.metadata_file_path)
    if istorage.exists(self.map_file_path):
        istorage.delete(self.map_file_path)
    # delete all resource files associated with this instance of logical file
    if delete_res_files:
        for f in self.files.all():
            # delete_logical_file=False prevents recursing back into this method
            delete_resource_file(resource.short_id, f.id, user, delete_logical_file=False)
    # delete logical file first then delete the associated metadata file object;
    # deleting the logical file object will not automatically delete the
    # associated metadata file object
    metadata = self.metadata if self.has_metadata else None
    self.delete()
    if metadata is not None:
        # this should also delete all metadata elements that have generic
        # relations with the metadata object
        metadata.delete()
    # if this deleted aggregation has a parent aggregation - recreate xml files
    # for the parent aggregation so that references to the deleted aggregation
    # can be removed
    if parent_aggr is not None:
        parent_aggr.create_aggregation_xml_documents()
    resource.cleanup_aggregations()
def remove_aggregation(self):
    """Deletes the aggregation object (logical file) *self* and the associated metadata
    object. However, it doesn't delete any resource files that are part of the aggregation;
    those files are re-attached to the parent aggregation when one exists."""
    # delete associated metadata and map xml document
    istorage = self.resource.get_irods_storage()
    if istorage.exists(self.metadata_file_path):
        istorage.delete(self.metadata_file_path)
    if istorage.exists(self.map_file_path):
        istorage.delete(self.map_file_path)
    # find if there is a parent aggregation - files in this (self) aggregation
    # need to be added to parent if exists
    parent_aggr = self.get_parent()
    # snapshot the file list now; the association is severed below
    res_files = []
    res_files.extend(self.files.all())
    # first need to set the aggregation for each of the associated resource files to None
    # so that deleting the aggregation (logical file) does not cascade to deleting of
    # resource files associated with the aggregation
    for res_file in self.files.all():
        res_file.logical_file_content_object = None
        res_file.save()
    # delete logical file (aggregation) first then delete the associated metadata file object;
    # deleting the logical file object will not automatically delete the
    # associated metadata file object
    metadata = self.metadata if self.has_metadata else None
    self.delete()
    if metadata is not None:
        # this should also delete all metadata elements that have generic
        # relations with the metadata object
        metadata.delete()
    # make all the resource files of this (self) aggregation part of the parent aggregation
    if parent_aggr is not None:
        for res_file in res_files:
            parent_aggr.add_resource_file(res_file)
        # need to regenerate the xml files for the parent so that the references to this
        # deleted aggregation can be removed from the parent xml files
        parent_aggr.create_aggregation_xml_documents()
    # notify listeners and flag the resource for bag/metadata regeneration
    post_remove_file_aggregation.send(
        sender=self.__class__,
        resource=self.resource,
        res_files=self.files.all()
    )
    self.resource.setAVU("bag_modified", True)
    self.resource.setAVU('metadata_dirty', 'true')
def get_parent(self):
    """Find the parent model instance or fileset aggregation of this aggregation.

    :return: a model instance or fileset aggregation if found, otherwise None
    """
    aggr_path = self.aggregation_name
    # a top-level aggregation (no folder component) has no parent
    if not aggr_path or "/" not in aggr_path:
        return None
    parent_dir = os.path.dirname(aggr_path)
    # a model instance aggregation in the path takes precedence over a fileset
    parent = self.resource.get_model_aggregation_in_path(parent_dir)
    if parent is None:
        parent = self.resource.get_fileset_aggregation_in_path(parent_dir)
    return parent
@property
def has_parent(self):
    """True when this aggregation is nested inside another aggregation."""
    parent = self.get_parent()
    return parent is not None
def get_children(self):
    """Returns a list of all aggregations that are directly under the folder that represents
    this (self) aggregation.

    :return: a list of aggregations (empty here -- the base implementation
        has no nested aggregations)

    Note: Aggregation types that support nested aggregation must override this method
    """
    return []
@property
def has_children(self):
    """Returns True if this aggregation contains any other aggregations, otherwise False."""
    return bool(self.get_children())
@property
def has_children_spatial_data(self):
    """Returns True if any contained aggregation has spatial coverage, otherwise False."""
    children = self.get_children()
    return any(child.metadata.spatial_coverage is not None for child in children)
@property
def has_children_temporal_data(self):
    """Returns True if any contained aggregation has temporal coverage, otherwise False."""
    children = self.get_children()
    return any(child.metadata.temporal_coverage is not None for child in children)
def create_aggregation_xml_documents(self, create_map_xml=True):
    """Creates aggregation map xml and aggregation metadata xml files and
    copies them into iRODS storage.

    :param create_map_xml: if true, aggregation map xml file will be created
    :raises Exception: re-raises any failure from xml generation or the
        iRODS copy after logging it
    """
    log = logging.getLogger()
    # create a temp dir where the xml files will be temporarily saved before copying to iRODS;
    # the random + uuid components make collisions between concurrent calls unlikely
    tmpdir = os.path.join(settings.TEMP_FILE_DIR, str(random.getrandbits(32)), uuid4().hex)
    istorage = self.resource.get_irods_storage()
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)
    os.makedirs(tmpdir)
    # create and copy the map and metadata xml documents for the aggregation
    meta_from_file_name = os.path.join(tmpdir, 'metadata.xml')
    map_from_file_name = os.path.join(tmpdir, 'map.xml')
    try:
        with open(meta_from_file_name, 'w') as out:
            out.write(self.metadata.get_xml())
        to_file_name = self.metadata_file_path
        istorage.saveFile(meta_from_file_name, to_file_name, True)
        log.debug("Aggregation metadata xml file:{} created".format(to_file_name))
        if create_map_xml:
            with open(map_from_file_name, 'w') as out:
                out.write(self.generate_map_xml())
            to_file_name = self.map_file_path
            istorage.saveFile(map_from_file_name, to_file_name, True)
            log.debug("Aggregation map xml file:{} created".format(to_file_name))
        # setting bag flag to dirty - as resource map document needs to be re-generated as
        # resource map xml file has references to aggregation map xml file paths
        set_dirty_bag_flag(self.resource)
    except Exception as ex:
        log.error("Failed to create aggregation metadata xml file. Error:{}".format(str(ex)))
        raise ex
    finally:
        # always remove the temp dir, even on failure
        shutil.rmtree(tmpdir)
def generate_map_xml(self):
"""Generates the xml needed to write to the aggregation map xml document"""
from hs_core.hydroshare import encode_resource_url
from hs_core.hydroshare.utils import current_site_url, get_file_mime_type
current_site_url = current_site_url()
# This is the qualified resource url.
hs_res_url = os.path.join(current_site_url, 'resource', self.resource.file_path)
# this is the path to the resource metadata file for download
aggr_metadata_file_path = self.metadata_short_file_path
metadata_url = os.path.join(hs_res_url, aggr_metadata_file_path)
metadata_url = encode_resource_url(metadata_url)
# this is the path to the aggregation resourcemap file for download
aggr_map_file_path = self.map_short_file_path
res_map_url = os.path.join(hs_res_url, aggr_map_file_path)
res_map_url = encode_resource_url(res_map_url)
# make the resource map:
utils.namespaces['citoterms'] = Namespace('http://purl.org/spar/cito/')
utils.namespaceSearchOrder.append('citoterms')
ag_url = | |
# source repository: jingmouren/OpenHGNN
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from . import BaseModel, register_model
@register_model('HGSL')
class HGSL(BaseModel):
    r"""
    Description
    -----------
    HGSL, Heterogeneous Graph Structure Learning from paper <http://www.shichuan.org/doc/100.pdf>.

    Parameters
    ----------
    feat_dims : dict
        The feature dimensions of different node types.
    undirected_relations : str
        The HGSL model can only handle undirected heterographs, while in the dgl.heterograph format, directed edges are
        stored in two different edge types, separately and symmetrically, to represent undirected edge. Hence you have
        to specify which relations are those distinct undirected relations. In this parameter, each undirected relation
        is separated with a comma. For example, in a heterograph with 2 undirected relations: paper-author and
        paper-subject, there are 4 type of edges stored in the dgl.heterograph: paper-author, author-paper,
        paper-subject, subject-paper. Then this parameter can be "paper-author,paper-subject",
        "author-paper,paper-subject", "paper-author,subject-paper" or "author-paper,subject-paper".
    device: str
        The GPU device to select, like 'cuda:0'.
    metapaths : list
        The metapath name list.
    mp_emb_dim : int
        The dimension of metapath embeddings from metapath2vec.
    hidden_dim : int
        The dimension of mapped features in the graph generating procedure.
    num_heads: int
        Number of heads in the K-head weighted cosine similarity function.
    fs_eps : float
        Threshold of feature similarity graph :math:`\epsilon^{FS}`.
    fp_eps : float
        Threshold of feature propagation graph :math:`\epsilon^{FP}`.
    mp_eps : float
        Threshold of semantic graph :math:`\epsilon^{MP}`.
    gnn_emd_dim : int
        The dimension of hidden layers of the downstream GNN.
    gnn_dropout : float
        The dropout ratio of features in the downstream GNN.
    category : str
        The target node type which the model will predict on.
    out_dim : int
        number of classes of the target node type.

    Attributes
    -----------
    fgg_direct : nn.ModuleDict
        Feature similarity graph generator(:math:`S_r^{FS}`) dict in equation 2 of paper, in which keys are
        undirected-relation strs.
    fgg_left: nn.ModuleDict
        Feature propagation graph generator(:math:`S_r^{FH}`) dict which generates the graphs in equation 5 of paper.
    fgg_right: nn.ModuleDict
        Feature propagation graph generator(:math:`S_r^{FT}`) dict which generates the graphs in equation 6 of paper.
    fg_agg : nn.ModuleDict
        A channel attention layer, in which a layer fuses one feature similarity graph and two feature propagation
        graphs generated, in equation 7 of paper.
    sgg_gen : nn.ModuleDict
        Semantic subgraph generator(:math:`S_{r,m}^{MP}`) dict, in equation 8 of paper.
    sg_agg : nn.ModuleDict
        The channel attention layer which fuses semantic subgraphs, in equation 9 of paper.
    overall_g_agg : nn.ModuleDict
        The channel attention layer which fuses the learned feature graph, semantic graph and the original graph.
    encoder : nn.ModuleDict
        The type-specific mapping layer in equation 1 of paper.

    Note
    ----
    This model under the best config has some slight differences compared with the code given by the paper author,
    which seems having little impact on performance:

    1. The regularization item in loss is on all parameters of the model, while in the author's code, it is only on the
    generated adjacent matrix. If you want to implement the latter, a new task of OpenHGNN is needed.

    2. The normalization of input adjacent matrix is separately on different adjacent matrices of different
    relations, while in the author's code, it is on the entire adjacent matrix composed of adjacent matrices of all
    relations.
    """
    @classmethod
    def build_model_from_args(cls, args, hg):
        # per-node-type input feature dimensions, read from the 'h' features
        feat_dims = dict()
        for ntype in hg.ntypes:
            feat_dims[ntype] = hg.nodes[ntype].data['h'].shape[1]
        # Extract undirected_relations: keep only canonical etypes whose
        # relation name is listed in args.undirected_relations
        und_rels = args.undirected_relations.split(',')
        undirected_relations = list()
        for etype in hg.canonical_etypes:
            if etype[1] in und_rels:
                undirected_relations.append(etype)
        device = hg.device
        # NOTE(review): metapath embeddings are discovered on the "paper" node
        # type only -- presumably tied to the ACM-style dataset; confirm for
        # other datasets
        metapaths = list()
        for feature_name in hg.nodes["paper"].data.keys():
            if "m2v" in feature_name:
                metapaths.append(feature_name)
        mp_emb_dim = hg.nodes["paper"].data["pap_m2v_emb"].shape[1]
        return cls(feat_dims=feat_dims, undirected_relations=undirected_relations, device=device, metapaths=metapaths,
                   mp_emb_dim=mp_emb_dim, hidden_dim=args.hidden_dim, num_heads=args.num_heads,
                   fs_eps=args.fs_eps, fp_eps=args.fp_eps, mp_eps=args.mp_eps,
                   gnn_emd_dim=args.gnn_emd_dim, gnn_dropout=args.gnn_dropout,
                   category=args.category, num_class=args.out_dim)

    def __init__(self, feat_dims, undirected_relations, device, metapaths, mp_emb_dim, hidden_dim, num_heads, fs_eps,
                 fp_eps, mp_eps, gnn_emd_dim, gnn_dropout, category, num_class):
        super().__init__()
        self.device = device
        self.ud_rels = undirected_relations
        self.node_types = list(feat_dims.keys())
        self.feat_dims = feat_dims
        self.non_linear = nn.ReLU()
        self.category = category
        self.metapaths = metapaths
        nnmd = nn.ModuleDict
        self.fgg_direct, self.fgg_left, self.fgg_right, self.fg_agg, self.sgg_gen, self.sg_agg, self.overall_g_agg = \
            nnmd({}), nnmd({}), nnmd({}), nnmd({}), nnmd({}), nnmd({}), nnmd({})
        # Feature encoder: one linear projection per node type (equation 1)
        self.encoder = nnmd(
            dict(zip(self.node_types, [nn.Linear(feat_dims[node_type], hidden_dim) for node_type in self.node_types])))
        for canonical_etype in undirected_relations:
            undirected_relation = canonical_etype[1]
            # Feature Graph Generator (equations 2, 5, 6 of the paper)
            self.fgg_direct[undirected_relation] = GraphGenerator(hidden_dim, num_heads, fs_eps, self.device)
            self.fgg_left[undirected_relation] = GraphGenerator(feat_dims[canonical_etype[0]], num_heads, fp_eps,
                                                                self.device)
            self.fgg_right[undirected_relation] = GraphGenerator(feat_dims[canonical_etype[2]], num_heads, fp_eps,
                                                                 self.device)
            self.fg_agg[undirected_relation] = GraphChannelAttLayer(3)
            # Semantic Graph Generator (equations 8, 9): one generator per metapath
            self.sgg_gen[undirected_relation] = nnmd(dict(
                zip(metapaths, [GraphGenerator(mp_emb_dim, num_heads, mp_eps, self.device) for _ in metapaths])))
            self.sg_agg[undirected_relation] = GraphChannelAttLayer(len(metapaths))
            # Overall Graph Generator: fuses feature, semantic and original graphs
            self.overall_g_agg[undirected_relation] = GraphChannelAttLayer(3)
        # Graph Convolution: the downstream GNN expects one shared input dimension
        if len(set(feat_dims.values())) == 1:
            self.GCN = GCN(list(self.feat_dims.values())[0], gnn_emd_dim, num_class, gnn_dropout)
        else:
            raise Exception("Downstream model GCN can only accept features for "
                            "different node types of the same dimension")

    def forward(self, hg, h_features):
        r"""
        Parameters
        ----------
        hg : dgl.DGlHeteroGraph
            All input data is stored in this graph.
            The graph should be an undirected heterogeneous graph.
            Every node type in graph should have its feature named 'h' and the same feature dimension.
            Every node type in graph should have its metapath2vec embedding feature named 'xxx_m2v_emb'
            and the same feature dimension.
        h_features : dict
            Not used.

        Returns
        --------
        result : dict
            The target node type and the corresponding node embeddings.
        """
        def generate_node_indexes(hg):
            # map each node type to its (start, end) row range in the
            # concatenated homogeneous representation
            indexes = dict()
            index = 0
            for node_type in hg.ntypes:
                indexes[node_type] = (index, index + hg.num_nodes(node_type))
                index += hg.num_nodes(node_type)
            return indexes

        def construct_homo_adj(new_adjs, hg, node_indexes, device):
            # assemble the learned per-relation adjacencies into one large
            # homogeneous adjacency, symmetrize and column-normalize it
            new_homo_adj = torch.zeros(size=(hg.num_nodes(), hg.num_nodes())).to(device)
            for canonical_etype, new_adj in new_adjs.items():
                row_range = node_indexes[canonical_etype[0]]
                column_range = node_indexes[canonical_etype[2]]
                new_homo_adj[row_range[0]:row_range[1], column_range[0]:column_range[1]] = new_adj
            new_homo_adj += new_homo_adj.t()
            new_homo_adj = F.normalize(new_homo_adj, dim=0, p=1)
            return new_homo_adj

        def construct_homo_feature(hg, device):
            # stack the per-type 'h' features in ntype order to match the
            # node index layout produced by generate_node_indexes
            homo_feature = list()
            for ntype in hg.ntypes:
                homo_feature.append(hg.nodes[ntype].data['h'])
            homo_feature = torch.cat(homo_feature, dim=0).to(device)
            return homo_feature

        # Heterogeneous Feature Mapping (equation 1)
        mapped_feats = dict()
        for ntype in self.node_types:
            mapped_feats[ntype] = self.non_linear(self.encoder[ntype](hg.nodes[ntype].data['h']))
        # Heterogeneous Graph Generation: one learned adjacency per relation
        new_adjs = dict()
        for canonical_etype in self.ud_rels:
            undirected_relation = canonical_etype[1]
            ori_g = F.normalize(hg.adj(etype=canonical_etype).to_dense().to(self.device), dim=1, p=2)
            # Feature Graph Generation (equations 2, 5, 6, fused by equation 7)
            fg_direct = self.fgg_direct[undirected_relation](mapped_feats[canonical_etype[0]],
                                                             mapped_feats[canonical_etype[2]])
            fmat_l, fmat_r = hg.nodes[canonical_etype[0]].data['h'], hg.nodes[canonical_etype[2]].data['h']
            sim_l, sim_r = self.fgg_left[undirected_relation](fmat_l, fmat_l), self.fgg_right[undirected_relation](
                fmat_r, fmat_r)
            fg_left, fg_right = sim_l.mm(ori_g), sim_r.mm(ori_g.t()).t()
            feat_g = self.fg_agg[undirected_relation]([fg_direct, fg_left, fg_right])
            # Semantic Graph Generation (equations 8, 9)
            sem_g_list = [self.sgg_gen[undirected_relation][mp](hg.nodes[canonical_etype[0]].data[mp],
                                                                hg.nodes[canonical_etype[2]].data[mp]) for mp in
                          self.metapaths]
            sem_g = self.sg_agg[undirected_relation](sem_g_list)
            # Overall Graph (equation 10): fuse feature, semantic and original graphs
            new_adjs[canonical_etype] = self.overall_g_agg[undirected_relation]([feat_g, sem_g, ori_g])
        node_indexes = generate_node_indexes(hg)
        new_homo_adj = construct_homo_adj(new_adjs, hg, node_indexes, self.device)
        homo_feature = construct_homo_feature(hg, self.device)
        x = self.GCN(homo_feature, new_homo_adj)
        # slice out the rows belonging to the target node type
        result = {self.category: x[node_indexes[self.category][0]:node_indexes[self.category][1], :]}
        return result
class MetricCalcLayer(nn.Module):
    r"""
    Description
    -----------
    Calculate metric in equation 3 of paper: a learnable per-dimension
    scaling applied element-wise to the input features.

    Parameters
    ----------
    nhid : int
        The dimension of mapped features in the graph generating procedure.
    """
    def __init__(self, nhid):
        super().__init__()
        # one learnable scale per feature dimension, Xavier-initialized
        weight = nn.Parameter(torch.FloatTensor(1, nhid))
        nn.init.xavier_uniform_(weight)
        self.weight = weight

    def forward(self, h):
        r"""
        Scale *h* element-wise by the learned weight vector (broadcast over rows).

        Parameters
        ----------
        h : tensor
            The result of the Hadamard product in equation 3 of paper.
        """
        return h * self.weight
class GraphGenerator(nn.Module):
    r"""
    Description
    -----------
    Generate a graph using similarity: the K-head weighted cosine similarity
    of equations 3/4 of the paper, thresholded to a sparse adjacency.
    """
    def __init__(self, dim, num_head=2, threshold=0.1, dev=None):
        # dim: feature dimension of the inputs
        # num_head: number of weighted-cosine heads averaged together
        # threshold: similarities below this value are zeroed out
        # dev: torch device the output tensors are placed on
        super(GraphGenerator, self).__init__()
        self.threshold = threshold
        self.metric_layer = nn.ModuleList()
        for i in range(num_head):
            self.metric_layer.append(MetricCalcLayer(dim))
        self.num_head = num_head
        self.dev = dev

    def forward(self, left_h, right_h):
        r"""
        Parameters
        ----------
        left_h : tensor
            The first input embedding matrix.
        right_h : tensor
            The second input embedding matrix.
        """
        def cos_sim(a, b, eps=1e-8):
            # row-normalize both matrices (clamping norms at eps to avoid
            # division by zero); one matmul then yields all pairwise
            # cosine similarities
            a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None]
            a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n))
            b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n))
            sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1))
            return sim_mt

        # all-zero inputs produce an all-zero graph
        if torch.sum(left_h) == 0 or torch.sum(right_h) == 0:
            return torch.zeros((left_h.shape[0], right_h.shape[0])).to(self.dev)
        s = torch.zeros((left_h.shape[0], right_h.shape[0])).to(self.dev)
        zero_lines = torch.nonzero(torch.sum(left_h, 1) == 0)
        # The ReLU function will generate zero lines, which lead to the nan (divided by zero) problem.
        # NOTE(review): this nudges left_h in place, i.e. it mutates the
        # caller's tensor -- confirm that is acceptable for all call sites
        if len(zero_lines) > 0:
            left_h[zero_lines, :] += 1e-8
        # average the per-head weighted cosine similarities
        for i in range(self.num_head):
            weighted_left_h = self.metric_layer[i](left_h)
            weighted_right_h = self.metric_layer[i](right_h)
            s += cos_sim(weighted_left_h, weighted_right_h)
        s /= self.num_head
        # sparsify: drop entries below the threshold
        s = torch.where(s < self.threshold, torch.zeros_like(s), s)
        return s
class GraphChannelAttLayer(nn.Module):
r"""
Description
-----------
The graph channel attention layer in equation 7, 9 and 10 of paper.
"""
def __init__(self, num_channel):
super(GraphChannelAttLayer, self).__init__()
self.weight = nn.Parameter(torch.Tensor(num_channel, 1, 1))
nn.init.constant_(self.weight, 0.1) # equal weight
def forward(self, adj_list):
r"""
Parameters
----------
adj_list : list
The list of adjacent matrices.
"""
adj_list = torch.stack(adj_list)
# Row normalization of all graphs generated
adj_list = F.normalize(adj_list, dim=1, p=1)
# Hadamard | |
self.fid[grp_path]
if res is None:
res = grp['delta_time'][0, :].astype(int)
else:
res = np.append(res, grp['delta_time'][0, :].astype(int))
else:
grp_path = PurePosixPath(msm_path, 'OBSERVATIONS')
grp = self.fid[str(grp_path)]
res = grp['delta_time'][0, :].astype(int)
return res
def get_instrument_settings(self, band=None) -> np.ndarray:
    """
    Return the instrument settings of the measurement.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.

    Returns
    -------
    numpy.ndarray or None
        Instrument settings, or None when no measurement is selected.

    Raises
    ------
    ValueError
        If the requested band is not present in the product.
    """
    if not self.__msm_path:
        return None
    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')
    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name
    res = None
    if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'INSTRUMENT')
            grp = self.fid[str(grp_path)]
            if res is None:
                res = grp['instrument_settings'][:]
            else:
                res = np.append(res, grp['instrument_settings'][:])
    elif msm_type == 'DPQF_MAP':
        grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
        grp = self.fid[str(grp_path)]
        dset = grp['analog_offset_swir_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'INSTRUMENT')
            # BUG FIX: this branch indexed self.fid with the PurePosixPath
            # object itself; every other lookup converts with str() first,
            # as h5py expects a str/bytes key.
            grp = self.fid[str(grp_path)]
            if res is None:
                res = grp['instrument_settings'][:]
            else:
                res = np.append(res, grp['instrument_settings'][:])
    elif msm_type == 'NOISE':
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_msmt_keys']
        # Use the ICID of the middle measurement as representative.
        icid = dset['icid'][dset.size // 2]
        grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                 f'BACKGROUND_RADIANCE_MODE_{icid:04d}',
                                 'INSTRUMENT')
        grp = self.fid[str(grp_path)]
        res = grp['instrument_settings'][:]
    else:
        grp_path = PurePosixPath(msm_path, 'INSTRUMENT')
        grp = self.fid[str(grp_path)]
        res = grp['instrument_settings'][:]
    return res
def get_exposure_time(self, band=None) -> list:
    """
    Return the pixel exposure time of the measurements.

    For the SWIR bands (band number above 6) the exposure time is
    calculated from the instrument parameters 'int_delay' and 'int_hold'.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.
    """
    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')
    # obtain instrument settings
    instr_arr = self.get_instrument_settings(band)
    if instr_arr is None:
        return None
    # calculate exact exposure time, per measurement
    is_swir = int(band) > 6
    res = []
    for instr in instr_arr:
        if is_swir:
            res.append(1.25e-6
                       * (65540 - instr['int_delay'] + instr['int_hold']))
        else:
            res.append(instr['exposure_time'])
    return res
def get_housekeeping_data(self, band=None):
    """
    Return housekeeping data of the measurements.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.

    Returns
    -------
    numpy.ndarray or None
        Housekeeping data, or None when no measurement is selected.

    Raises
    ------
    ValueError
        If the requested band is not present in the product.
    """
    if not self.__msm_path:
        return None
    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')
    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name
    res = None
    if msm_type in ['ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR']:
        grp = self.fid[msm_path]
        dset = grp[msm_type.lower() + '_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'INSTRUMENT')
            grp = self.fid[str(grp_path)]
            if res is None:
                res = np.squeeze(grp['housekeeping_data'])
            else:
                res = np.append(res, np.squeeze(grp['housekeeping_data']))
    elif msm_type in ['DPQF_MAP', 'NOISE']:
        grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
        grp = self.fid[str(grp_path)]
        dset = grp['analog_offset_swir_group_keys']
        group_keys = dset['group'][:]
        for name in group_keys:
            # BUG FIX: the f-string prefix was missing here
            # ('BAND{band}_CALIBRATION'), so the literal text "{band}"
            # ended up in the group path instead of the band number.
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'INSTRUMENT')
            grp = self.fid[str(grp_path)]
            if res is None:
                res = np.squeeze(grp['housekeeping_data'])
            else:
                res = np.append(res, np.squeeze(grp['housekeeping_data']))
    else:
        grp_path = PurePosixPath(msm_path, 'INSTRUMENT')
        grp = self.fid[str(grp_path)]
        res = np.squeeze(grp['housekeeping_data'])
    return res
# -------------------------
def get_msmt_keys(self, band=None):
    """
    Read the measurement keys from the analysis groups.

    Parameters
    ----------
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.

    Returns
    -------
    [ANALOG_OFFSET_SWIR] analog_offset_swir_group_keys
    [LONG_TERM_SWIR] long_term_swir_group_keys
    [NOISE] noise_msmt_keys
    else None
    """
    if not self.__msm_path:
        return None
    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')
    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name
    # Key-dataset name depends on the measurement type.
    if msm_type in ('ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR'):
        dset_name = msm_type.lower() + '_group_keys'
    elif msm_type == 'NOISE':
        dset_name = msm_type.lower() + '_msmt_keys'
    else:
        return None
    return np.squeeze(self.fid[msm_path][dset_name])
# -------------------------
def get_msm_attr(self, msm_dset, attr_name, band=None):
    """
    Return an attribute of measurement dataset *msm_dset*.

    Parameters
    ----------
    msm_dset : string
        Name of measurement dataset.
    attr_name : string
        Name of the attribute.
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.

    Returns
    -------
    out : scalar or numpy array
        Value of attribute *attr_name*, or None when not found.
    """
    if not self.__msm_path:
        return None
    if band is None:
        band = self.bands[0]
    elif band not in self.bands:
        raise ValueError('band not found in product')
    msm_path = str(self.__msm_path).replace('%', band)
    # The dataset may live in OBSERVATIONS, ANALYSIS or the group itself.
    for dset_grp in ('OBSERVATIONS', 'ANALYSIS', ''):
        ds_path = str(PurePosixPath(msm_path, dset_grp, msm_dset))
        if ds_path not in self.fid:
            continue
        attrs = self.fid[ds_path].attrs
        if attr_name not in attrs:
            continue
        attr = attrs[attr_name]
        # h5py may return byte strings for string-valued attributes.
        if isinstance(attr, bytes):
            return attr.decode('ascii')
        return attr
    return None
def get_geo_data(self, band=None,
                 geo_dset='satellite_latitude,satellite_longitude'):
    """
    Return data of selected datasets from the GEODATA group.

    Parameters
    ----------
    geo_dset : string
        Name(s) of datasets in the GEODATA group, comma separated.
        Default is 'satellite_latitude,satellite_longitude'.
    band : None or {'1', '2', '3', ..., '8'}
        Select one of the bands present in the product.
        Default is None, which selects the first available band.

    Returns
    -------
    out : dictionary
        Data of the selected datasets from the GEODATA group; the keys
        are taken from parameter *geo_dset*.
    """
    if not self.__msm_path:
        return None
    if band is None:
        band = str(self.bands[0])
    elif band not in self.bands:
        raise ValueError('band not found in product')
    msm_path = str(self.__msm_path).replace('%', band)
    msm_type = self.__msm_path.name
    keys = geo_dset.split(',')
    res = {}

    def read_geo(grp):
        # NOTE: when several groups are read, later groups overwrite the
        # entries of earlier ones, matching the original behaviour.
        for key in keys:
            res[key] = np.squeeze(grp[key])

    if msm_type in ('ANALOG_OFFSET_SWIR', 'LONG_TERM_SWIR'):
        grp = self.fid[msm_path]
        group_keys = grp[msm_type.lower() + '_group_keys']['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'GEODATA')
            read_geo(self.fid[str(grp_path)])
    elif msm_type in ('DPQF_MAP', 'NOISE'):
        grp_path = PurePosixPath(msm_path).parent / 'ANALOG_OFFSET_SWIR'
        grp = self.fid[str(grp_path)]
        group_keys = grp['analog_offset_swir_group_keys']['group'][:]
        for name in group_keys:
            grp_path = PurePosixPath(f'BAND{band}_CALIBRATION',
                                     name.decode('ascii'),
                                     'GEODATA')
            read_geo(self.fid[str(grp_path)])
    else:
        grp_path = PurePosixPath(msm_path, 'GEODATA')
        read_geo(self.fid[str(grp_path)])
    return res
def get_msm_data(self, msm_dset, band='78', *, read_raw=False,
columns=None, fill_as_nan=True):
"""
Read datasets from a measurement selected by class-method "select"
Parameters
----------
msm_dset : string
name of measurement dataset
if msm_dset is None then show names of available datasets
band : {'1', '2', '3', ..., '8', '12', '34', '56', '78'}
Select data from one spectral band or channel
Default is '78' which combines band 7/8 to SWIR detector layout
read_raw : boolean
Perform raw read: without slicing or data conversion,
and ignore keywords: colums, fill_as_nan.
Default: False
columns : [i, j]
Slice data on fastest axis (columns) as from index 'i' to 'j'
fill_as_nan : boolean
Replace (float) FillValues with Nan's, when True
Returns
-------
out : array
Data of measurement dataset "msm_dset"
"""
fillvalue = float.fromhex('0x1.ep+122')
if not self.__msm_path:
return None
if not isinstance(band, str):
raise TypeError('band must be a string')
if band not in self.bands:
raise ValueError('band not found in product')
data = []
if read_raw:
for ii in band:
for dset_grp in ['OBSERVATIONS', 'ANALYSIS', '']:
msm_path = str(self.__msm_path).replace('%', ii)
ds_path = PurePosixPath(msm_path, dset_grp, msm_dset)
if str(ds_path) not in self.fid:
continue
data.append(np.squeeze(self.fid[str(ds_path)]))
return data
# skip row257 from the SWIR detector
rows = None
if int(band[0]) > 6:
rows = [0, -1]
# list potential names of the dataset dimensions
time_list = ['time', 'scanline']
row_list = ['width', 'pixel', 'pixel_window', 'ground_pixel']
column_list = ['height', 'spectral_channel', 'spectral_channel_window']
column_dim = None # column dimension is unknown
for ii in band:
for dset_grp in ['OBSERVATIONS', 'ANALYSIS', '']:
msm_path = str(self.__msm_path).replace('%', ii)
ds_path = PurePosixPath(msm_path, dset_grp, msm_dset)
if str(ds_path) not in self.fid:
continue
dset = self.fid[str(ds_path)]
skipped = 0
data_sel = ()
for xx in range(dset.ndim):
if len(dset.dims[xx][0][:]) == 1:
skipped += 1
dim_name = PurePosixPath(dset.dims[xx][0].name).name
if dim_name in time_list:
data_sel += (slice(None),)
elif dim_name in row_list:
if rows is None:
data_sel += (slice(None),)
else:
data_sel += (slice(*rows),)
elif dim_name in column_list:
column_dim = xx - skipped
if columns is None:
data_sel += (slice(None),)
else:
data_sel += (slice(*columns),)
else:
raise ValueError
if dset.dtype == np.float32:
res = np.squeeze(dset.astype(float)[data_sel])
else:
res = | |
window_remove_all(self):
"""Remove all filtering windows."""
self.window_list = []
def window_print_all(self):
    """Print the settings of every filtering window to the console."""
    for i, window in enumerate(self.window_list):
        for k in window.keys():
            if k == "turbines":
                # Compact the turbine id list into ranges for readability
                str_short = ut.convert_list_to_ranges(window[k])
                print("window_list[%d][%s] = " % (i, k), str_short)
            elif not k == "idx":
                print("window_list[%d][%s] = " % (i, k), window[k])
        print("")
def filter_by_windows(self):
    """Apply window filters to the dataset for the turbines of interest.

    Each entry of ``self.window_list`` describes a rectangular region in
    (wind speed, power) space together with the turbines it applies to.
    Samples flagged by at least one window are recorded per turbine in
    ``self.df_filters[ti]["window_outlier"]``, after which the status
    flags of the dataframe are updated.
    """
    print("Filtering data by specified regions...")
    for ti in self.turbine_list:
        df = self.df.copy()
        # 1.0 marks a sample flagged by at least one window for turbine ti
        out_of_window_ids = np.zeros(df.shape[0])
        # Restrict to the windows that apply to this turbine
        window_list = [w for w in self.window_list if ti in w["turbines"]]
        print(" ")
        print(
            "Applying %d window filters to the df for turbine %d"
            % (len(window_list), ti)
        )
        for window in window_list:
            idx = window["idx"]
            ws_range = window["ws_range"]
            pow_range = window["pow_range"]
            axis = window["axis"]
            # The 'axis' setting swaps which quantity is passed first to
            # filters.window_range_flag; presumably it selects whether the
            # window is anchored on power (axis == 0) or on wind speed —
            # TODO(review): confirm against filters.window_range_flag.
            if axis == 0:
                ii_out_of_window = filters.window_range_flag(
                    df["pow_%03d" % ti],
                    pow_range[0],
                    pow_range[1],
                    df["ws_%03d" % ti],
                    ws_range[0],
                    ws_range[1],
                )
            else:
                ii_out_of_window = filters.window_range_flag(
                    df["ws_%03d" % ti],
                    ws_range[0],
                    ws_range[1],
                    df["pow_%03d" % ti],
                    pow_range[0],
                    pow_range[1],
                )
            # Merge findings from all windows
            out_of_window_ids[ii_out_of_window] = int(1)
            print(
                "  Removed %d outliers using window[%d]."
                % (int(sum(ii_out_of_window)), idx)
            )
        print(
            "Removed a total of %d outliers using the %d windows."
            % (int(sum(out_of_window_ids)), len(window_list))
        )
        # NOTE(review): df.index values are used here as positional indices
        # into a fresh zeros array — this assumes the dataframe carries a
        # default RangeIndex (0..n-1); verify for re-indexed dataframes.
        df_out_of_windows = np.zeros(self.df.shape[0])
        out_of_window_indices = df.index[np.where(out_of_window_ids)[0]]
        df_out_of_windows[out_of_window_indices] = 1
        self.df_filters[ti]["window_outlier"] = [
            bool(i) for i in df_out_of_windows
        ]
    # Finally, update status columns in dataframe
    self._update_status_flags()
def filter_by_power_curve(
    self,
    m_ws_lb=0.95,
    m_pow_lb=1.01,
    m_ws_rb=1.05,
    m_pow_rb=0.99,
    ws_deadband=0.50,
    pow_deadband=20.0,
    no_iterations=10,
    cutoff_ws=25.0,
):
    """Filter the data by offset from the mean power curve in x-
    directions. This is an iterative process because the estimated mean
    curve actually changes as data is filtered. This process typically
    converges within a couple iterations.

    Args:
        m_ws_lb (float, optional): Multiplier on the wind speed defining
        the left bound for the power curve. Any data to the left of this
        curve is considered faulty. Defaults to 0.95.
        m_pow_lb (float, optional): Multiplier on the power defining
        the left bound for the power curve. Any data to the left of this
        curve is considered faulty. Defaults to 1.01.
        m_ws_rb (float, optional): Multiplier on the wind speed defining
        the right bound for the power curve. Any data to the right of this
        curve is considered faulty. Defaults to 1.05.
        m_pow_rb (float, optional): Multiplier on the power defining
        the right bound for the power curve. Any data to the right of this
        curve is considered faulty. Defaults to 0.99.
        ws_deadband (float, optional): Deadband in wind speed added
        around the mean curve before the multipliers. Defaults to 0.50.
        pow_deadband (float, optional): Deadband in power added around
        the mean curve before the multipliers. Defaults to 20.0.
        no_iterations (int, optional): Number of iterations. The
        solution typically converges in 2-3 steps, but as the process is
        very fast, it's better to run a higher number of iterations.
        Defaults to 10.
        cutoff_ws (float, optional): Wind speed up to which the filter
        is applied. Defaults to 25.0.
    """
    print("Filtering data by deviations from the mean power curve...")
    for ii in range(no_iterations):
        # Create upper and lower bounds around mean curve
        df_xy = self.pw_curve_df.copy()
        x_full = np.array(df_xy["ws"], dtype=float)
        x = x_full[x_full < cutoff_ws]  # Only filter up to cutoff_ws
        self.pw_curve_df_bounds = pd.DataFrame({"ws": x})
        for ti in self.turbine_list:
            y = np.array(df_xy["pow_%03d" % ti], dtype=float)
            y = y[x_full < cutoff_ws]  # Only filter up to cutoff_ws
            # No mean curve available for this turbine: skip filtering
            if np.all(np.isnan(y)):
                self.pw_curve_df_bounds["pow_%03d_lb" % ti] = None
                self.pw_curve_df_bounds["pow_%03d_rb" % ti] = None
                continue
            # Create interpolants to left and right of mean curve
            ws_array = np.array(self.df["ws_%03d" % ti], dtype=float)
            pow_array = np.array(self.df["pow_%03d" % ti], dtype=float)
            # Specify left side bound and non-decreasing
            lb_ws = x * m_ws_lb - ws_deadband / 2.0
            lb_pow = y * m_pow_lb + pow_deadband / 2.0
            # Make sure first couple entries are not NaN
            # (replace leading NaNs with tiny increasing values, in place)
            jjj = 0
            while np.isnan(lb_pow[jjj]):
                lb_pow[jjj] = jjj / 1000.0
                jjj = jjj + 1
            # Ensure non-decreasing for lower half of wind speeds
            id_center = np.argmin(np.abs(lb_ws - 9.0))  # Assume value is fine near 9 m/s
            lb_ws_l = lb_ws[0:id_center]
            lb_pow_l = lb_pow[0:id_center]
            # Keep only points where the curve does not decrease and is
            # not NaN, then re-interpolate over the removed points
            good_ids = (
                np.hstack([(np.diff(lb_pow_l) >= 0.0), True])
                &
                (~np.isnan(lb_pow[0:id_center]))
            )
            good_ids[0] = True
            lb_pow_l = np.interp(lb_ws_l, lb_ws_l[good_ids], lb_pow_l[good_ids])
            lb_pow[0:id_center] = lb_pow_l
            # np.interp below requires NaN-free, sorted abscissae
            non_nans = (~np.isnan(lb_pow) & ~np.isnan(lb_ws))
            lb_pow = lb_pow[non_nans]
            lb_ws = lb_ws[non_nans]
            # Specify right side bound and ensure monotonically increasing
            rb_ws = x * m_ws_rb + ws_deadband / 2.0
            rb_pow = y * m_pow_rb - pow_deadband / 2.0
            # Make sure first couple entries are not NaN
            jjj = 0
            while np.isnan(rb_pow[jjj]):
                rb_pow[jjj] = jjj / 1000.0
                jjj = jjj + 1
            # Ensure non-decreasing for lower half of wind speeds
            id_center = np.argmin(np.abs(rb_ws - 9.0))  # Assume value is fine near 9 m/s
            rb_ws_l = rb_ws[0:id_center]
            rb_pow_l = rb_pow[0:id_center]
            good_ids = (
                np.hstack([(np.diff(rb_pow_l) >= 0.0), True])
                &
                (~np.isnan(rb_pow[0:id_center]))
            )
            good_ids[0] = True
            rb_pow_l = np.interp(rb_ws_l, rb_ws_l[good_ids], rb_pow_l[good_ids])
            rb_pow[0:id_center] = rb_pow_l
            non_nans = (~np.isnan(rb_pow) & ~np.isnan(rb_ws))
            rb_pow = rb_pow[non_nans]
            rb_ws = rb_ws[non_nans]
            # Finally interpolate: for each measured power, find the
            # wind speeds of the left/right bounds at that power level
            ws_lb = np.interp(
                x=pow_array,
                xp=lb_pow,
                fp=lb_ws,
                left=np.nan,
                right=np.nan,
            )
            ws_rb = np.interp(
                x=pow_array,
                xp=rb_pow,
                fp=rb_ws,
                left=np.nan,
                right=np.nan,
            )
            # A measurement is an outlier when its wind speed falls
            # outside the [ws_lb, ws_rb] envelope for its power
            out_of_bounds = (ws_array < ws_lb) | (ws_array > ws_rb)
            self.df_filters[ti]["mean_pow_curve_outlier"] = out_of_bounds
            # Write left and right bound to own curve
            self.pw_curve_df_bounds["pow_%03d_lb" % ti] = np.interp(
                x=x,
                xp=lb_ws,
                fp=lb_pow,
                left=np.nan,
                right=np.nan,
            )
            self.pw_curve_df_bounds["pow_%03d_rb" % ti] = np.interp(
                x=x,
                xp=rb_ws,
                fp=rb_pow,
                left=np.nan,
                right=np.nan,
            )
        # Update status flags and re-estimate mean power curve
        verbose = ii == no_iterations - 1  # Only print final iteration
        self._update_status_flags(verbose=verbose)
def filter_by_floris_power_curve(
self,
fi,
m_ws_lb=0.95,
m_pow_lb=1.01,
m_ws_rb=1.05,
m_pow_rb=0.99,
ws_deadband=0.50,
pow_deadband=20.0,
cutoff_ws=25.0,
):
"""Filter the data by offset from the floris power curve in x-
directions.
Args:
fi (FlorisInterface): The FlorisInterface object for the farm
m_ws_lb (float, optional): Multiplier on the wind speed defining
the left bound for the power curve. Any data to the left of this
curve is considered faulty. Defaults to 0.95.
m_pow_lb (float, optional): Multiplier on the power defining
the left bound for the power curve. Any data to the left of this
curve is considered faulty. Defaults to 1.01.
m_ws_rb (float, optional): Multiplier on the wind speed defining
the right bound for the power curve. Any data to the right of this
curve is considered faulty. Defaults to 1.05.
m_pow_rb (float, optional): Multiplier on the power defining
the right bound for the power curve. Any data to the right of this
curve is considered faulty. Defaults to 0.99.
"""
print("Filtering data by deviations from the floris power curve...")
# Create upper and lower bounds around floris curve
df_xy = self.pw_curve_df.copy()
rho = fi.floris.flow_field.air_density
for ti in range(len(fi.layout_x)):
fi_turb = fi.floris.farm.turbine_definitions[ti]
Ad = 0.25 * np.pi * fi_turb["rotor_diameter"] ** 2.0
ws_array = np.array(fi_turb["power_thrust_table"]["wind_speed"])
cp_array = np.array(fi_turb["power_thrust_table"]["power"])
pow_array = (
0.5 * rho * ws_array ** 3.0 * Ad * cp_array * 1.0e-3
)
df_xy.loc[df_xy.index, "pow_{:03d}".format(ti)] = (
np.interp(xp=ws_array, fp=pow_array, x=df_xy["ws"])
)
x_full = np.array(df_xy["ws"], dtype=float)
x = x_full[x_full < cutoff_ws]
self.pw_curve_df_bounds = pd.DataFrame({"ws": x})
for ti in self.turbine_list:
y = np.array(df_xy["pow_%03d" % ti], dtype=float)
y = y[x_full < cutoff_ws]
if np.all(np.isnan(y)):
self.pw_curve_df_bounds["pow_%03d_lb" % ti] = None
self.pw_curve_df_bounds["pow_%03d_rb" % ti] = None
continue
# Create interpolants to left and right of mean curve
ws_array = np.array(self.df["ws_%03d" % ti], dtype=float)
pow_array = np.array(self.df["pow_%03d" % ti], dtype=float)
# Specify left side bound and non-decreasing
lb_ws = x * m_ws_lb - ws_deadband / 2.0
lb_pow = y * m_pow_lb + pow_deadband / 2.0
# Make sure first couple entries are not NaN
jjj = 0
while np.isnan(lb_pow[jjj]):
lb_pow[jjj] = jjj / 1000.0
jjj = jjj + 1
# Ensure non-decreasing for lower half of wind speeds
id_center = np.argmin(np.abs(lb_ws - 9.0)) # Assume value is fine near 9 m/s
lb_ws_l = lb_ws[0:id_center]
lb_pow_l = lb_pow[0:id_center]
good_ids = (
np.hstack([(np.diff(lb_pow_l) >= 0.0), True])
&
(~np.isnan(lb_pow[0:id_center]))
)
good_ids[0] = True
lb_pow_l = np.interp(lb_ws_l, lb_ws_l[good_ids], lb_pow_l[good_ids])
lb_pow[0:id_center] = lb_pow_l
non_nans = (~np.isnan(lb_pow) & ~np.isnan(lb_ws))
lb_pow = lb_pow[non_nans]
lb_ws = lb_ws[non_nans]
# Specify right side bound and ensure monotonically increasing
rb_ws = x * m_ws_rb + ws_deadband / 2.0
rb_pow = y * m_pow_rb - pow_deadband / 2.0
# Make sure first couple entries are not NaN
jjj = 0
while np.isnan(rb_pow[jjj]):
rb_pow[jjj] = jjj / 1000.0
| |
nsample, nsimu):
'''
Define indices to sample from posteriors.
Args:
* **nsample** (:py:class:`int`): Number of samples to draw from posterior.
* **nsimu** (:py:class:`int`): Number of MCMC simulations.
Returns:
* **iisample** (:class:`~numpy.ndarray`): Array of indices in posterior set.
* **nsample** (:py:class:`int`): Number of samples to draw from posterior.
'''
# define sample points
if nsample >= nsimu:
iisample = range(nsimu) # sample all points from chain
nsample = nsimu
else:
# randomly sample from chain
iisample = np.ceil(np.random.rand(nsample)*nsimu) - 1
iisample = iisample.astype(int)
return iisample, nsample
# --------------------------------------------
def _calculate_ci_for_data_sets(self, testchain, waitbar, lims):
    '''
    Calculate credible intervals.

    Args:
        * **testchain** (:class:`~numpy.ndarray`): Sample points from posterior density.
        * **waitbar** (:py:class:`bool`): Flag to turn on progress bar.
        * **lims** (:class:`~numpy.ndarray`): Array of quantile limits.

    Returns:
        * **credible_intervals** (:py:class:`list`): List of credible intervals.
    '''
    credible_intervals = []
    for ii in range(len(self.datapred)):
        datapredii, nrow, ncol, modelfun, test = self._setup_interval_ii(
            ii=ii, datapred=self.datapred, nrow=self.__nrow, ncol=self.__ncol,
            modelfunction=self.modelfunction, local=self.__local)
        # Evaluate the model over the sampled chain for data set ii
        ysave = self._calc_credible_ii(
            testchain=testchain, nrow=nrow, ncol=ncol,
            waitbar=waitbar, test=test, modelfun=modelfun, datapredii=datapredii)
        # Reduce the responses to the requested quantiles
        credible_intervals.append(self._generate_quantiles(ysave, lims, ncol))
    return credible_intervals
# --------------------------------------------
def _calculate_ci_and_pi_for_data_sets(self, testchain, s2chain, iisample, waitbar, sstype, lims):
    '''
    Calculate credible and prediction intervals.

    Args:
        * **testchain** (:class:`~numpy.ndarray`): Sample points from posterior density.
        * **s2chain** (:class:`~numpy.ndarray`): Chain of observation errors.
        * **iisample** (:class:`~numpy.ndarray`): Array of indices in posterior set.
        * **waitbar** (:py:class:`bool`): Flag to turn on progress bar.
        * **sstype** (:py:class:`int`): Flag to specify sstype.
        * **lims** (:class:`~numpy.ndarray`): Array of quantile limits.

    Returns:
        * **credible_intervals** (:py:class:`list`): List of credible intervals.
        * **prediction_intervals** (:py:class:`list`): List of prediction intervals.
    '''
    credible_intervals = []
    prediction_intervals = []
    for ii in range(len(self.datapred)):
        datapredii, nrow, ncol, modelfun, test = self._setup_interval_ii(
            ii=ii, datapred=self.datapred, nrow=self.__nrow, ncol=self.__ncol,
            modelfunction=self.modelfunction, local=self.__local)
        # Observation-error columns belonging to data set ii
        first, last = self.__s2chain_index[ii][0], self.__s2chain_index[ii][1]
        tests2chain = s2chain[iisample, first:last]
        # Evaluate model responses and noisy observations over the chain
        ysave, osave = self._calc_credible_and_prediction_ii(
            testchain=testchain, tests2chain=tests2chain, nrow=nrow, ncol=ncol,
            waitbar=waitbar, sstype=sstype, test=test, modelfun=modelfun,
            datapredii=datapredii)
        # Reduce both response sets to the requested quantiles
        credible_intervals.append(self._generate_quantiles(ysave, lims, ncol))
        prediction_intervals.append(self._generate_quantiles(osave, lims, ncol))
    return credible_intervals, prediction_intervals
# --------------------------------------------
@classmethod
def _setup_interval_ii(cls, ii, datapred, nrow, ncol, modelfunction, local):
    '''
    Setup values for interval ii.

    Args:
        * **ii** (:py:class:`int`): Iteration number.
        * **datapred** (:py:class:`list`): List of data sets.
        * **nrow** (:py:class:`list`): List of rows in each data set.
        * **ncol** (:py:class:`list`): List of columns in each data set.
        * **modelfunction** (:py:class:`func` or :py:class:`list`): Model function handle.
        * **local** : Local-parameter definition passed to :func:`set_local_parameters`.

    Returns:
        * **datapredii** (:class:`~numpy.ndarray`): Data set.
        * **nrow** (:py:class:`int`): Number of rows in data set.
        * **ncol** (:py:class:`int`): Number of columns in data set.
        * **modelfun** (:py:class:`func`): Model function handle.
        * **test** (:class:`~numpy.ndarray`): Array of booleans corresponding to local test.
    '''
    datapredii = datapred[ii]
    nrowii = nrow[ii]
    ncolii = ncol[ii]
    # A list of model functions means one function per batch set
    modelfun = modelfunction[ii] if isinstance(modelfunction, list) else modelfunction
    # some parameters may only apply to certain batch sets
    test = set_local_parameters(ii=ii, local=local)
    return datapredii, nrowii, ncolii, modelfun, test
# --------------------------------------------
def _calc_credible_ii(self, testchain, nrow, ncol, waitbar, test, modelfun, datapredii):
    '''
    Calculate model responses for data set ii.

    Args:
        * **testchain** (:class:`~numpy.ndarray`): Sample points from posterior density.
        * **nrow** (:py:class:`int`): Number of rows in data set.
        * **ncol** (:py:class:`int`): Number of columns in data set.
        * **waitbar** (:py:class:`bool`): Flag to turn on progress bar.
        * **test** (:class:`~numpy.ndarray`): Array of booleans corresponding to local test.
        * **modelfun** (:py:class:`func`): Model function handle.
        * **datapredii** (:class:`~numpy.ndarray`): Data set.

    Returns:
        * **ysave** (:class:`~numpy.ndarray`): Model responses.
    '''
    nsample = testchain.shape[0]
    theta = self.__theta
    ysave = np.zeros([nsample, nrow, ncol])
    for kk, isa in enumerate(testchain):
        # progress bar
        if waitbar is True:
            self.__wbarstatus.update(kk)
        # overwrite the sampled (non-fixed) parameters; fixed ones keep
        # their values from theta
        theta[self.__parind[:]] = isa
        th = theta[test]
        # evaluate model and store its response
        ysave[kk, :, :] = modelfun(datapredii, th).reshape(nrow, ncol)
    return ysave
# --------------------------------------------
def _calc_credible_and_prediction_ii(self, testchain, tests2chain, nrow, ncol, waitbar,
                                     sstype, test, modelfun, datapredii):
    '''
    Calculate model responses and noisy observations for data set ii.

    Args:
        * **testchain** (:class:`~numpy.ndarray`): Sample points from posterior density.
        * **tests2chain** (:class:`~numpy.ndarray`): Sample points from observation errors.
        * **nrow** (:py:class:`int`): Number of rows in data set.
        * **ncol** (:py:class:`int`): Number of columns in data set.
        * **waitbar** (:py:class:`bool`): Flag to turn on progress bar.
        * **sstype** (:py:class:`int`): Flag to specify sstype.
        * **test** (:class:`~numpy.ndarray`): Array of booleans corresponding to local test.
        * **modelfun** (:py:class:`func`): Model function handle.
        * **datapredii** (:class:`~numpy.ndarray`): Data set.

    Returns:
        * **ysave** (:class:`~numpy.ndarray`): Model responses.
        * **osave** (:class:`~numpy.ndarray`): Model responses with observation errors.
    '''
    nsample = testchain.shape[0]
    theta = self.__theta
    ysave = np.zeros([nsample, nrow, ncol])
    osave = np.zeros([nsample, nrow, ncol])
    for kk, isa in enumerate(testchain):
        # progress bar
        if waitbar is True:
            self.__wbarstatus.update(kk)
        # overwrite the sampled (non-fixed) parameters; fixed ones keep
        # their values from theta
        theta[self.__parind[:]] = isa
        th = theta[test]
        # evaluate model
        ypred = modelfun(datapredii, th).reshape(nrow, ncol)
        # draw matching observation errors and perturb the response
        s2elem = tests2chain[kk]
        if s2elem.shape != (1, s2elem.size):
            s2elem = s2elem.reshape(1, s2elem.shape[0])  # make row vector
        opred = self._observation_sample(s2elem, ypred, sstype)
        # store model output with and without observation errors
        ysave[kk, :, :] = ypred
        osave[kk, :, :] = opred
    return ysave, osave
# --------------------------------------------
@classmethod
def _observation_sample(cls, s2elem, ypred, sstype):
    '''
    Calculate model response with observation errors.

    Args:
        * **s2elem** (:class:`~numpy.ndarray`): Observation error(s), row vector.
        * **ypred** (:class:`~numpy.ndarray`): Model responses.
        * **sstype** (:py:class:`int`): Flag to specify sstype.

    Returns:
        * **opred** (:class:`~numpy.ndarray`): Model responses with observation errors.
    '''
    # check shape of s2elem and ypred
    ny = ypred.shape[1]
    ns = s2elem.shape[1]
    if ns != ny and ns == 1:
        # a single error variance applies to every output column
        s2elem = s2elem*np.ones([ny, 1])
    elif ns != ny and ns != 1:
        sys.exit('Unclear data structure: error variances do not match size of model output')
    # column-wise Gaussian noise scaled by the error standard deviations
    noise = np.matmul(np.random.standard_normal(ypred.shape),
                      np.diagflat(np.sqrt(s2elem))).reshape(ypred.shape)
    if sstype == 0:
        opred = ypred + noise
    elif sstype == 1:  # sqrt
        opred = (np.sqrt(ypred) + noise)**2
    elif sstype == 2:  # log
        opred = ypred*np.exp(noise)
    else:
        sys.exit('Unknown sstype')
    return opred
# --------------------------------------------
@classmethod
def _generate_quantiles(cls, response, lims, ncol):
    '''
    Generate quantiles based on observations.

    Args:
        * **response** (:class:`~numpy.ndarray`): Array of model responses.
        * **lims** (:class:`~numpy.ndarray`): Array of quantile limits.
        * **ncol** (:py:class:`int`): Number of columns in `response`.

    Returns:
        * **quantiles** (:py:class:`list`): Quantiles for intervals.
    '''
    # one set of empirical quantiles per response column
    return [empirical_quantiles(response[:, :, jj], lims)
            for jj in range(ncol)]
# ******************************************************************************
# --------------------------------------------
def plot_prediction_intervals(self, plot_pred_int=True, adddata=False, addlegend=True,
figsizeinches=None, model_display={}, data_display={},
interval_display={}):
'''
Plot prediction/credible intervals.
Args:
* **plot_pred_int** (:py:class:`bool`): Flag to include PI on plot.
* **adddata** (:py:class:`bool`): Flag to include data on plot.
* **addlegend** (:py:class:`bool`): Flag to include legend on plot.
* **figsizeinches** (:py:class:`list`): Specify figure size in inches [Width, Height].
* **model_display** (:py:class:`dict`): Model display settings.
* **data_display** (:py:class:`dict`): Data display settings.
* **interval_display** (:py:class:`dict`): Interval display settings.
Available display options (defaults in parantheses):
* **model_display**: `linestyle` (:code:`'-'`), `marker` (:code:`''`), \
`color` (:code:`'r'`), `linewidth` (:code:`2`), `markersize` (:code:`5`),\
`label` (:code:`model`), `alpha` (:code:`1.0`)
* **data_display**: `linestyle` (:code:`''`), `marker` (:code:`'.'`), \
`color` (:code:`'b'`), `linewidth` (:code:`1`), `markersize` (:code:`5`), \
`label` (:code:`data`), `alpha` (:code:`1.0`)
* **data_display**: `linestyle` (:code:`':'`), `linewidth` (:code:`1`), \
`alpha` (:code:`1.0`), `edgecolor` (:code:`'k'`)
'''
# unpack dictionary
credible_intervals = self.intervals['credible_intervals']
prediction_intervals = self.intervals['prediction_intervals']
prediction_intervals, figsizeinches, nbatch, nn, clabels, plabels = self._setup_interval_plotting(
plot_pred_int, prediction_intervals, credible_intervals, figsizeinches)
# setup display settings
interval_display, model_display, data_display = self._setup_display_settings(
interval_display, model_display, data_display)
# Define colors
cicolor, picolor = self._setup_interval_colors(nn=nn, prediction_intervals=prediction_intervals)
# initialize figure handle
fighandle = []
axhandle = []
for ii in range(self.__ndatabatches):
credlims = credible_intervals[ii] # should be ny lists inside
ny = len(credlims)
# extract data
dataii = self.datapred[ii]
# define independent data
time = dataii.xdata[0].reshape(dataii.xdata[0].shape[0], )
for jj in range(ny):
htmp, ax = self._initialize_plot_features(ii=ii, jj=jj, ny=ny, figsizeinches=figsizeinches)
fighandle.append(htmp)
axhandle.append(ax)
# add prediction intervals - if applicable
if prediction_intervals is not None:
ax.fill_between(time, prediction_intervals[ii][jj][0], prediction_intervals[ii][jj][-1],
facecolor=picolor[0], label=plabels[0], **interval_display)
# add range of credible intervals - if applicable
for kk in range(0, int(nn)-1):
ax.fill_between(time, credlims[jj][kk], credlims[jj][-kk - 1],
facecolor=cicolor[kk], label=clabels[kk], **interval_display)
# add model (median parameter values)
ax.plot(time, credlims[jj][int(nn)-1], **model_display)
# add data to plot
if adddata is True:
plt.plot(dataii.xdata[0], dataii.ydata[0][:, jj], | |
value_new
def categorize_vdaoprimet(value):
    """Map a numeric vdaoprimet code to its aortic-valve etiology label.

    Codes outside 1-33 (including missing values) map to an empty string.
    """
    labels = {
        1: "Bicuspid valve disease",
        2: "Congenital (other than bicuspid)",
        3: "Degenerative- Calcified",
        4: "Degenerative- Leaflet prolapse with or without annular dilatation",
        5: "Degenerative- Pure annular dilatation without leaflet prolapse",
        6: "Degenerative - Commissural Rupture",
        7: "Degenerative - Extensive Fenestration",
        8: "Degenerative - Leaflet perforation / hole",
        9: "Endocarditis with root abscess",
        10: "Endocarditis without root abscess",
        11: "LV Outflow Tract Pathology, HOCM",
        12: "LV Outflow Tract Pathology, Sub-aortic membrane",
        13: "LV Outflow Tract Pathology, Sub-aortic Tunnel",
        14: "LV Outflow Tract Pathology, Other",
        15: "Primary Aortic Disease, Aortic Dissection",
        16: "Primary Aortic Disease, Atherosclerotic Aneurysm",
        17: "Primary Aortic Disease, Ehler-Danlos Syndrome",
        18: "Primary Aortic Disease, Hypertensive Aneurysm",
        19: "Primary Aortic Disease, Idiopathic Root dilatation",
        20: "Primary Aortic Disease, Inflammatory",
        21: "Primary Aortic Disease, Loeys-Dietz Syndrome",
        22: "Primary Aortic Disease, Marfan Syndrome",
        23: "Primary Aortic Disease, Other Connective tissue disorder",
        24: "Reoperation - Failure of previous AV repair or replacement",
        25: "Rheumatic",
        26: "Supravalvular Aortic Stenosis",
        27: "Trauma",
        28: "Tumor, Carcinoid",
        29: "Tumor, Myxoma",
        30: "Tumor, Papillary Fibroelastoma",
        31: "Tumor, Other",
        32: "Mixed Etiology",
        33: "Not documented",
    }
    return labels.get(value, "")
def categorize_incidenc(value):
    """Map a numeric incidenc code to its surgery-incidence label.

    Unrecognized codes (including missing values) map to an empty string.
    """
    labels = {
        1: "First cardiovascular surgery",
        2: "First re-op cardiovascular surgery",
        3: "Second re-op cardiovascular surgery",
        4: "Third re-op cardiovascular surgery",
        5: "Fourth or more re-op cardiovascular surgery",
        6: "NA - Not a cardiovascular surgery",
    }
    return labels.get(value, "")
def categorize_status(value):
    """Map a numeric status code to its operative-urgency label.

    Unrecognized codes (including missing values) map to an empty string.
    """
    labels = {
        1: "Elective",
        2: "Urgent",
        3: "Emergent",
        4: "Emergent Salvage",
    }
    return labels.get(value, "")
def categorize_optime(value):
    """Map a numeric optime code to its operative-phase label.

    Unrecognized codes (including missing values) map to an empty string.
    """
    labels = {
        1: "Preop",
        2: "Intraop",
        3: "Postop",
    }
    return labels.get(value, "")
def categorize_ecmowhen(value):
    """Map a numeric ecmowhen code to the phase in which ECMO was initiated.

    Unrecognized codes (including missing values) map to an empty string.
    """
    labels = {
        1: "Preop",
        2: "Intraop",
        3: "Postop",
        4: "Non-operative",
    }
    return labels.get(value, "")
def categorize_planned(value):
    """Map a numeric planned code to its planned/unplanned-procedure label.

    Note the non-sequential coding: 2 is "No" and 3-5 are the "Yes" variants.
    Unrecognized codes (including missing values) map to an empty string.
    """
    labels = {
        2: "No",
        3: "Yes, planned",
        4: "Yes, unplanned due to surgical complication",
        5: "Yes, unplanned due to unsuspected disease or anatomy",
    }
    return labels.get(value, "")
def categorize_ocaracd(value):
    """Map a numeric ocaracd code to its implanted-cardiac-device label.

    Note the non-sequential coding: 1 is "None" and 2-6 are device types.
    Unrecognized codes (including missing values) map to an empty string.
    """
    labels = {
        1: "None",
        2: "Permanent Pacemaker",
        3: "Permanent Pacemaker with Cardiac Resynchronization Technique (CRT)",
        4: "Implantable Cardioverter Defibrillator (ICD)",
        5: "ICD with CRT",
        6: "Implantable recorder",
    }
    return labels.get(value, "")
if __name__ == "__main__":
print_features_str = False
# Specify path to directory with CSVs to analyze
fpath = os.path.expanduser(
"~/Dropbox (Partners HealthCare)/cardiac_surgery_ml/sts_calculator",
)
# Initialize dict to save final predicted outcomes
outcomes = {}
# Specify CSV file
fpath_csv = os.path.join(fpath, "cabgvalve1_cabgvalveothers2.csv")
# Define procedure calculator to use
procid_defined = "Isolated CABG"
# Read CSV into Pandas dataframe
df = pd.read_csv(fpath_csv, low_memory=False)
# Define keys to keep
keys_to_keep = [
"age",
"gender",
"raceblack",
"raceasian",
"ethnicity",
"racenativeam",
"racnativepacific",
"payorprim",
"payorsecond",
"weightkg",
"heightcm",
"diabetes",
"diabctrl",
"hct",
"wbc",
"platelets",
"creatlst",
"dialysis",
"hypertn",
"immsupp",
"pvd",
"cvd",
"cvdtia",
"cva",
"cvawhen",
"cvdstenrt",
"cvdstenlft",
"cvdpcarsurg",
"mediastrad",
"cancer",
"fhcad",
"slpapn",
"liverdis",
"unrespstat",
"syncope",
"diabetes",
"diabctrl",
"chrlungd",
"ivdrugab",
"alcohol",
"pneumonia",
"tobaccouse",
"hmo2",
"prcvint",
"prcab",
"prvalve",
"prvalveproc1",
"prvalveproc2",
"prvalveproc3",
"prvalveproc4",
"prvalveproc5",
"poc",
"pocint1",
"pocint2",
"pocint3",
"pocint4",
"pocint5",
"pocint6",
"pocint7",
"pocpci",
"pocpciwhen",
"pocpciin",
"miwhen",
"heartfailtmg",
"classnyh",
"cardsymptimeofadm",
"carshock",
"arrhythatrfib",
"arrhythafib",
"arrhythaflutter",
"arrhyththird",
"arrhythsecond",
"arrhythsss",
"arrhythvv",
"medinotr",
"medadp5days",
"medadpidis",
"medacei48",
"medster",
"medgp",
"resusc",
"numdisv",
"pctstenlmain",
"hdef",
"pctstenproxlad",
"vdstena",
"vdstenm",
"vdinsufa",
"vdinsufm",
"vdinsuft",
"vdaoprimet",
"incidenc",
"status",
"vstrrepair",
"iabpwhen",
"cathbasassistwhen",
"ecmowhen",
"procid",
]
# Subset dataframe to just the keys we want to keep
df = df[keys_to_keep]
# Loop through each row in the dataframe
for row in range(df.shape[0]):
if row > -1:
print("Predicting STS risk scores for patient %1.0f" % row)
# Convert row into dict
data_row = df.iloc[row].to_dict()
# Initialize input features string
features_str = "{"
# Iterate through every key-value pair in the row
for key, value in data_row.items():
# Append the key to features_str
features_str += '"' + key + '":'
# Initialize value_new
value_new = ""
# Process the value depending on key
if key == "gender":
if value == 1:
value_new = "Male"
else:
value_new = "Female"
elif key == "procid":
value_new = categorize_procid(procid_defined)
elif key == "raceblack":
value_new = categorize_binary(value)
elif key == "raceasian":
value_new = categorize_binary(value)
elif key == "racenativeam":
value_new = categorize_binary(value)
elif key == "racnativepacific":
value_new = categorize_binary(value)
elif key == "ethnicity":
value_new = categorize_ethnicity(value)
elif key == "payorprim":
value_new = categorize_payor(value)
elif key == "payorsecond":
value_new = categorize_payor(value)
elif key == "platelets":
value_new = categorize_int(value)
elif key == "dialysis":
value_new = categorize_binary(value)
elif key == "hypertn":
value_new = categorize_binary(value)
elif key == "immsupp":
value_new = categorize_binary(value)
elif key == "pvd":
value_new = categorize_binary(value)
elif key == "cvd":
value_new = categorize_binary(value)
elif key == "cvdtia":
value_new = categorize_binary(value)
elif key == "cva":
value_new = categorize_binary(value)
elif key == "cvawhen":
if value == 3:
value_new = "<= 30 days"
elif value == 4:
value_new = "> 30 days"
else:
value_new = ""
elif key == "cvstenrt":
if value == 3:
value_new = "50% to 79%"
elif value == 1:
value_new = "80% to 99%"
elif value == 2:
value_new = "100 %" # This space is not a typo
else: # value == 4
value_new = "Not documented"
elif key == "cvstenlft":
if value == 3:
value_new = "50% to 79%"
elif value == 1:
value_new = "80% to 99%"
elif value == 2:
value_new = "100 %" # This space is not a typo
else: # value == 4
value_new = "Not documented"
elif key == "<KEY>":
value_new = categorize_binary(value)
elif key == "mediastrad":
value_new = categorize_binary(value)
elif key == "cancer":
value_new = categorize_binary(value)
elif key == "fhcad":
value_new = categorize_binary(value)
elif key == "slpapn":
value_new = categorize_binary(value)
elif key == "liverdis":
value_new = categorize_binary(value)
elif key == "unrespstat":
value_new = categorize_binary(value)
elif key == "syncope":
value_new = categorize_binary(value)
elif key == "diabetes":
value_new = categorize_binary(value)
elif key == "diabctrl":
if value == 1:
value_new = "None"
elif value == 2:
value_new = "Diet only"
elif value == 3:
value_new = "Oral"
elif value == 4:
value_new = "Insulin"
elif value == 6:
value_new = "Other subcutaneous medication"
else: # value == 7:
value_new = "Unknown"
elif key == "chrlungd":
if value == 1:
value_new = "No"
elif value == 2:
value_new = "Mild"
elif value == 3:
value_new = "Moderate"
elif value == 4:
value_new = "Severe"
| |
## CPPN functions and classes
# Imports
import math
import numpy
from evolve_soft_2d import utility
################################################################################
class cppn:
    """A compositional pattern-producing network (CPPN) that generates a batch
    of 2D model grids from a random seed.
    """
    def __init__(
        self,
        seed: int,
        mod_n: int,
        scale: float,
        hl_n: int,
        hl_s: int,
        thresh: float,
        x: int,
        y: int,
        ) -> None:
        """The CPPN parameters
        Parameters
        ----------
        seed : int
            The seed for the random generation
        mod_n : int
            The number of models to be generated from a particular seed
        scale : float
            The scale of the focus on the model
        hl_n : int
            The number of hidden layers
        hl_s : int
            The size of the initial hidden layer
        thresh : float
            The rounding/removal threshold; values below 1 act as a rounding
            threshold, values of 1 or more as a percentage of elements to remove
        x : int
            The number of elements in the x-direction
        y : int
            The number of elements in the y-direction
        """
        self.seed = seed
        self.mod_n = mod_n
        self.scale = scale
        self.hl_n = hl_n
        self.hl_s = hl_s
        self.thresh = thresh
        self.x = x
        self.y = y
        # The resolution of the grid (total number of elements per model)
        self.res = self.x*self.y
        # Build the grid (also populates self.af as a side effect)
        self.grid = self.cppn_grid()
    def __repr__(self) -> str:
        """Format a representation of the CPPN
        Returns
        -------
        str
            Formatted representation of the CPPN for the log
        """
        r = "Model Dimensions: {}x{} elements\n".format(self.x, self.y)
        r += "Model Seed: {}\n".format(self.seed)
        r += "Number Of Models Generated: {}\n".format(self.mod_n)
        r += "Model Scale: 1:{}\n".format(self.scale)
        r += "Number Of Hidden Layers: {}\n".format(self.hl_n)
        r += "Size Of Initial Hidden Layer: {}\n".format(self.hl_s)
        # thresh < 1 is interpreted as a rounding threshold, otherwise as a
        # percentage of elements to remove
        if self.thresh < 1:
            r += "Rounding Threshold: {}\n".format(self.thresh)
        else:
            r += "Percentage Of Elements Removed: {}%\n".format(self.thresh)
        r += "Activation Functions:\n"
        for i in self.af:
            r += "{}\n".format(i)
        return r
    def cppn_grid(self) -> numpy.ndarray:
        """Generates model grids
        Builds (x, y, r) coordinate inputs plus a random initial hidden layer,
        pushes them through randomly-chosen activation layers, and records the
        chosen activation-function names in ``self.af``.
        Returns
        -------
        numpy.ndarray
            The model grids, shaped (mod_n, x, y)
        """
        # Initialisations
        self.af = []
        # The list of possible activation functions (af_o restricts the output
        # layer to functions with a bounded, non-negative range)
        af_l = [self.cppn_sin, self.cppn_cos, self.cppn_tanh, self.cppn_sigm, self.cppn_srel]
        af_o = [self.cppn_sigm, self.cppn_srel]
        # Set the random generation seed
        numpy.random.seed(seed = self.seed)
        # Generate the initial hidden layer for each model
        hl = numpy.random.uniform(low = -1, high = 1, size = (self.mod_n, self.hl_s)).astype(numpy.float32)
        # Generate the grid matrix: per-element x, y and radial coordinates
        x_r = numpy.linspace(-1*self.scale, self.scale, num = self.x)
        x_m = numpy.matmul(numpy.ones((self.y, 1)), x_r.reshape((1, self.x)))
        y_r = numpy.linspace(-1*self.scale, self.scale, num = self.y)
        y_m = numpy.matmul(y_r.reshape((self.y, 1)), numpy.ones((1, self.x)))
        r_m = numpy.sqrt(x_m*x_m + y_m*y_m)
        # Repeat the coordinate planes once per model: (mod_n, res, 1)
        x_d = numpy.tile(x_m.flatten(), self.mod_n).reshape(self.mod_n, self.res, 1)
        y_d = numpy.tile(y_m.flatten(), self.mod_n).reshape(self.mod_n, self.res, 1)
        r_d = numpy.tile(r_m.flatten(), self.mod_n).reshape(self.mod_n, self.res, 1)
        # Scale the initial hidden layers (broadcast to (mod_n, res, hl_s))
        hl_scale = numpy.reshape(hl, (self.mod_n, 1, self.hl_s))*numpy.ones((self.res, 1), dtype = numpy.float32)*self.scale
        # Unwrap the grid matrices to 2D for the linear layers
        x_d_unwrap = numpy.reshape(x_d, (self.mod_n*self.res, 1))
        y_d_unwrap = numpy.reshape(y_d, (self.mod_n*self.res, 1))
        r_d_unwrap = numpy.reshape(r_d, (self.mod_n*self.res, 1))
        hl_unwrap = numpy.reshape(hl_scale, (self.mod_n*self.res, self.hl_s))
        # Build the network: sum of four linear projections (only the hidden
        # layer projection carries a bias), one output column per hidden layer
        n = self.fully_connected(hl_unwrap, self.hl_n, True, self.seed) + self.fully_connected(x_d_unwrap, self.hl_n, False, self.seed + 1) + self.fully_connected(y_d_unwrap, self.hl_n, False, self.seed + 2) + self.fully_connected(r_d_unwrap, self.hl_n, False, self.seed + 3)
        # Transpose the network so each row n[i] is one layer's activations
        n = n.T
        if self.hl_n > 1:
            # Loop through the second to second-last hidden layers
            for i in range(1, self.hl_n - 1):
                # Set the seed for each layer
                numpy.random.seed(seed = self.seed + i)
                # Select and record the activation function
                n[i], af_c = numpy.random.choice(af_l)(n[i - 1])
                self.af.append(af_c)
            # Set the seed for the final layer
            numpy.random.seed(seed = self.seed)
            # Apply and record the final function
            # NOTE(review): af_o (the candidate list) is rebound here to the
            # chosen function's name string; harmless as it is not reused after
            n[-1], af_o = numpy.random.choice(af_o)(n[-2])
            self.af.append(af_o)
        else:
            # Set the seed for each layer
            numpy.random.seed(seed = self.seed)
            # Select and record the activation function (applied in place)
            n[0], af_c = numpy.random.choice(af_l)(n[0])
            self.af.append(af_c)
            # Apply and record the final function
            n[0], af_o = numpy.random.choice(af_o)(n[0])
            self.af.append(af_o)
        # Reshape the final layer to fit the given dimensions
        mod = numpy.reshape(n[-1], (self.mod_n, self.x, self.y))
        return mod
    def fully_connected(
        self,
        i_v: numpy.ndarray,
        o_d: int,
        w_bias: bool,
        seed: int,
        ) -> numpy.ndarray:
        """Apply a randomly-initialised fully connected (linear) layer
        Multiplies the input by a random (i_v.shape[1], o_d) matrix and
        optionally adds a random per-output bias.
        Parameters
        ----------
        i_v : numpy.ndarray
            The input vector
        o_d : int
            The output dimensions
        w_bias : bool
            If the layers should be connected with bias
        seed : int
            The seed for the random generation
        Returns
        -------
        numpy.ndarray
            The connected results
        """
        # Set the random generation seed
        numpy.random.seed(seed = seed)
        # Generate the random matrix
        m = numpy.random.standard_normal(size = (i_v.shape[1], o_d)).astype(numpy.float32)
        # Multiply the input with the matrix
        result = numpy.matmul(i_v, m)
        # Check if the bias must be included
        if w_bias:
            # Generate the random bias
            bias = numpy.random.standard_normal(size = (1, o_d)).astype(numpy.float32)
            # Add the bias to the result (broadcast across all rows)
            result += bias*numpy.ones((i_v.shape[0], 1), dtype = numpy.float32)
        return result
    def partly_connected(
        self,
        i_v: numpy.ndarray,
        o_d: int,
        w_bias: bool,
        seed: int,
        ) -> numpy.ndarray:
        """Connect a single layer of the hidden network
        Like fully_connected, but the random matrix is sized from
        i_v.shape[0] (expects a 1-D input row from the layer matrix).
        Parameters
        ----------
        i_v : numpy.ndarray
            The input vector
        o_d : int
            The dimensions of the output
        w_bias : bool
            If the layers should be connected with bias
        seed : int
            The seed for the random generation
        Returns
        -------
        numpy.ndarray
            The connected results
        """
        # Set the random generation seed
        numpy.random.seed(seed = seed)
        # Generate the random matrix
        m = numpy.random.standard_normal(size = (i_v.shape[0], o_d)).astype(numpy.float32)
        # Multiply the input with the matrix
        result = numpy.matmul(i_v, m)
        # Check if the bias must be included
        if w_bias:
            # Generate the random bias
            bias = numpy.random.standard_normal(size = (o_d)).astype(numpy.float32)
            # Add the bias to the result
            result += bias.T
        return result
    def cppn_sin(
        self,
        hl: numpy.ndarray
        ) -> (numpy.ndarray, str):
        """Apply sin as the activation function for the current layer
        Parameters
        ----------
        hl : numpy.ndarray
            The current layer
        Returns
        -------
        numpy.ndarray, str:
            The new layer
            The label of the activation function
        """
        name = "sin"
        out = numpy.sin(self.partly_connected(hl, self.res*self.mod_n, True, self.seed))
        return out, name
    def cppn_cos(
        self,
        hl: numpy.ndarray
        ) -> (numpy.ndarray, str):
        """Apply cos as the activation function for the current layer
        Parameters
        ----------
        hl : numpy.ndarray
            The current layer
        Returns
        -------
        numpy.ndarray, str:
            The new layer
            The label of the activation function
        """
        name = "cos"
        out = numpy.cos(self.partly_connected(hl, self.res*self.mod_n, True, self.seed))
        return out, name
    def cppn_tanh(
        self,
        hl: numpy.ndarray
        ) -> (numpy.ndarray, str):
        """Apply tanh as the activation function for the current layer
        Parameters
        ----------
        hl : numpy.ndarray
            The current layer
        Returns
        -------
        numpy.ndarray, str:
            The new layer
            The label of the activation function
        """
        name = "tanh"
        out = numpy.tanh(self.partly_connected(hl, self.res*self.mod_n, True, self.seed))
        return out, name
    def cppn_sigm(
        self,
        hl: numpy.ndarray
        ) -> (numpy.ndarray, str):
        """Apply a sigmoid as the activation function for the current layer
        Parameters
        ----------
        hl : numpy.ndarray
            The current layer
        Returns
        -------
        numpy.ndarray, str:
            The new layer
            The label of the activation function
        """
        name = "sigmoid"
        out = utility.sigmoid(self.partly_connected(hl, self.res*self.mod_n, True, self.seed))
        return out, name
    def cppn_srel(
        self,
        hl: numpy.ndarray
        ) -> (numpy.ndarray, str):
        """Apply smooth ReLu as the activation function for the current layer
        Parameters
        ----------
        hl : numpy.ndarray
            The current layer
        Returns
        -------
        numpy.ndarray, str:
            The new layer
            The label of the activation function
        """
        name = "smooth ReLu"
        out = utility.smooth_relu(self.partly_connected(hl, self.res*self.mod_n, True, self.seed))
        return out, name
################################################################################
class cppn_i:
"""The CPPN model
"""
def __init__(
self,
cppn: cppn,
mod_id: int,
) -> None:
"""The CPPN model parameters
Parameters
----------
cppn : cppn
The CPPN
mod_id : int
The model number
"""
self.cppn = cppn
self.mod_id = mod_id
# The model grid
self.grid = self.rem_thresh(self.cppn.grid[self.mod_id])
def __repr__(self) -> str:
"""Format a representation of the CPPN model
Returns
-------
str
Formatted representation of the CPPN model for the log
"""
r = "Model ID: {}\n".format(self.mod_id)
r += "CPPN Parameters:\n{}".format(self.cppn)
return r
def rem_thresh(
self,
grid: numpy.array
) -> numpy.array:
"""Removes elements from a grid according to the specified threshold
Parameters
----------
grid : numpy.array
The grid from which elements are to be removed
Returns
-------
numpy.array
The grid with the elements removed
"""
# Check if the threshold indicates that a percentage of elements should be removed
if self.cppn.thresh > 1:
# Calculate the percentage as a decimal
perc = self.cppn.thresh/100
# Calculate the number of elements to be removed
b = int(math.ceil(self.cppn.x*self.cppn.y*perc))
if b == self.cppn.x*self.cppn.y:
# Reshape the grid to be one-dimensional
grid = numpy.zeros((self.cppn.x, self.cppn.y))
else:
# Obtain the IDs of the elements to | |
from Game.player import Player
from pygame import *
from Game.const import *
class Story:
""" Story line class """
def __init__(self, message, treasure, player, screen, fade, maps, sound):
self.screen = screen
self.message = message
self.treasure = treasure
self.player = player
self.fade = fade
self.maps = maps
self.sound = sound
# just for testing remove later
self.treasure.collectedItems.add(self.treasure.items['boat'][0])
self.selectedFirstLocation="rochelle"
self.mainWorldMsgFinished = False
self.gotWorldMap = False
self.BurntHouseMsgFinished = False
self.LabHouseMsgFinished = False
self.islandMsgFinished = False
self.passwordMsgFinished=False
self.shipCorridorMsgFinished = False
self.shipCabinMsgFinished = False
# Flag to see if game is over (player won)
self.gameWon = False
self.letter1 = transform.scale(image.load("resources/graphics/items/letter1_preview_rev_1.png"),(70,70))
self.letter2 = transform.scale(image.load("resources/graphics/items/letter1_preview_rev_1.png"),(70,70))
self.brochure = transform.scale(image.load("resources/graphics/items/brochure.png"),(80,80))
self.worldMap = transform.scale(image.load("resources/graphics/map/null.png"),(70,70))
self.key = transform.scale(image.load("resources/graphics/items/key.png"),(70,70))
self.laptop = transform.scale(image.load("resources/graphics/items/laptop.png"),(160,130))
self.testtube = transform.scale(image.load("resources/graphics/items/testtube.png"),(70,70))
self.microscope = transform.scale(image.load("resources/graphics/items/microscope.png"),(70,70))
self.chestbox = transform.scale(image.load("resources/graphics/items/chest.png"),(70,70))
# List of all available items (name -> description -> position -> cost -> rect)
self.healthPotion = transform.scale(image.load("resources/graphics/items/healthPotion.png"), (70,70))
# List of all available items (name -> description -> position -> cost -> rect)
self.availableItems = {
# "speedBoots" : [["These are the boots of Hermes.", "Legend says they increase your speed."], (156,135), 30, Rect(153,133,70,70)],
# "earthGem" : [["Some sort of shining gem.", "It seems useless..."], (876,270), 200, Rect(864,262,self.earthGemImage.get_width()*2,self.earthGemImage.get_height()*2)],
"healthPotion" : [["Potion to increase your health by 20."], (509,419), 50, Rect(509,419,70,70)],
# "newPrayer" : [["New prayer to use at the church.", "You have %s prayers."%str(self.prayers)], (132,336), 100, Rect(132,336,100,100)],
"brochure" : [[""], (876,270), 200, Rect(865,270,70,70)],
"letter1" : [["Dr.Gwen says to Dr.Nevlin, ' I fear the zombie virus is far", "deadlier than we ever imagined. I have many unconfirmed reports , but", "there is no point spreading panic.'"], (676,250), 200, Rect(664,242,100,100)],
"letter2" : [["You pick up Dr. Nevlin�s letter. 'Hope you are safe in the bunker.I ", "am working on the cure in our lab in Teshlor. I'm close.. The rest", "of it is gibberish - NEVLIN written repeatedly."],(132,336), 100, Rect(132,336,100,100)],
"worldMap" : [[""],(240,400), 100, Rect(240,400,70,70)],
"key" : [[""], (429,339), 30, Rect(429,339,70,70)],
"laptop" : [[""], (825,185), 200, Rect(825,185,100,100)],
"testtube" : [[""], (123.5,464), 200, Rect(123.5,464,70,70)],
"microscope" : [[""], (40.5,410), 200, Rect(40.5,410,70,70)],
"chestbox" : [["treasure box."], (541,46), 200, Rect(530,35,80,80)]
}
# Reuturn rect
self.shopReturn = Rect(833,508,300,300)
# -----------------------------------
# Keyboard actions
self.spaceReady = False
self.returnReady = False
self.pReady = False
    def intro(self, next):
        """ Introduction narration; on completion, marks the scene done and
        starts the main-world music.
        NOTE(review): parameter `next` shadows the builtin of the same name.
        """
        # Only do the narration scene once
        if not self.mainWorldMsgFinished:
            self.message.narration(["Clearly, this hideout has been deserted for quite some time.",\
                "Who was hiding.. And from what?",\
                ], next, "top")
            if self.message.done:
                self.mainWorldMsgFinished = True
                # presumably `mac` flags macOS, where music playback is skipped
                # - TODO confirm against Game.const
                if not mac:
                    mixer.music.fadeout(500)
                    mixer.music.load(self.sound.getMusic("mainWorldTheme"))
                    mixer.music.play(loops=-1)
                self.message.reset()
    def hideout(self, click):
        """ Main hideout scene: draws the two collectible letters, shows hover
        descriptions, handles click-to-collect, and handles the exit back to
        the main world.
        Args:
            click: truthy when the mouse button was pressed this frame
        """
        pos = mouse.get_pos()
        # NOTE(review): this helper is defined but never called in this method
        def msg(text):
            """ Render message """
            self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
            self.screen.blit(self.message.font.render(text, True, (0,0,0)), (255,49))
            self.treasure.render(True, False, False, False, self.message)
            # Render and pause
            display.flip()
            time.wait(1500)
        # Blit background
        self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
        # Loop through the dictionary and draw the items
        for key,val in self.availableItems.items():
            if key == "letter1":
                # Draw the letter sprite at its stored position
                self.screen.blit(self.letter1, val[1])
            if key == "letter2":
                # Draw the letter sprite at its stored position
                self.screen.blit(self.letter2, val[1])
        # General description
        # Loop through items
        # NOTE(review): both hover rects use the same coordinates (864,262);
        # they do not match the letters' draw positions - confirm intended
        for item in [
            ["letter1", Rect(864,262,self.letter1.get_width()*2,self.letter1.get_height()*2)],
            ["letter2", Rect(864,262,self.letter2.get_width()*2,self.letter2.get_height()*2)]
            ]:
            if not item[1].collidepoint(pos):
                # Mouse not over this item: show the generic instructions
                self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
                self.screen.blit(self.message.font.render("Hover over an item to view its description.", True, (0,0,0)), (245,40))
                self.screen.blit(self.message.font.render("Click on it to collect it.", True, (0,0,0)), (245,90))
            else:
                # Hovering an already-collected item: fall back to instructions
                if not item[0] in self.availableItems:
                    self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
                    self.screen.blit(self.message.font.render("Hover over item for its description.", True, (0,0,0)), (245,40))
                    self.screen.blit(self.message.font.render("Click on it to collect it.", True, (0,0,0)), (245,90))
        # letter1: show its three description lines on hover; collect on click
        if "letter1" in self.availableItems:
            if self.availableItems["letter1"][3].collidepoint(pos):
                self.screen.blit(transform.scale(self.message.background, (600,200)), (229,30))
                self.screen.blit(self.message.font.render(self.availableItems["letter1"][0][0], True, (0,0,0)), (245,40))
                self.screen.blit(self.message.font.render(self.availableItems["letter1"][0][1], True, (0,0,0)), (245,90))
                self.screen.blit(self.message.font.render(self.availableItems["letter1"][0][2], True, (0,0,0)), (245,140))
                #self.screen.blit(self.message.font.render("$ %s"%str(self.availableItems["brochure"][2]), True, (255,255,255)), (515,532))
                if click:
                    # Add item to inventory
                    self.treasure.collectedItems.add("letter1")
                    # Increase the player speed in all maps
                    # Remove item from dictionary
                    self.availableItems.pop("letter1", None)
        # letter2: same hover/collect behaviour as letter1
        if "letter2" in self.availableItems:
            if self.availableItems["letter2"][3].collidepoint(pos):
                self.screen.blit(transform.scale(self.message.background, (600,150)), (229,30))
                self.screen.blit(self.message.font.render(self.availableItems["letter2"][0][0], True, (0,0,0)), (245,40))
                self.screen.blit(self.message.font.render(self.availableItems["letter2"][0][1], True, (0,0,0)), (245,90))
                self.screen.blit(self.message.font.render(self.availableItems["letter2"][0][2], True, (0,0,0)), (245,140))
                #self.screen.blit(self.message.font.render("$ %s"%str(self.availableItems["brochure"][2]), True, (255,255,255)), (515,532))
                if click:
                    # Add item to inventory
                    self.treasure.collectedItems.add("letter2")
                    # Increase the player speed in all maps
                    # Remove item from dictionary
                    self.availableItems.pop("letter2", None)
        # Exit: clicking the return area fades back to the main world
        if self.shopReturn.collidepoint(pos) and click:
            # Fade into main world
            self.fade.fadeDark(self.maps.allScenes["mainWorld"][0], self.screen, self.player.mapCoords["mainWorld"])
            # Create new scene
            self.maps.newScene("mainWorld")
            # Set player coordinates
            self.player.x = self.player.mapx+9311
            self.player.y = self.player.mapy+2168
            # Reset fade
            self.fade.reset()
            # Change music
            if not mac:
                mixer.music.fadeout(500)
                mixer.music.load(self.sound.getMusic("mainWorldTheme"))
                mixer.music.play(loops=-1)
def shipCorridor(self, next):
""" Main surprise temple """
#pos = mouse.get_pos()
def msg(text, length):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(length)
# Only do the narration scene once
if not self.shipCorridorMsgFinished:
self.message.narration(["You want answers and the only way to get them is get up and explore."], next, "top")
if self.message.done:
self.shipCorridorMsgFinished = True
self.message.reset()
for key,val in self.availableItems.items():
if key == "brochure":
self.screen.blit(self.brochure, val[1])
break
pos=[self.player.x,self.player.y]
#pos=(x,y)
# Speed boots
if "brochure" in self.availableItems:
if self.availableItems["brochure"][3].collidepoint(pos):
# Word wrap text
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(self.availableItems["brochure"][0][0], True, (0,0,0)), (275,59))
self.treasure.collectedItems.add("brochure")
self.availableItems.pop("brochure", None)
# Notification
msg("It's a brochure about some ship... The Black Pearl!", 3000)
msg("Someone has scrawled, �CELL HERO� on it.", 3000)
msg("Is it a hint?", 3000)
def shipCabin(self, next):
    """Ship-cabin scene: play the intro narration once and handle the
    world-map pickup.

    While "worldMap" is still in ``self.availableItems`` it is drawn on
    screen; when the player walks onto its rect it is collected, a
    notification sequence is shown and ``self.gotWorldMap`` is set.

    Parameters
    ----------
    next :
        Event/flag forwarded to ``self.message.narration`` to advance the
        narration (semantics defined by the Message class — TODO confirm).

    Fixes: removed the unused ``mousePos`` local and repaired a
    mis-encoded apostrophe in a notification string.
    """
    def msg(text, length):
        """Blit a one-line message box, refresh the display and pause
        for ``length`` milliseconds."""
        self.screen.blit(transform.scale(self.message.background, (600, 150)), (259, 30))
        self.screen.blit(self.message.font.render(text, True, (0, 0, 0)), (275, 59))
        self.treasure.render(True, False, False, False, self.message)
        # Render and pause
        display.flip()
        time.wait(length)
    # Play the intro narration only once per game.
    if not self.shipCabinMsgFinished:
        self.message.narration(["Looks like a control room of sorts"], next, "top")
        if self.message.done:
            self.shipCabinMsgFinished = True
            self.message.reset()
    # Draw items still available in this room.
    for key, val in self.availableItems.items():
        if key == "worldMap":
            self.screen.blit(self.worldMap, val[1])
    pos = [self.player.x, self.player.y]
    # World-map pickup: triggered when the player stands on the item rect.
    if "worldMap" in self.availableItems:
        if self.availableItems["worldMap"][3].collidepoint(pos):
            self.gotWorldMap = True
            self.screen.blit(transform.scale(self.message.background, (600, 150)), (259, 30))
            self.screen.blit(self.message.font.render(self.availableItems["worldMap"][0][0], True, (0, 0, 0)), (275, 59))
            self.treasure.collectedItems.add("worldMap")
            self.availableItems.pop("worldMap", None)
            # Notification sequence.
            msg("This is no ordinary map.", 2000)
            msg("It's as though someone has marked on it... just for you.", 3000)
            msg("As you read it, it is stored in the hard-disk of your memory.", 3000)
            msg("Activate the map by pressing the map button on the right.", 3000)
            msg("The ship demands the location of your first stop.", 3000)
            msg("What is it?", 2000)
def BurntHouse(self, next):
    """Burnt-house scene: intro narration plus "key" and "laptop" pickups.

    Each pickup removes the item from ``self.availableItems``, records it
    in ``self.treasure.collectedItems`` and plays a notification sequence.

    Fixes: repaired mis-encoded quote characters in the laptop
    notification strings.
    """
    def msg(text, length):
        """Blit a one-line message box, refresh the display and pause
        for ``length`` milliseconds."""
        self.screen.blit(transform.scale(self.message.background, (600, 150)), (259, 30))
        self.screen.blit(self.message.font.render(text, True, (0, 0, 0)), (275, 59))
        self.treasure.render(True, False, False, False, self.message)
        # Render and pause
        display.flip()
        time.wait(length)
    # Only do the narration scene once
    if not self.BurntHouseMsgFinished:
        self.message.narration(["Acccchhhooo! You start coughing and sneezing as soon as you enter.",
                                "The smell of burnt wood and ash is too strong.",
                                "Maybe you will find something useful in the ruins?"
                                ], next, "top")
        if self.message.done:
            self.BurntHouseMsgFinished = True
            self.message.reset()
    # Draw items still available in this room.
    for key, val in self.availableItems.items():
        if key == "key":
            self.screen.blit(self.key, val[1])
        if key == "laptop":
            self.screen.blit(self.laptop, val[1])
    pos = [self.player.x, self.player.y]
    # Titanium key pickup.
    if "key" in self.availableItems:
        if self.availableItems["key"][3].collidepoint(pos):
            self.screen.blit(transform.scale(self.message.background, (600, 150)), (259, 30))
            self.screen.blit(self.message.font.render(self.availableItems["key"][0][0], True, (0, 0, 0)), (275, 59))
            self.treasure.collectedItems.add("key")
            self.availableItems.pop("key", None)
            # Notification
            msg("The key is too light for its size (titanium, atomic number 22).", 3000)
            msg("It has a striped pattern(barcode signature), you think.", 3000)
            msg("Now how did you know that?", 2000)
    # Laptop pickup (story exposition).
    if "laptop" in self.availableItems:
        if self.availableItems["laptop"][3].collidepoint(pos):
            self.screen.blit(transform.scale(self.message.background, (600, 150)), (259, 30))
            self.screen.blit(self.message.font.render(self.availableItems["laptop"][0][0], True, (0, 0, 0)), (275, 59))
            self.treasure.collectedItems.add("laptop")
            self.availableItems.pop("laptop", None)
            # Notification
            msg("Your cyborg nature acts instinctively.", 2500)
            msg("You retrieve the hard-disk and connect it to your brain.", 3000)
            msg("Alas, most sectors are damaged and you see only random noise.", 3000)
            msg("A lone grainy video plays.", 2000)
            msg("'Frequent zombie attacks o--- coast...'", 2000)
            msg("'high infection rate in h--- Aquesta...', a news reporter is saying.", 3000)
            msg("Aquesta. The name triggers something.", 2000)
            msg("All around him, there is rubble.", 2500)
            msg("People are running and screaming.", 2500)
            msg("You just realise you haven't seen another person in days.", 3000)
            msg("Did the zombies kill everyone else ?", 2500)
def Lab(self, next):
    """Laboratory scene: deja-vu narration plus "testtube" and
    "microscope" pickups, each revealing part of the backstory.

    Fixes: repaired mis-encoded apostrophes/ellipsis in notification
    strings and the "upto" typo.
    """
    def msg(text, length):
        """Blit a message box (wider layout than other rooms), refresh
        the display and pause for ``length`` milliseconds."""
        self.screen.blit(transform.scale(self.message.background, (600, 200)), (229, 30))
        self.screen.blit(self.message.font.render(text, True, (0, 0, 0)), (245, 59))
        self.treasure.render(True, False, False, False, self.message)
        # Render and pause
        display.flip()
        time.wait(length)
    # Only do the narration scene once
    if not self.LabHouseMsgFinished:
        self.message.narration(["You have a sense of deja vu. ",
                                "Yes, you had come here with Dr. Gwen!"
                                ], next, "top")
        if self.message.done:
            self.LabHouseMsgFinished = True
            self.message.reset()
    # Draw items still available in this room.
    for key, val in self.availableItems.items():
        if key == "testtube":
            self.screen.blit(self.testtube, val[1])
        if key == "microscope":
            self.screen.blit(self.microscope, val[1])
    pos = [self.player.x, self.player.y]
    # Test-tube pickup.
    if "testtube" in self.availableItems:
        if self.availableItems["testtube"][3].collidepoint(pos):
            self.screen.blit(transform.scale(self.message.background, (600, 150)), (229, 30))
            self.screen.blit(self.message.font.render(self.availableItems["testtube"][0][0], True, (0, 0, 0)), (255, 59))
            self.treasure.collectedItems.add("testtube")
            self.availableItems.pop("testtube", None)
            # Notification
            msg("These test tubes are strangely familiar...", 3000)
            msg("You remember now, they are yours!", 3000)
            msg("Yes, you used to work here before as a researcher.", 3000)
            msg("Your name is <NAME>.", 2000)
            msg("Dr. Gwen and Dr. Nevlin were your colleagues and best friends.", 3000)
            msg("You recall everything right up to your accident. ", 3000)
            msg("Aha! Your friends made you a cyborg to save your life. ", 3000)
            msg("You must have been on the boat to get better treatment in Rochelle.", 3000)
            msg("They left behind the clues in case they didn't survive.", 3000)
    # Microscope pickup.
    if "microscope" in self.availableItems:
        if self.availableItems["microscope"][3].collidepoint(pos):
            self.screen.blit(transform.scale(self.message.background, (600, 150)), (229, 30))
            self.screen.blit(self.message.font.render(self.availableItems["microscope"][0][0], True, (0, 0, 0)), (255, 59))
            self.treasure.collectedItems.add("microscope")
            self.availableItems.pop("microscope", None)
            # Notification
            msg("You peer through the microscope, observing the virus strains.", 3000)
            msg("You created them here.", 2000)
            msg("You had a rare gene that made you immune. ", 3000)
            msg("There was a mutation in your experiment and... ", 3000)
            msg("the zombie virus leaked out. Now everyone is gone.", 2000)
            msg("A wave of shame washes over you. ", 2500)
            msg("But wait, weren't you trying to make the cure as well?", 3000)
            msg("Where is it? ", 2000)
def dungeon(self, next):
    """Dungeon scene handler.

    Currently only defines its message-box helper; no items or
    narration are wired up yet.
    """
    def msg(line):
        """Draw a message box containing *line* and pause for 1.6 s."""
        screen = self.screen
        box = transform.scale(self.message.background, (600, 150))
        screen.blit(box, (259, 30))
        rendered = self.message.font.render(line, True, (0, 0, 0))
        screen.blit(rendered, (275, 59))
        self.treasure.render(True, False, False, False, self.message)
        # Render and pause
        display.flip()
        time.wait(1600)
def finalisland(self, next):
""" Main surprise temple """
#pos = mouse.get_pos()
def msg(text, length):
""" Render message """
self.screen.blit(transform.scale(self.message.background, (600,150)), (259,30))
self.screen.blit(self.message.font.render(text, True, (0,0,0)), (275,59))
self.treasure.render(True, False, False, False, self.message)
# Render and pause
display.flip()
time.wait(length)
if not self.islandMsgFinished:
self.message.narration(["The cure is inside.",
" In order to access it, ",
" you must use the password."
], next,"bottom")
if self.message.done:
self.islandMsgFinished = True
self.message.reset()
for key,val in self.availableItems.items():
if key == | |
# (c) Copyright 2018 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import abort
from flask import Blueprint
from flask import copy_current_request_context
from flask import jsonify
from flask import request
import os
from oslo_config import cfg
from oslo_log import log as logging
import time
import yaml
from . import model as model_api
from . import playbooks
from . import policy
from . import versions
LOG = logging.getLogger(__name__)
bp = Blueprint('servers', __name__)
CONF = cfg.CONF
@bp.route("/api/v2/servers/process", methods=['POST'])
@policy.enforce('lifecycle:run_playbook')
def add_server():
    """Add compute node

    Adds a compute node by performing the following steps:

    - update the customer model
    - commit changes to the model
    - run the config processor playbook
    - run the ready deployment playbook
    - run the site playbook
    - run the monasca-deploy playbook

    .. :quickref: Server; Add compute node

    **Example Request**:

    The request contains two objects: a `server` object containing the
    information to be stored in the input model, and a `process` object
    containing values relevant to the process of adding a new server.

    .. sourcecode:: http

       POST /api/v2/servers/process HTTP/1.1
       Content-Type: application/json

       {
          "server" : {
              "id": "ID"
          },
          "process" : {
              "encryption-key": "somekey",
              "commitMessage": "Adding a new server"
          }
       }

    **Example Response**:

    .. sourcecode:: http

       HTTP/1.1 202 ACCEPTED
       Content-Type: application/json
       Location: http://localhost:9085/api/v2/plays/6858

       {
           "id": 6858
       }

    **Changed for v2**:

    The `limitToId` field from the `process` object is no longer used. It
    was used as a way to optionally supply the `--limit` parameter to certain
    playbooks, and it was *always* being supplied by the callers (because it
    makes no sense NOT to use it). The `--limit` parameter will now
    automatically be supplied.
    """
    body = request.get_json()
    # Extract the keys of interest from the request body and normalize the
    # request arguments
    keys = ('commitMessage',
            'encryptionKey',
            'encryption-key')
    opts = pick(body.get('process'), keys)
    # rekey is always blank because we will never rekey anything
    opts['extra-vars'] = {
        'encrypt': '',
        'rekey': ''
    }
    # Both spellings of the encryption key are accepted; either one is
    # normalized into extra-vars['encrypt'] and removed from opts.
    if 'encryptionKey' in opts:
        opts['extra-vars']['encrypt'] = opts.pop('encryptionKey')
    elif 'encryption-key' in opts:
        opts['extra-vars']['encrypt'] = opts.pop('encryption-key')
    try:
        server_id = body['server']['id']
    except KeyError:
        abort(400, 'Server id missing')
    if 'commitMessage' not in opts:
        opts['commitMessage'] = 'Add server %s' % server_id
    # get the model
    model = model_api.read_model()
    servers = model['inputModel']['servers']
    # Make sure the server does not already exist in the model
    if server_id in [s['id'] for s in servers]:
        abort(400, 'Server %s already exists' % server_id)
    servers.append(body['server'])
    model_api.write_model(model)
    # commit the model
    versions.commit_model(message=opts['commitMessage'])
    # Millisecond timestamp doubles as a (sufficiently) unique play id.
    play_id = int(1000 * time.time())
    # The following local functions are all steps that will be run
    # asynchronously in a series of promises. In (some) other languages that
    # support promises, code blocks can be entered directly as an argument
    # to the 'then' function, but this is not easily done in python. The
    # closest thing to them might be multi-line lambda functions, but these
    # are intentionally unsupported in python:
    # http://www.artima.com/weblogs/viewpost.jsp?thread=147358
    #
    # For clarity, the functions will be defined in the same order that they
    # are called. The @copy_current_request_context decorator provided
    # by flask permits functions to access the http context supplied to the
    # parent function.
    @copy_current_request_context
    def run_config_processor_playbook():
        LOG.info("Running config processor playbook")
        payload = pick(opts, ('extra-vars',))
        result = playbooks.run_playbook('config-processor-run', payload,
                                        play_id)
        # return the entire result object, including the promise and
        # other data
        return result

    @copy_current_request_context
    def run_ready_deployment_playbook(prev):
        LOG.info("Running ready deployment playbook")
        result = playbooks.run_playbook('ready-deployment', play_id=play_id)
        return result['promise']

    @copy_current_request_context
    def retrieve_hostname(prev):
        LOG.info("Retrieving hostname from config processor output")
        # Read the CP output and get the hostname.  On success this sets
        # opts['limit'] so the site playbook only touches the new server.
        try:
            filename = os.path.join(CONF.paths.cp_ready_output_dir,
                                    'server_info.yml')
            with open(filename) as f:
                lines = f.readlines()
                raw = ''.join(lines)
                servers = yaml.safe_load(raw)
                if server_id in servers:
                    if 'hostname' in servers[server_id]:
                        opts['limit'] = servers[server_id]['hostname']
                    else:
                        LOG.info('Server %s has no hostname so skipping --limit' %
                                 server_id)
                else:
                    LOG.info('Unable to locate server %s so skipping --limit' %
                             server_id)
        except (OSError, IOError):
            message = "Unable to read %s" % filename
            LOG.error(message)
            raise Exception(message)
        except yaml.YAMLError:
            # If the generated file is not valid yml, there is some problem
            # with the config processor
            message = "%s is not a valid yaml file" % filename
            LOG.error(message)
            raise Exception(message)

    @copy_current_request_context
    def run_site_playbook(prev):
        LOG.info("Running site playbook")
        # run site playbook, limited to the given hostname if possible
        payload = pick(opts, ('encryption-key', 'limit'))
        result = playbooks.run_playbook('site', payload, play_id)
        return result['promise']

    @copy_current_request_context
    def generate_hosts_file(prev):
        LOG.info("Generating hosts file")
        payload = pick(opts, ('encryption-key', ))
        payload['tags'] = 'generate_hosts_file'
        result = playbooks.run_playbook('site', payload, play_id)
        return result['promise']

    @copy_current_request_context
    def update_monasca(prev):
        LOG.info("Running monasca-deploy playbook")
        payload = pick(opts, ('encryption-key', ))
        payload['tags'] = 'active_ping_checks'
        result = playbooks.run_playbook('monasca-deploy', payload, play_id)
        return result['promise']

    @copy_current_request_context
    def cleanup(prev):
        LOG.info("Server successfully added")

    @copy_current_request_context
    def failure(e):
        LOG.exception(e)

    # Perform all asynchronous functions above in order. Capture the
    # promise and other initial results from the first playbook launch in
    # order to return that info immediately to the caller
    result = run_config_processor_playbook()
    result['promise'].then(run_ready_deployment_playbook) \
                     .then(retrieve_hostname) \
                     .then(run_site_playbook) \
                     .then(generate_hosts_file) \
                     .then(update_monasca) \
                     .then(cleanup) \
                     .catch(failure)
    # Note: this returns *before* all of the asynchronus tasks are performed.
    return jsonify({"id": result['id']}), 202, {'Location': result['url']}
@bp.route("/api/v2/servers/<id>/process", methods=['DELETE'])
@policy.enforce('lifecycle:run_playbook')
def remove_server(id):
"""Remove compute node
Remove a compute node by performing the following steps:
- update the customer model
- commit changes to the model
- run the config processor playbook
.. :quickref: Server; Remove compute node
**Example Request**:
The request contains contains a `process` object containing values relevant
to the process of deleting a server.
.. sourcecode:: http
DELETE /api/v2/servers/5935/process HTTP/1.1
Content-Type: application/json
{
"process" : {
"encryption-key": "somekey",
"commitMessage": "Deleting an old server"
}
}
**Example Response**:
.. sourcecode:: http
HTTP/1.1 202 ACCEPTED
Content-Type: application/json
Location: http://localhost:9085/api/v2/plays/6858
{
"id": 6858
}
"""
try:
body = request.get_json()
except Exception:
body = {}
LOG.debug('DELETE server got empty json payload - this is probably ok')
# Extract the keys of interest from the request body and normalize the
# request arguments
keys = ('commitMessage',
'encryptionKey',
'encryption-key')
opts = pick(body, keys)
# rekey is always blank because we will never rekey anything
opts['extra-vars'] = {
'encrypt': '',
'rekey': '',
'remove_deleted_servers': 'y',
'free_unused_addresses': 'y'
}
if 'encryptionKey' in opts:
opts['extra-vars']['encrypt'] = opts.pop('encryptionKey')
elif 'encryption-key' in opts:
opts['extra-vars']['encrypt'] = opts.pop('encryption-key')
if 'commitMessage' not in opts:
opts['commitMessage'] = 'Remove server %s' % id
# get the model
model = model_api.read_model()
servers = model['inputModel']['servers']
# Make sure the server does not already exist in the model
if id not in [s['id'] for s in servers]:
abort(404, 'Server %s does not exist' % id)
# Filter out the server to delete
model['inputModel']['servers'] = [s for s in servers if s['id'] != id]
model_api.write_model(model)
# commit the model
versions.commit_model(message=opts['commitMessage'])
play_id = int(1000 * time.time())
# The following local functions are all steps that will be run
# asynchronously in a series of promises. In (some) other languages that
# support promises, code blocks can be entered directly as an argument
# to the 'then' function, but this is not easily done in python. The
# closest thing to them might be multi-line lambda functions, but these
# are intentionally unsupported in python:
# http://www.artima.com/weblogs/viewpost.jsp?thread=147358
#
# For clarity, the functions will be defined in the same order that they
# are called. The @copy_current_request_context decorator provided
# by flask permits functions to access the http context supplied to the
# parent function.
@copy_current_request_context
def run_config_processor_playbook():
LOG.info("Running config processor playbook")
payload = pick(opts, ('extra-vars',))
result = playbooks.run_playbook('config-processor-run', payload,
play_id)
# return the entire result object, including the promise and
# other data
return result
@copy_current_request_context
def run_ready_deployment_playbook(prev):
LOG.info("Running ready deployment playbook")
result = playbooks.run_playbook('ready-deployment', play_id=play_id)
return result['promise']
@copy_current_request_context
def cleanup(prev):
LOG.info("Server successfully removed")
| |
COLUMN_INDEX_ORDINATES or 1 if undefined'%self.get_input_value("useOrdinatesColumn")
if key == 'plot': return 'Plot: all heights slopes psd_h psd_s csd_h csd_s. histo_s histo_h acf_h acf_s. Default=%s'%repr(self.get_input_value("plot"))
if key == 'summary': return 'gets a summary of all DABAM profiles'
return ''
def get_input_value_short_name(self, key):
    """Return the single-character command-line flag for input *key*.

    Parameters
    ----------
    key : str
        Name of the input option (e.g. 'entryNumber').

    Returns
    -------
    str
        The one-letter short flag, or '?' for unknown keys.
    """
    # Table-driven lookup replaces the original 18-branch if-chain.
    short_names = {
        'entryNumber': 'N',
        'silent': 's',
        'localFileRoot': 'l',
        'outputFileRoot': 'r',
        'setDetrending': 'D',
        'nbinS': 'b',
        'nbinH': 'e',
        'shadowCalc': 'S',
        'shadowNy': 'y',
        'shadowNx': 'x',
        'shadowWidth': 'w',
        'multiply': 'm',
        'oversample': 'I',
        'useHeightsOrSlopes': 'Z',
        'useAbscissasColumn': 'A',
        'useOrdinatesColumn': 'O',
        'plot': 'P',
        'summary': 'Y',
    }
    return short_names.get(key, '?')
#
# file names
#
def file_metadata(self):
    """Name of the metadata (.txt) file for the current entry."""
    return "%s.txt" % self._file_root()
def file_data(self):
    """Name of the profile data (.dat) file for the current entry."""
    return "%s.dat" % self._file_root()
#
# load profile and store data. This is the main action!!
#
def load(self, entry=None):
    """Load a profile (data + metadata) and run all calculations.

    This is the main entry point of the class.

    Parameters
    ----------
    entry : int, optional
        DABAM entry number to load; when None the previously configured
        entry number is used.

    Raises
    ------
    Exception
        For remote access with a non-positive entry number.
    """
    # Idiom fix: the original used "if entry is None: pass / else: ...".
    if entry is not None:
        self.set_input_entryNumber(entry)
    # load data and metadata
    self._load_file_metadata()
    self._load_file_data()
    # consistency check: remote access needs a concrete entry number
    if self.is_remote_access and self.get_input_value("entryNumber") <= 0:
        raise Exception("Error: entry number must be non-zero positive for remote access.")
    self.make_calculations()
def metadata_set_info(self,
                      YEAR_FABRICATION=None,
                      SURFACE_SHAPE=None,
                      FUNCTION=None,
                      LENGTH=None,
                      WIDTH=None,
                      THICK=None,
                      LENGTH_OPTICAL=None,
                      SUBSTRATE=None,
                      COATING=None,
                      FACILITY=None,
                      INSTRUMENT=None,
                      POLISHING=None,
                      ENVIRONMENT=None,
                      SCAN_DATE=None,
                      CALC_HEIGHT_RMS=None,
                      CALC_HEIGHT_RMS_FACTOR=None,
                      CALC_SLOPE_RMS=None,
                      CALC_SLOPE_RMS_FACTOR=None,
                      USER_EXAMPLE=None,
                      USER_REFERENCE=None,
                      USER_ADDED_BY=None,
                      ):
    """Store the user-editable metadata fields in ``self.metadata``.

    Every keyword argument is written to ``self.metadata`` under its own
    name (including the default None for arguments not supplied).

    Structural tags such as FILE_FORMAT, FILE_HEADER_LINES, X1_FACTOR,
    Y1_FACTOR, COLUMN_INDEX_ORDINATES and the PLOT_TITLE_* entries are
    managed elsewhere and deliberately not touched here.
    """
    # locals() at this point holds exactly the parameters; drop 'self'
    # and copy the rest verbatim into the metadata dictionary.
    info = dict(locals())
    info.pop('self')
    self.metadata.update(info)
#
#calculations
#
def make_calculations(self):
    """Run the full analysis chain on the loaded profile.

    Order matters: detrending feeds the PSD and histogram calculations,
    which in turn feed the statistical moments, output files and the
    printed summary.
    """
    #calculate detrended profiles
    self._calc_detrended_profiles()
    #calculate psd
    self._calc_psd()
    #calculate histograms
    self._calc_histograms()
    #calculate moments
    self.momentsHeights = moment(self.zHeights)
    self.momentsSlopes = moment(self.zSlopes)
    # write files (only when an output file root was configured)
    if self.get_input_value("outputFileRoot") != "":
        self._write_output_files()
    #write shadow file
    if self.get_input_value("shadowCalc"):
        self._write_file_for_shadow()
        if not(self.get_input_value("silent")):
            outFile = self.get_input_value("outputFileRoot")+'Shadow.dat'
            print ("File "+outFile+" for SHADOW written to disk.")
    #info (printed unless running silent)
    if not(self.get_input_value("silent")):
        print(self.info_profiles())
def stdev_profile_heights(self):
    """Sample standard deviation (ddof=1) of the detrended heights profile."""
    profile = self.zHeights
    return profile.std(ddof=1)
def stdev_profile_slopes(self):
    """Sample standard deviation (ddof=1) of the detrended slopes profile."""
    profile = self.zSlopes
    return profile.std(ddof=1)
def stdev_psd_heights(self):
    """RMS height derived from the cumulative spectral density (its last value)."""
    total_power = self.csdHeights[-1]
    return numpy.sqrt(total_power)
def stdev_psd_slopes(self):
    """RMS slope derived from the cumulative spectral density (its last value)."""
    total_power = self.csdSlopes[-1]
    return numpy.sqrt(total_power)
def stdev_user_heights(self):
    """RMS height error declared in the metadata, or None if unavailable.

    Multiplies CALC_HEIGHT_RMS by CALC_HEIGHT_RMS_FACTOR when the factor
    is defined.  Returns None when the value is missing or not numeric.

    Fixes: replaced the bare ``except:`` with specific exceptions and
    ``!= None`` with ``is None`` checks; a metadata dict that defines
    CALC_HEIGHT_RMS but lacks the FACTOR key now returns the RMS instead
    of accidentally returning None.
    """
    try:
        rms = self.metadata['CALC_HEIGHT_RMS']
        if rms is None:
            return None
        factor = self.metadata.get('CALC_HEIGHT_RMS_FACTOR')
        if factor is not None:
            return float(rms) * float(factor)
        return float(rms)
    except (KeyError, TypeError, ValueError):
        # Missing key or non-numeric metadata: treat as "not provided".
        return None
def stdev_user_slopes(self):
    """RMS slope error declared in the metadata, or None if unavailable.

    Multiplies CALC_SLOPE_RMS by CALC_SLOPE_RMS_FACTOR when the factor
    is defined.  Returns None when the value is missing or not numeric.

    Fixes: replaced the bare ``except:`` with specific exceptions and
    ``!= None`` with ``is None`` checks; a metadata dict that defines
    CALC_SLOPE_RMS but lacks the FACTOR key now returns the RMS instead
    of accidentally returning None.
    """
    try:
        rms = self.metadata['CALC_SLOPE_RMS']
        if rms is None:
            return None
        factor = self.metadata.get('CALC_SLOPE_RMS_FACTOR')
        if factor is not None:
            return float(rms) * float(factor)
        return float(rms)
    except (KeyError, TypeError, ValueError):
        # Missing key or non-numeric metadata: treat as "not provided".
        return None
def csd_heights(self):
    """Cumulative spectral density of heights, normalized to its total RMS."""
    normalization = self.stdev_psd_heights()
    return numpy.sqrt(self.csdHeights) / normalization
def csd_slopes(self):
    """Cumulative spectral density of slopes, normalized to its total RMS."""
    normalization = self.stdev_psd_slopes()
    return numpy.sqrt(self.csdSlopes) / normalization
def autocorrelation_heights(self):
    """Autocorrelation length of the heights profile (third value returned
    by ``autocorrelationfunction``)."""
    _, _, acf_length = autocorrelationfunction(self.y, self.zHeights)
    return acf_length
def autocorrelation_slopes(self):
    """Autocorrelation length of the slopes profile (third value returned
    by ``autocorrelationfunction``)."""
    _, _, acf_length = autocorrelationfunction(self.y, self.zSlopes)
    return acf_length
#
# info
#
def info_profiles(self):
    """Return a human-readable multi-line report of the loaded profile.

    Includes file names, selected metadata, the detrending applied and
    the statistical summary.  Returns an error string when no profile
    has been loaded yet.

    Fixes: bare ``except:`` clauses narrowed to ``except Exception`` (so
    KeyboardInterrupt/SystemExit are not swallowed) and the "Usinng"
    typo in the ellipse-detrending message.
    """
    if self.zHeights is None:
        return "Error: no loaded profile."
    txt = ""
    polDegree = self._get_polDegree()
    #
    # header and file names
    #
    txt += '\n---------- profile results -------------------------\n'
    if self.is_remote_access:
        txt += 'Remote directory:\n %s\n' % self.server
    txt += 'Data File: %s\n' % self.file_data()
    txt += 'Metadata File: %s\n' % self.file_metadata()
    # Metadata entries are optional; skip any that are missing or unprintable.
    try:
        txt += "\nUser reference: %s\n" % self.metadata["USER_REFERENCE"]
    except Exception:
        pass
    try:
        txt += "Added by (user): %s\n" % self.metadata["USER_ADDED_BY"]
    except Exception:
        pass
    try:
        txt += '\nSurface shape: %s\n' % (self.metadata['SURFACE_SHAPE'])
    except Exception:
        pass
    try:
        txt += 'Facility: %s\n' % (self.metadata['FACILITY'])
    except Exception:
        pass
    try:
        txt += 'Scan length: %.3f mm\n' % (1e3 * (self.y[-1] - self.y[0]))
    except Exception:
        pass
    txt += 'Number of points: %d\n' % (len(self.y))
    txt += '\n'
    # Detrending description: polDegree >= 0 means polynomial fit,
    # -1 means none, -3/-4 are the two ellipse-fit variants.
    if polDegree >= 0:
        if polDegree == 1:
            txt += "Linear detrending: z'=%g x%+g" % (self.coeffs[0], self.coeffs[1]) + "\n"
            txt += 'Radius of curvature: %.3F m' % (1.0 / self.coeffs[-2]) + "\n"
        else:
            txt += 'Polynomial detrending coefficients: ' + repr(self.coeffs) + "\n"
    elif polDegree == -1:
        txt += 'No detrending applied.\n'
    elif polDegree == -3:
        txt += 'Ellipse detrending applied. Using Optimized parameters:\n'
        txt += ' p = %f m \n' % self.coeffs[0]
        txt += ' q = %f m \n' % self.coeffs[1]
        txt += ' theta = %f rad \n' % self.coeffs[2]
        txt += ' vertical shift = %f nm \n' % self.coeffs[3]
    elif polDegree == -4:
        txt += 'Ellipse detrending applied. Using Design parameters:\n'
        txt += ' p = %f m \n' % self.coeffs[0]
        txt += ' q = %f m \n' % self.coeffs[1]
        txt += ' theta = %f rad \n' % self.coeffs[2]
        txt += ' vertical shift = %f nm \n' % self.coeffs[3]
    txt += self.statistics_summary()
    txt += '----------------------------------------------------\n'
    return txt
def statistics_summary(self):
    """Return a text block with slope and height statistics.

    Reports, for both the slopes and heights profiles: standard
    deviations (from the profile, the PSD and, when present, the user
    metadata), peak-to-valley before/after detrending, skewness and
    kurtosis, the PSD power-law fit (beta and fractal dimension Df) and
    the autocorrelation length.  Assumes make_calculations() has run.
    """
    txt = ""
    txt += 'Slopes profile:\n'
    txt += ' StDev of slopes profile: %.3f urad\n' %( 1e6*self.stdev_profile_slopes() )
    txt += ' from PSD: %.3f urad\n' %( 1e6*self.stdev_psd_slopes())
    if self.stdev_user_slopes() != None:
        txt += ' from USER (metadata): %.3f urad\n' %(1e6*self.stdev_user_slopes())
    txt += ' Peak-to-valley: no detrend: %.3f urad\n' %(1e6*(self.zSlopesUndetrended.max() - self.zSlopesUndetrended.min()))
    txt += ' with detrend: %.3f urad\n' %(1e6*(self.zSlopes.max() - self.zSlopes.min() ))
    txt += ' Skewness: %.3f, Kurtosis: %.3f\n' %(self.momentsSlopes[2],self.momentsSlopes[3])
    # beta is the (positive) exponent of the PSD power-law fit; Df is the
    # fractal dimension derived from it.
    beta = -self.powerlaw["slp_pendent"]
    txt += ' PSD power law fit: beta:%.3f, Df: %.3f\n' %(beta,(5-beta)/2)
    txt += ' Autocorrelation length:%.3f\n' %(self.autocorrelation_slopes())
    txt += 'Heights profile: \n'
    txt += ' StDev of heights profile: %.3f nm\n' %(1e9*self.stdev_profile_heights() )
    txt += ' from PSD: %.3f nm\n' %(1e9*self.stdev_psd_heights() )
    if self.stdev_user_heights() != None:
        txt += ' from USER (metadata): %.3f nm\n' %(1e9*self.stdev_user_heights())
    txt += ' Peak-to-valley: no detrend: %.3f nm\n' %(1e9*(self.zHeightsUndetrended.max() - self.zHeightsUndetrended.min()))
    txt += ' with detrend: %.3f nm\n' %(1e9*(self.zHeights.max() - self.zHeights.min() ))
    txt += ' Skewness: %.3f, Kurtosis: %.3f\n' %(self.momentsHeights[2],self.momentsHeights[3])
    beta = -self.powerlaw["hgt_pendent"]
    txt += ' PSD power law fit: beta:%.3f, Df: %.3f\n' %(beta,(5-beta)/2)
    txt += ' Autocorrelation length:%.3f\n' %(self.autocorrelation_heights())
    return txt
def plot(self, what=None):
    """Plot profiles and statistics with matplotlib.

    Parameters
    ----------
    what : str or None
        Space-separated plot names, or "all".  When None, the configured
        "plot" input value is used.  Recognized names: heights, slopes,
        psd_h, psd_s, csd_h, csd_s, histo_h, histo_s, acf_h, acf_s.

    Fixes: "cds_s" typo in the "all" list (csd_s was never plotted),
    bare ``except:`` narrowed to ImportError, unused figure variables
    removed, and the help message now lists the histo options too.
    """
    try:
        from matplotlib import pylab as plt
    except ImportError:
        print("Cannot make plots. Please install matplotlib.")
        return None
    if what is None:
        what = self.get_input_value("plot")
    if what == "all":
        what = ["heights", "slopes", "psd_h", "psd_s", "csd_h", "csd_s", "histo_s", "histo_h"]
    else:
        what = what.split(" ")
    for i, iwhat in enumerate(what):
        print("plotting: ", iwhat)
        if (iwhat == "heights"):
            plt.figure(1)
            plt.plot(1e3*self.y, 1e6*self.zHeights)
            plt.title("heights profile")
            plt.xlabel("Y [mm]")
            plt.ylabel("Z [um]")
        elif (iwhat == "slopes"):
            plt.figure(2)
            plt.plot(1e3*self.y, 1e6*self.zSlopes)
            plt.title("slopes profile")
            plt.xlabel("Y [mm]")
            plt.ylabel("Zp [urad]")
        elif (iwhat == "psd_h"):
            plt.figure(3)
            plt.loglog(self.f, self.psdHeights)
            # Overlay the power-law fit (full range and fitted range).
            y = self.f**(self.powerlaw["hgt_pendent"])*10**self.powerlaw["hgt_shift"]
            i0 = self.powerlaw["index_from"]
            i1 = self.powerlaw["index_to"]
            plt.loglog(self.f, y)
            plt.loglog(self.f[i0:i1], y[i0:i1])
            beta = -self.powerlaw["hgt_pendent"]
            plt.title("PSD of heights profile (beta=%.2f,Df=%.2f)"%(beta,(5-beta)/2))
            plt.xlabel("f [m^-1]")
            plt.ylabel("PSD [m^3]")
        elif (iwhat == "psd_s"):
            plt.figure(4)
            plt.loglog(self.f, self.psdSlopes)
            y = self.f**(self.powerlaw["slp_pendent"])*10**self.powerlaw["slp_shift"]
            i0 = self.powerlaw["index_from"]
            i1 = self.powerlaw["index_to"]
            plt.loglog(self.f, y)
            plt.loglog(self.f[i0:i1], y[i0:i1])
            beta = -self.powerlaw["slp_pendent"]
            plt.title("PSD of slopes profile (beta=%.2f,Df=%.2f)"%(beta,(5-beta)/2))
            plt.xlabel("f [m^-1]")
            plt.ylabel("PSD [rad^3]")
        elif (iwhat == "csd_h"):
            plt.figure(5)
            plt.semilogx(self.f, self.csd_heights())
            plt.title("Cumulative Spectral Density of heights profile")
            plt.xlabel("f [m^-1]")
            plt.ylabel("csd_h")
        elif (iwhat == "csd_s"):
            plt.figure(6)
            plt.semilogx(self.f, self.csd_slopes())
            plt.title("Cumulative Spectral Density of slopes profile")
            plt.xlabel("f [m^-1]")
            plt.ylabel("csd_s")
        elif (iwhat == "histo_s"):
            plt.figure(7)
            plt.plot(1e6*self.histoSlopes["x_path"], self.histoSlopes["y1_path"])
            plt.plot(1e6*self.histoSlopes["x_path"], self.histoSlopes["y2_path"])
            plt.title("slopes histogram and Gaussian with StDev: %10.3f urad"%(1e6*self.stdev_profile_slopes()))
            plt.xlabel("Z' [urad]")
            plt.ylabel("counts")
        elif (iwhat == "histo_h"):
            plt.figure(8)
            plt.plot(1e9*self.histoHeights["x_path"], self.histoHeights["y1_path"])
            plt.plot(1e9*self.histoHeights["x_path"], self.histoHeights["y2_path"])
            plt.title("heights histogram and Gaussian with StDev: %10.3f nm"%(1e9*self.stdev_profile_heights()))
            plt.xlabel("Z [nm]")
            plt.ylabel("counts")
        elif (iwhat == "acf_h"):
            plt.figure(9)
            c1, c2, c3 = autocorrelationfunction(self.y, self.zHeights)
            plt.plot(c1[0:-1], c2)
            plt.title("Heights autocovariance. Autocorrelation length (acf_h=0.5)=%.3f m"%(c3))
            plt.xlabel("Length [m]")
            plt.ylabel("acf")
        elif (iwhat == "acf_s"):
            plt.figure(10)
            c1, c2, c3 = autocorrelationfunction(self.y, self.zSlopes)
            plt.plot(c1[0:-1], c2)
            plt.title("Slopes autocovariance. Autocorrelation length (acf_s=0.5)=%.3f m"%(c3))
            plt.xlabel("Length [m]")
            plt.ylabel("acf_s")
        else:
            print("Plotting options are: heights slopes psd_h psd_s csd_h csd_s histo_h histo_s acf_h acf_s")
            return None
    plt.show()
def write_template(self, number_string="000", FILE_FORMAT=1):
    """Write a DABAM entry template: dabam-NNN.txt (metadata as JSON)
    and dabam-NNN.dat (abscissa + profile data).

    FILE_FORMAT:
        1 = slopes in Col2
        2 = heights in Col2
        (the X1 Y1 X2 Y2 formats 3 and 4 are not supported by this
        writer and raise an Exception)

    Parameters
    ----------
    number_string : str
        Entry number used in the output file names.
    FILE_FORMAT : int
        Data layout tag, stored in the metadata and used to pick the
        column written to the .dat file.

    Fixes: files are now closed via context managers even when an
    unsupported FILE_FORMAT aborts the write, and the final message
    reports the .dat file name instead of the .txt name twice.
    """
    metadata = self.metadata.copy()
    metadata["FILE_FORMAT"] = FILE_FORMAT
    metadata["X1_FACTOR"] = 1.0
    metadata["Y1_FACTOR"] = 1.0
    txt_name = "dabam-%s.txt" % number_string
    dat_name = "dabam-%s.dat" % number_string
    with open(txt_name, 'w') as f:
        f.write(json.dumps(metadata, ensure_ascii=True, indent="    "))
    with open(dat_name, 'w') as f:
        for i in range(self.y.size):
            if FILE_FORMAT == 1:
                f.write("%g %g\n" % (self.y[i], self.zSlopes[i]))
            elif FILE_FORMAT == 2:
                f.write("%g %g\n" % (self.y[i], self.zHeights[i]))
            else:
                raise Exception("Cannot write data with FILE_FORMAT != 1,2")
    print("Files %s and %s written to disk. " % (txt_name, dat_name))
#
# auxiliar methods for internal use
#
def | |
0, "outbytes": 0},
25: {"inbytes": 0, "outbytes": 0},
30: {"inbytes": 0, "outbytes": 0},
},
'nn::am::service::ISystemAppletProxy': {
0: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::ICommonStateGetter']},
1: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::ISelfController']},
2: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IWindowController']},
3: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IAudioController']},
4: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IDisplayController']},
10: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IProcessWindingController']},
11: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::ILibraryAppletCreator']},
20: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IHomeMenuFunctions']},
21: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IGlobalStateController']},
22: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IApplicationCreator']},
1000: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IDebugFunctions']},
},
'nn::am::service::IDebugFunctions': {
0: {"inbytes": 4, "outbytes": 0},
1: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::am::service::IApplicationAccessor']},
10: {"inbytes": 4, "outbytes": 0},
20: {"inbytes": 0, "outbytes": 0},
},
},
'ssl': {
'nn::ssl::sf::ISslService': {
0: {"inbytes": 0x10, "outbytes": 0, "outinterfaces": ['nn::ssl::sf::ISslContext'], "pid": True},
1: {"inbytes": 0, "outbytes": 4},
},
'nn::sf::hipc::detail::IHipcManager': {
0: {"inbytes": 0, "outbytes": 4},
1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]},
3: {"inbytes": 0, "outbytes": 2},
4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
},
'nn::ssl::sf::ISslContext': {
0: {"inbytes": 8, "outbytes": 0},
1: {"inbytes": 4, "outbytes": 4},
2: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::ssl::sf::ISslConnection']},
3: {"inbytes": 0, "outbytes": 4},
4: {"inbytes": 4, "outbytes": 8, "buffers": [5]},
5: {"inbytes": 0, "outbytes": 8, "buffers": [5, 5]},
6: {"inbytes": 8, "outbytes": 0},
7: {"inbytes": 8, "outbytes": 0},
8: {"inbytes": 4, "outbytes": 8},
},
'nn::ssl::sf::ISslConnection': {
0: {"inbytes": 4, "outbytes": 4},
1: {"inbytes": 0, "outbytes": 0, "buffers": [5]},
2: {"inbytes": 4, "outbytes": 0},
3: {"inbytes": 4, "outbytes": 0},
4: {"inbytes": 0, "outbytes": 4},
5: {"inbytes": 0, "outbytes": 4, "buffers": [6]},
6: {"inbytes": 0, "outbytes": 4},
7: {"inbytes": 0, "outbytes": 4},
8: {"inbytes": 0, "outbytes": 0},
9: {"inbytes": 0, "outbytes": 8, "buffers": [6]},
10: {"inbytes": 0, "outbytes": 4, "buffers": [6]},
11: {"inbytes": 0, "outbytes": 4, "buffers": [5]},
12: {"inbytes": 0, "outbytes": 4},
13: {"inbytes": 0, "outbytes": 4, "buffers": [6]},
14: {"inbytes": 8, "outbytes": 4},
15: {"inbytes": 0, "outbytes": 0},
16: {"inbytes": 0, "outbytes": 4},
17: {"inbytes": 4, "outbytes": 0},
18: {"inbytes": 0, "outbytes": 4},
19: {"inbytes": 0, "outbytes": 0},
20: {"inbytes": 4, "outbytes": 0},
21: {"inbytes": 0, "outbytes": 4},
},
},
'nim': {
'nn::ntc::detail::service::IStaticService': {
0: {"inbytes": 8, "outbytes": 0, "outinterfaces": ['nn::ntc::detail::service::IEnsureNetworkClockAvailabilityService']},
},
'nn::ntc::detail::service::IEnsureNetworkClockAvailabilityService': {
0: {"inbytes": 0, "outbytes": 0},
1: {"inbytes": 0, "outbytes": 0, "outhandles": [1]},
2: {"inbytes": 0, "outbytes": 0},
3: {"inbytes": 0, "outbytes": 0},
4: {"inbytes": 0, "outbytes": 1},
},
'nn::nim::detail::INetworkInstallManager': {
0: {"inbytes": 0x18, "outbytes": 0x10},
1: {"inbytes": 0x10, "outbytes": 0},
2: {"inbytes": 0, "outbytes": 4, "buffers": [6]},
3: {"inbytes": 0x10, "outbytes": 0, "outhandles": [1], "outinterfaces": ['nn::nim::detail::IAsyncResult']},
4: {"inbytes": 0x10, "outbytes": 0x28},
5: {"inbytes": 0x10, "outbytes": 0},
6: {"inbytes": 0x10, "outbytes": 0x10, "buffers": [5]},
7: {"inbytes": 0x10, "outbytes": 0},
8: {"inbytes": 0, "outbytes": 4, "buffers": [6]},
9: {"inbytes": 0x10, "outbytes": 0, "outhandles": [1], "outinterfaces": ['nn::nim::detail::IAsyncResult']},
10: {"inbytes": 0x10, "outbytes": 0x20},
11: {"inbytes": 0x10, "outbytes": 0},
12: {"inbytes": 0, "outbytes": 0, "outhandles": [1], "outinterfaces": ['nn::nim::detail::IAsyncValue']},
13: {"inbytes": 0x10, "outbytes": 0, "outhandles": [1], "outinterfaces": ['nn::nim::detail::IAsyncValue']},
14: {"inbytes": 8, "outbytes": 4, "buffers": [6]},
15: {"inbytes": 0x10, "outbytes": 4, "buffers": [6]},
16: {"inbytes": 0, "outbytes": 0, "buffers": [5], "outhandles": [1], "outinterfaces": ['nn::nim::detail::IAsyncValue']},
17: {"inbytes": 0x10, "outbytes": 0},
18: {"inbytes": 0x10, "outbytes": 0, "buffers": [5]},
19: {"inbytes": 0x18, "outbytes": 0, "buffers": [22]},
20: {"inbytes": 0x10, "outbytes": 8},
21: {"inbytes": 0x10, "outbytes": 1},
22: {"inbytes": 0, "outbytes": 0x10},
},
'nn::sf::hipc::detail::IHipcManager': {
0: {"inbytes": 0, "outbytes": 4},
1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]},
3: {"inbytes": 0, "outbytes": 2},
4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
},
'nn::nim::detail::IAsyncValue': {
0: {"inbytes": 0, "outbytes": 8},
1: {"inbytes": 0, "outbytes": 0, "buffers": [6]},
2: {"inbytes": 0, "outbytes": 0},
},
'nn::nim::detail::IAsyncResult': {
0: {"inbytes": 0, "outbytes": 0},
1: {"inbytes": 0, "outbytes": 0},
},
},
'lbl': {
'nn::lbl::detail::ILblController': {
0: {"inbytes": 0, "outbytes": 0},
1: {"inbytes": 0, "outbytes": 0},
2: {"inbytes": 4, "outbytes": 0},
3: {"inbytes": 0, "outbytes": 4},
4: {"inbytes": 0, "outbytes": 0},
5: {"inbytes": 0, "outbytes": 4},
6: {"inbytes": 8, "outbytes": 0},
7: {"inbytes": 8, "outbytes": 0},
8: {"inbytes": 0, "outbytes": 4},
9: {"inbytes": 0, "outbytes": 0},
10: {"inbytes": 0, "outbytes": 0},
11: {"inbytes": 0, "outbytes": 1},
12: {"inbytes": 0, "outbytes": 0},
13: {"inbytes": 0, "outbytes": 0},
14: {"inbytes": 0, "outbytes": 1},
15: {"inbytes": 4, "outbytes": 0},
16: {"inbytes": 0, "outbytes": 4},
17: {"inbytes": 8, "outbytes": 0},
18: {"inbytes": 4, "outbytes": 4},
19: {"inbytes": 0xC, "outbytes": 0},
20: {"inbytes": 0, "outbytes": 0xC},
21: {"inbytes": 0xC, "outbytes": 0},
22: {"inbytes": 0, "outbytes": 0xC},
},
'nn::sf::hipc::detail::IHipcManager': {
0: {"inbytes": 0, "outbytes": 4},
1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]},
3: {"inbytes": 0, "outbytes": 2},
4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
},
},
'btm': {
'nn::btm::IBtmSystemCore': {
0: {"inbytes": 0, "outbytes": 0},
1: {"inbytes": 0, "outbytes": 0},
2: {"inbytes": 0, "outbytes": 0},
3: {"inbytes": 0, "outbytes": 1},
4: {"inbytes": 0, "outbytes": 0},
5: {"inbytes": 0, "outbytes": 0},
6: {"inbytes": 0, "outbytes": 1},
},
'nn::btm::IBtm': {
0: {"inbytes": 0, "outbytes": 4},
1: {"inbytes": 0, "outbytes": 0x2A},
2: {"inbytes": 0, "outbytes": 0, "outhandles": [1]},
3: {"inbytes": 0, "outbytes": 0, "buffers": [26]},
4: {"inbytes": 7, "outbytes": 0},
5: {"inbytes": 0, "outbytes": 0, "buffers": [25]},
6: {"inbytes": 4, "outbytes": 0},
7: {"inbytes": 4, "outbytes": 0},
8: {"inbytes": 0, "outbytes": 0, "outhandles": [1]},
9: {"inbytes": 0, "outbytes": 0, "buffers": [26]},
10: {"inbytes": 0x60, "outbytes": 0},
11: {"inbytes": 6, "outbytes": 0},
12: {"inbytes": 6, "outbytes": 0},
13: {"inbytes": 6, "outbytes": 0},
14: {"inbytes": 0, "outbytes": 0},
15: {"inbytes": 0, "outbytes": 0},
16: {"inbytes": 6, "outbytes": 0},
17: {"inbytes": 6, "outbytes": 0, "buffers": [25]},
},
'nn::btm::IBtmSystem': {
0: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::btm::IBtmSystemCore']},
},
'nn::sf::hipc::detail::IHipcManager': {
0: {"inbytes": 0, "outbytes": 4},
1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]},
3: {"inbytes": 0, "outbytes": 2},
4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
},
'nn::btm::IBtmDebug': {
0: {"inbytes": 0, "outbytes": 0, "outhandles": [1]},
1: {"inbytes": 0, "outbytes": 0},
2: {"inbytes": 0, "outbytes": 0},
3: {"inbytes": 0, "outbytes": 0, "buffers": [26]},
4: {"inbytes": 6, "outbytes": 0},
5: {"inbytes": 6, "outbytes": 0},
6: {"inbytes": 0xC, "outbytes": 0},
7: {"inbytes": 4, "outbytes": 0},
8: {"inbytes": 6, "outbytes": 0},
},
},
'erpt': {
'nn::erpt::sf::IManager': {
0: {"inbytes": 4, "outbytes": 0, "buffers": [6]},
1: {"inbytes": 0, "outbytes": 0, "outhandles": [1]},
},
'nn::sf::hipc::detail::IHipcManager': {
0: {"inbytes": 0, "outbytes": 4},
1: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
2: {"inbytes": 0, "outbytes": 0, "outhandles": [2]},
3: {"inbytes": 0, "outbytes": 2},
4: {"inbytes": 4, "outbytes": 0, "outhandles": [2]},
},
'nn::erpt::sf::ISession': {
0: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::erpt::sf::IReport']},
1: {"inbytes": 0, "outbytes": 0, "outinterfaces": ['nn::erpt::sf::IManager']},
},
'nn::erpt::sf::IContext': {
0: {"inbytes": 0, "outbytes": 0, "buffers": [5, 5]},
1: {"inbytes": 4, "outbytes": 0, "buffers": [5, 5, 5]},
},
'nn::erpt::sf::IReport': {
0: {"inbytes": 0x14, "outbytes": 0},
1: {"inbytes": 0, "outbytes": 4, "buffers": [6]},
2: {"inbytes": 4, "outbytes": 0},
3: {"inbytes": 0, "outbytes": 4},
4: {"inbytes": 0, "outbytes": 0},
5: {"inbytes": 0, "outbytes": 8},
},
},
'vi': {
'nn::visrv::sf::ISystemDisplayService': {
2201: {"inbytes": 0x10, "outbytes": 0},
2203: {"inbytes": 0x18, "outbytes": 0},
2204: {"inbytes": 8, "outbytes": 8},
2205: {"inbytes": 0x10, "outbytes": 0},
2207: {"inbytes": 0x10, "outbytes": 0},
2209: {"inbytes": 0x10, "outbytes": 0},
2312: {"inbytes": 0x10, "outbytes": 0x10, "buffers": [6]},
3000: {"inbytes": 8, "outbytes": 8, "buffers": [6]},
3002: {"inbytes": 8, "outbytes": 8, "buffers": [6]},
3200: {"inbytes": 8, "outbytes": 0x10},
3201: {"inbytes": 0x18, "outbytes": 0},
3202: {"inbytes": 8, "outbytes": 8},
3203: {"inbytes": 0x10, "outbytes": 0},
3204: {"inbytes": 8, "outbytes": 4},
3205: {"inbytes": 0x10, "outbytes": 0},
3206: {"inbytes": 8, "outbytes": 4},
3207: {"inbytes": 0x10, "outbytes": 0},
3208: {"inbytes": 8, "outbytes": 4},
3209: {"inbytes": 0x10, "outbytes": 0},
3210: {"inbytes": 8, "outbytes": 4},
3211: {"inbytes": 0x10, "outbytes": 0},
3214: {"inbytes": 8, "outbytes": 4},
3215: {"inbytes": 0x10, "outbytes": 0},
3216: {"inbytes": 8, "outbytes": 4},
3217: {"inbytes": 0x10, "outbytes": 0},
1200: {"inbytes": 8, "outbytes": 8},
1202: {"inbytes": 8, "outbytes": 8},
1203: {"inbytes": 8, "outbytes": 8},
3001: | |
<filename>setup_scripts/burn_streams.py
import os
from tabnanny import check
import time
import pickle
# import pandas as pd
import numpy as np
# import shapely
# from shapely.geometry import Polygon, Point
import geopandas as gpd
import xarray as xr
import rioxarray as rxr
import rasterio as rio
from rasterio import features
from numba import jit
from skimage.morphology import skeletonize
from scipy import ndimage
from pysheds.grid import Grid
from pysheds.view import Raster, ViewFinder
from pyproj import CRS, Proj
import warnings
warnings.filterwarnings('ignore')
t0 = time.time()
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(BASE_DIR, 'source_data/')
processed_data_dir = os.path.join(BASE_DIR, 'processed_data')
data_dir = '/media/danbot/Samsung_T5/geospatial_data/'
# dem_dir = os.path.join(DATA_DIR, 'dem_data/')
# dem_dir = os.path.join(data_dir, 'DEM_data/')
# processed_dem_dir = os.path.join(dem_dir, 'processed_dem/')
processed_dem_dir = '/home/danbot/Documents/code/hysets_validation/source_data/dem_data/processed_dem/'
# specify the DEM source
# either 'EarthEnv_DEM90' or 'USGS_3DEP'
DEM_source = 'EarthEnv_DEM90'
# DEM_source = 'USGS_3DEP'
def retrieve_and_preprocess_raster(region_code):
    """Load the processed region DEM and wrap it for pysheds processing.

    Opens the region's preprocessed DEM GeoTIFF (file-name layout depends
    on the DEM source) and returns a (pysheds Raster, rioxarray DataArray)
    pair sharing the same grid.

    NOTE(review): relies on the module-level globals `processed_dem_dir`,
    `DEM_source` and `resolution`; `resolution` is not defined in this
    section of the file -- confirm it is assigned before this is called.
    """
    if DEM_source == 'EarthEnv_DEM90':
        dem_fname = f'{region_code}_{DEM_source}_3005_{resolution}.tif'
    else:
        dem_fname = f'{region_code}_DEM_3005_{resolution}.tif'
    region_dem_path = os.path.join(processed_dem_dir, dem_fname)
    assert os.path.exists(region_dem_path)

    # The file is opened twice: via rioxarray (metadata / DataArray) and
    # via pysheds (hydrological grid).
    rds = rxr.open_rasterio(region_dem_path, mask_and_scale=True, dtype=np.float32)
    grid = Grid.from_raster(region_dem_path)
    dem = grid.read_raster(region_dem_path, dtype=np.float64)
    viewfinder = ViewFinder(affine=dem.affine, shape=dem.shape, crs=dem.crs, nodata=dem.nodata)
    return Raster(rds.data[0], viewfinder=viewfinder), rds
def get_river_mask(region_code, rds):
    """Build a boolean, one-pixel-wide river mask aligned with *rds*.

    Parameters
    ----------
    region_code : str
        Region identifier used to locate the NHN vector hydrography layer.
    rds : rioxarray DataArray
        Region DEM whose grid the mask must match (band dim at axis 0).

    Returns
    -------
    numpy.ndarray of bool
        True where a skeletonized river centreline crosses the DEM grid.
    """
    nhn_grouped_vector_path = os.path.join(processed_data_dir, 'grouped_hydrographic_features/')
    vector_rivers_path = os.path.join(nhn_grouped_vector_path, f'{region_code}/NLFLOW/')
    # BUG FIX: `mask_and_scale` is a rioxarray/xarray keyword; it is not a
    # geopandas.read_file parameter and was being forwarded to the vector
    # backend, where it is invalid.  Dropped.
    rivers = gpd.read_file(vector_rivers_path)
    affine = rds.rio.transform(recalc=False)
    # Rasterize the river geometries onto the DEM grid (drop the band dim).
    river_raster = features.rasterize(rivers.geometry, out_shape=rds.shape[1:],
                                      transform=affine, all_touched=False)
    # Thin the rasterized rivers to one-pixel-wide centrelines.
    river_raster = skeletonize(river_raster).astype(np.uint8)
    # Boolean mask of stream pixels.
    mask = river_raster.astype(bool)
    return mask
# now create polygons using the raster just generated
def retrieve_raster(fpath):
    """Open *fpath* as a masked/scaled float32 raster.

    Returns a (DataArray, CRS, affine transform) triple.
    """
    raster = rxr.open_rasterio(fpath, mask_and_scale=True, dtype=np.float32)
    return raster, raster.rio.crs, raster.rio.transform(recalc=False)
@jit(nopython=True)
def flatten_streams_windowed(dem, max_depth=1):
    """Smooth single-pixel spikes/pits along rasterized stream cells, in place.

    *dem* is expected to be NaN everywhere except on stream pixels (streams
    are sparse).  For every finite pixel, the two smallest neighbours inside
    a (2*max_depth+1)-wide window are found; if the pixel is higher than
    both or lower than both, it is replaced by their mean.

    Returns (dem, stream_ends, n_adjustments, tot_adjustment):
    - dem: the same array, modified in place and returned for convenience.
    - stream_ends: flat array of (i, j, elevation) triples for pixels with
      exactly one finite neighbour (stream termini).  NOTE(review): it is
      pre-seeded with three NaN entries -- callers must skip or drop them.
    - n_adjustments / tot_adjustment: count and summed magnitude of the
      elevation edits (tot_adjustment is positive when cells were lowered).
    """
    n_adjustments = 0
    tot_adjustment = 0
    rows = dem.shape[0]  # number of steps in the y direction
    cols = dem.shape[1]  # number of steps in the x direction
    # Numba-friendly flat accumulator for stream-terminus records,
    # pre-filled with NaN (see docstring caveat).
    stream_ends = np.empty((3,))
    stream_ends.fill(np.nan)
    for i in range(rows):
        for j in range(cols):
            px_el = dem[i, j]
            # Only stream pixels are finite; everything else is skipped.
            if np.isfinite(px_el):
                # Window bounds, clipped at the raster edges.
                c1, c2 = max(0, j - max_depth), min(j + max_depth + 1, cols)
                r1, r2 = max(0, i - max_depth), min(i + max_depth + 1, rows)
                window = dem[r1:r2, c1:c2]
                # The target cell sits at the window centre except near an
                # edge; find_del_loc yields its 2-d and flattened position.
                target_idx_coords, flat_index_loc = find_del_loc(i, j, window.shape[1], max_depth)
                # Neighbour elevations with the target cell removed.
                outer_vals = np.delete(window, flat_index_loc)
                # Partial sort: grab the two smallest neighbours without
                # fully ordering the array (NaNs partition to the back).
                two_smallest = np.partition(outer_vals, 2)[:2]
                if np.count_nonzero(~np.isnan(outer_vals)) == 1:
                    # Exactly one finite neighbour -> streamline terminus.
                    stream_ends = np.append(stream_ends,(i, j, px_el))
                # If the centre pixel is higher than both, or lower than
                # both, of its two lowest neighbours, flatten it to their
                # mean.
                if np.isfinite(two_smallest).all():
                    if np.less(two_smallest, np.array(px_el)).all() | np.less(np.array(px_el), two_smallest).all():
                        new_el = np.mean(two_smallest)
                        dem[i, j] = new_el
                        n_adjustments += 1
                        tot_adjustment += px_el - new_el
    return dem, stream_ends, n_adjustments, tot_adjustment
@jit(nopython=True)
def find_del_loc(i, j, window_width, max_depth=1):
    """Locate the target pixel inside its (possibly edge-clipped) window.

    A cell in the raster interior sits at (max_depth, max_depth) of its
    window; near the top/left edges the window is clipped, so the cell's
    window coordinate equals its raster coordinate instead.

    Returns ((ti, tj), flat_index): the 2-d window position of the target
    pixel and its index in the row-major flattened window.
    """
    ti = i if i <= max_depth else max_depth
    tj = j if j <= max_depth else max_depth
    # Row-major flattened position of the target pixel.
    flat_index = window_width * ti + tj
    return (int(ti), int(tj)), int(flat_index)
@jit(nopython=True)
def get_min_outer_pixel_indices(window, outer_vals, flat_index_loc, xs, ys, ix, jx, checked_indices, prevent_inf_loop, rows, cols, max_depth):
    """Find the lowest-elevation neighbour of the window's target cell.

    `xs`/`ys` are parallel arrays of stream-cell coordinates and
    `checked_indices` holds positions (into xs/ys) already visited.

    Returns (new_dem_idx, min_outer_el, indices_idx, prevent_inf_loop,
    end_of_line): the DEM (row, col) of the chosen neighbour, its
    elevation, its position in xs/ys, the recursion-guard counter, and a
    flag that is True when every candidate neighbour has been exhausted.

    :raises Exception: when the retry recursion exceeds 4 levels.
    """
    # Index (within outer_vals) of the smallest remaining neighbour.
    min_outer_val_idx = np.nanargmin(outer_vals)
    # The centre pixel was deleted from outer_vals, so shift the index by 1
    # when it lies at/after the deletion point to recover the window index.
    if min_outer_val_idx >= flat_index_loc:
        min_outer_val_idx += 1
    # Reconstruct the 2-d window indices of the minimum neighbour.
    min_row_idx = int(np.floor(min_outer_val_idx / window.shape[1]))
    min_col_idx = int(min_outer_val_idx - (min_row_idx) * window.shape[1])
    min_outer_el = window[min_row_idx, min_col_idx]
    # Convert window-relative indices to DEM indices; windows clipped at the
    # top/left raster edges use a different offset.
    new_dem_ix = ix + (min_row_idx - max_depth)
    if ix < max_depth:
        new_dem_ix = ix + min_row_idx
    new_dem_jx = jx + (min_col_idx - max_depth)
    if jx < max_depth:
        new_dem_jx = jx + min_col_idx
    new_dem_idx = (new_dem_ix, new_dem_jx)
    # Position of the candidate in the parallel coordinate arrays.
    indices_idx = np.where(np.logical_and(xs == new_dem_idx[0], ys == new_dem_idx[1]))[0][0]
    # NOTE(review): the original inline comments ("already checked cell")
    # suggest this branch was meant for candidates that HAVE been visited,
    # but the condition selects candidates that have NOT been seen.
    # Possible inverted test -- kept as-is to preserve behaviour; confirm
    # against the caller (travel_stream).
    if not np.any(np.in1d(checked_indices, indices_idx)):
        # Undo the deletion shift before nan-ing the entry in outer_vals.
        if min_outer_val_idx >= flat_index_loc:
            min_outer_val_idx -= 1
        # Exclude this candidate and retry with the next-smallest neighbour.
        outer_vals[min_outer_val_idx] = np.nan
        if np.all(np.isnan(outer_vals)):
            # All neighbours exhausted: signal end of line.
            return new_dem_idx, min_outer_el, indices_idx, prevent_inf_loop, True
        else:
            new_dem_idx, min_outer_el, indices_idx, prevent_inf_loop, end_of_line = get_min_outer_pixel_indices(window, outer_vals, flat_index_loc, xs, ys, ix, jx, checked_indices, prevent_inf_loop, rows, cols, max_depth)
        prevent_inf_loop += 1
        if prevent_inf_loop >= 4:
            # BUG FIX: was `raise Exception; 'infinite loop!'`, which raised
            # a bare Exception and left the message as a dead string
            # statement.  (Also removed the unused `total_len` local.)
            raise Exception('infinite loop!')
    return new_dem_idx, min_outer_el, indices_idx, prevent_inf_loop, False
def get_windows(raster, dem, ix, jx, rows, cols):
    """Return matching 3x3 views of *raster* and *dem* centred on (ix, jx).

    The views are clipped at the array edges, so cells on the border yield
    2x2 or 2x3 windows.  No data is copied: numpy slicing returns views.
    """
    row_lo, row_hi = max(0, ix - 1), min(ix + 2, rows)
    col_lo, col_hi = max(0, jx - 1), min(jx + 2, cols)
    raster_window = raster[row_lo:row_hi, col_lo:col_hi]
    dem_window = dem[row_lo:row_hi, col_lo:col_hi]
    return raster_window, dem_window
def check_adj_slope_elevations(raster_window, dem_window, ix, jx, rows, cols):
    """Look for a lower elevation in cells adjacent to the target stream cell.

    Sometimes the stream vector does not line up with the thalweg in the
    DEM, so the surrounding (NaN-in-stream-raster) cells are inspected for
    a lower elevation to substitute (not done at the headwater cell).

    Returns the minimum adjacent elevation found, or the sentinel 1E9 when
    no candidate cells exist.

    NOTE(review): the caller (travel_stream) unpacks TWO values from this
    function, but only one is returned -- confirm the intended signature.
    NOTE(review): `els` reads raster_window at its NaN positions (which
    yields NaN values), while `dem_window` is never used; this looks like
    it was meant to read dem_window instead -- TODO confirm.
    NOTE(review): the `ix == rows` / `jx == cols` edge tests can never be
    true if ix/jx are 0-based indices bounded by rows/cols -- possible
    off-by-one (rows - 1 / cols - 1 intended?).
    """
    ta = time.time()  # unused timing variable (kept as-is)
    # Candidate N/W/E/S neighbour offsets inside the 3x3 window; the lists
    # below substitute trimmed sets when the target sits on a raster edge
    # (the window itself is clipped there, shifting the offsets).
    pairs = [[1, 0], [0, 1], [1, 2], [2, 1]]
    if ix == 0:  # top row
        if jx == 0:  # top left corner
            pairs = [[1, 0], [0, 1]]
        elif jx == cols:  # top right corner
            pairs = [[0, 0], [1, 1]]
        else:  # top middle
            pairs = [[0, 0], [1, 1], [0, 2]]
    if ix == rows:  # bottom row
        if jx == cols:  # bottom right corner
            pairs = [[1, 0], [0, 1]]
        elif jx == 0:  # bottom left corner
            pairs = [[0, 0], [1, 1]]
        else:  # bottom middle
            pairs = [[1, 0], [0, 1], [1, 2]]
    # Window cells that are NaN in the stream raster (i.e. non-stream cells).
    nan_ixs = np.argwhere(np.isnan(raster_window)).tolist()
    # Keep only the NaN cells that are valid neighbour offsets.
    ics = [e for e in nan_ixs if e in pairs]
    min_adjacent_el = 1E9
    if len(ics) > 0:
        els = [raster_window[ic[0], ic[1]] for ic in ics]
        if len(els) > 0:
            min_adjacent_el = min(els)
    return min_adjacent_el
def check_adj_stream_els(dem, ci, cj):
    """Report the elevation range of stream cells other than (ci, cj).

    Finite cells in *dem* are stream cells.  Returns (min, max) of their
    elevations, excluding the cell at (ci, cj), or None when there are no
    other finite cells.
    """
    finite_cells = np.argwhere(np.isfinite(dem))
    neighbour_els = [dem[cell[0], cell[1]] for cell in finite_cells
                     if tuple(cell) != (ci, cj)]
    if not neighbour_els:
        return None
    return (min(neighbour_els), max(neighbour_els))
def travel_stream(raster, dem, indices, check, tot_adjustment, n_adjustments, max_depth=1):
# don't check the first (headwater) cell
headwater_cell_unchecked = False
n_checks = 0
while len(indices) > 0:
check += 1
# if check >= 10:
# break
(ix, jx) = indices.pop()
px_el = dem[ix, jx]
px_el_og = px_el
rows, cols = raster.shape[0], raster.shape[1]
(ci, cj), flat_idx = find_del_loc(ix, jx, dem.shape[1])
raster_window, dem_window = get_windows(raster, dem, ix, jx, rows, cols)
min_adjacent_slope_el, next_idx = check_adj_slope_elevations(raster_window, dem_window, ix, jx, rows, cols)
if not min_adjacent_slope_el:
pass
# print(f'current: {i}, el: {px_el:.1f} checked_indices', checked_indices)
# checked_el[i] = px_el
# if there is an outer pixel with a lower elevation
# that isn't in the | |
<filename>src/software/decode/ctypeAutoGen.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
'''
Brief:
ctypeAutoGen.py - Method and Apparatus to extract compiled code into interpreted code.
Description:
Software application to decode firmware C data structures to Python ctypes.
Classes:
Enter ("") to display Class listings.
Requirement(s):
All data structure types must be named, meaning no Anonymous subtypes.
I.E. Invalid coding standard!
typedef struct
{
union
{
struct
{
uint32_t bottom: 16;
uint32_t top: 16;
};
uint32_t height;
};
} person;
I.E. Valid coding standard!
typedef struct
{
uint32_t bottom: 16;
uint32_t top: 16;
} personParts_t;
typedef union
{
uint32_t height; // Note: the all or struct must be before the bitfields.
personParts_t part;
} personAll_t;
typedef struct
{
personAll_t allMeta;
} personHeight_t;
Usage:
To decode within a wrapper or a python script
from telemetry.commands.telemetryCmd import *
myObj = TelemetryObjectCommands()
myObj.parseTelemetry(5,inFile=r"\testball\testTelemHostInitObjBis\Object_5_ver1.0_bank0.bin") #todo: update usage
'''
################################################################################################################
################################################################################################################
## General Python module imports
################################################################################################################
################################################################################################################
import os, re, sys, ctypes, shutil, errno, logging, platform, uuid, random
import time, builtins, struct, binascii, filecmp # @todo cleanup unused... exceptions
try:
import enum # @todo cleanup unused
from enum import * # pip install enum34 # backport from 3.x to 2.7 https://pypi.org/project/enum34/ # @todo cleanup explicit usage
except:
pass
################################################################################################################
################################################################################################################
## Explicit importing of headers
################################################################################################################
################################################################################################################
from pprint import pprint
from threading import Timer
from subprocess import Popen, PIPE
from optparse import OptionParser
from ctypes import * # @todo cleanup explicit usage
from ctypes.util import find_library # @todo cleanup explicit usage
from optparse import OptionGroup # @todo cleanup explicit usage
from sys import version_info # @todo cleanup explicit usage
# from builtins import classmethod, int, long # @todo cleanup explicit usage
# from exceptions import * # @todo cleanup explicit usage
################################################################################################################
################################################################################################################
## Debug methods.
################################################################################################################
################################################################################################################
ENABLE_CLANG = 0        # @todo Feature flag: enable the (unfinished) LLVM/Clang parser path below.
ENABLE_DEBUG_ENTER = 0  # @todo Debug switch; its usage is not visible in this section -- confirm semantics.
################################################################################################################
################################################################################################################
## LLVM CLang Compiler Keywords
################################################################################################################
################################################################################################################
# Optional LLVM/Clang-based parsing path -- not implemented yet; the import
# list is kept commented out until the work behind the @todo markers lands.
if ENABLE_CLANG:
    print("Using Clang and it is not supported yet...")
    # Diagram of API https://coggle.it/diagram/VSk7_32dyC9M7Wtk/t/python-clang
    # import clang # @todo
    # from clang.cindex import Index # @todo
    # from clang.cindex import CursorKind, TypeKind # @todo
    # from clang.cindex import Index, TranslationUnit # @todo
    # from clang.cindex import TypeKind # @todo
    # import ctypeslib # @todo
    # from ctypeslib.codegen import cursorhandler # @todo
    # from ctypeslib.codegen import typedesc # @todo
    # from ctypeslib.codegen import typehandler # @todo
    # from ctypeslib.codegen import util # @todo
    # from ctypeslib.codegen.util import log_entity # @todo
    # from ctypeslib.codegen.handler import ClangHandler # @todo
    # from ctypeslib.codegen.handler import CursorKindException # @todo
    # from ctypeslib.codegen.handler import InvalidDefinitionError # @todo
    # from ctypeslib.codegen.handler import DuplicateDefinitionException # @todo
################################################################################################################
################################################################################################################
## Operation Mode
################################################################################################################
################################################################################################################
# Module-level media trunk selector; assigned by redefineMedia().
TRUNK = None


def redefineMedia(shellMode=False):
    """Select the media trunk name from the shell-mode flag.

    Sets the module-level ``TRUNK`` global to ``"NAND"`` when *shellMode*
    is True and to ``"SXP"`` otherwise.
    """
    global TRUNK
    TRUNK = "NAND" if shellMode == True else "SXP"
    return
################################################################################################################
################################################################################################################
## Filenaming Globals and Updates for Threading
################################################################################################################
################################################################################################################
# Default (non-unique) working-file names.  redefineFileNames() rebinds
# these with a per-run unique id when isolation between runs is required.
extTXT = ".txt"
fname_structDefFile = "ctypeAutoGen_structDefs" + extTXT
fname_subStructDefFile = "ctypeAutoGen_subStructDefs" + extTXT
fname_structSizeFile = "ctypeAutoGen_structSizes" + extTXT
fname_srcFileFile = "ctypeAutoGen_srcFiles" + extTXT
fname_typedefFile = "ctypeAutoGen_typedefs" + extTXT
fname_tempSubStructDefs = "ctypeAutoGen_tempSubStructDefs" + extTXT
fname_logFileName = "ctypeAutoGen_log" + extTXT

extRC = ".rc"
fname_multiCmdFile = "ctypeAutoGen_multiCmdFile" + extRC
fname_subStructMultiCmdFile = "ctypeAutoGen_subStructMultiCmdFile" + extRC
fname_structSizeMultiCmdFile = "ctypeAutoGen_structSizeMultiCmdFile" + extRC
fname_srcFileMultiCmdFile = "ctypeAutoGen_srcFileMultiCmdFile" + extRC
fname_typedefMultiCmdFile = "ctypeAutoGen_typedefMultiCmdFile" + extRC
def redefineFileNames():
'''
Allows for unique id of files.
'''
global TRUNK
global fname_structDefFile
global fname_subStructDefFile
global fname_structSizeFile
global fname_srcFileFile
global fname_typedefFile
global fname_tempSubStructDefs
global fname_logFileName
global fname_multiCmdFile
global fname_subStructMultiCmdFile
global fname_structSizeMultiCmdFile
global fname_srcFileMultiCmdFile
global fname_typedefMultiCmdFile
tagCreate = str(os.getpid()) + "-" + str(datetime.now()) + "-" + str(random.randint(1, 1024))
uPIDName = str(uuid.uuid5(uuid.NAMESPACE_DNS, tagCreate))
fname_structDefFile = str("ctypeAutoGen_structDefs_" + uPIDName + extTXT)
fname_subStructDefFile = str("ctypeAutoGen_subStructDefs_" + uPIDName + extTXT)
fname_structSizeFile = str("ctypeAutoGen_structSizes_" + uPIDName + extTXT)
fname_srcFileFile = str("ctypeAutoGen_srcFiles_" + uPIDName + extTXT)
fname_typedefFile = str("ctypeAutoGen_typedefs_" + uPIDName + extTXT)
fname_tempSubStructDefs = str("ctypeAutoGen_tempSubStructDefs_" + uPIDName + extTXT)
fname_logFileName = str("ctypeAutoGen_log_" + uPIDName + extTXT)
fname_multiCmdFile = str("ctypeAutoGen_multiCmdFile_" + uPIDName + extRC)
fname_subStructMultiCmdFile = str("ctypeAutoGen_subStructMultiCmdFile_" + uPIDName + extRC)
fname_structSizeMultiCmdFile = str("ctypeAutoGen_structSizeMultiCmdFile_" + uPIDName + extRC)
fname_srcFileMultiCmdFile = str("ctypeAutoGen_srcFileMultiCmdFile_" + uPIDName + extRC)
fname_typedefMultiCmdFile = str("ctypeAutoGen_typedefMultiCmdFile_" + uPIDName + extRC)
################################################################################################################
################################################################################################################
## Reg Expression
################################################################################################################
################################################################################################################
# NOTE(review): presumably forces the "NAND" operating mode when clang-based
# extraction is compiled out — confirm against where ENABLE_CLANG is defined.
if not ENABLE_CLANG:
    TRUNK = "NAND"
# Pre-compiled structure-detection patterns (see https://www.debuggex.com/
# for a graphical rendering of each expression).

# Closing line carrying the *main* structure name, terminated by ';' or ':',
# e.g. "} myStruct_t;" or "} myStruct_t:".
detectedStructureMainName = re.compile(r"(})(\s?)+(\w)+?(\s?)+?[;:]$")

# Closing line carrying a *sub*-structure name, ';' terminator only,
# e.g. "} myStruct_t;".
detectedStructureSubName = re.compile(r"(})(\s?)+(\w)+?(\s?)+?[;]$")

# Anonymous structure terminator, e.g. "};" (optional whitespace).
detectedAnonymousName = re.compile(r"(})(\s?)+?[;]$")

# Struct/union pointer possibly embedded in an assignment, so an MMU type can
# be assigned, e.g. "myvalue = struct transDmaDwordDesc_t *dmaAdmin;".
detectComplexStructOrUnionPointer = re.compile(r"(((\s+(\w)+(\s)+)|(\s+(\w)+=\s+))|(\s+)?)(struct|union)(\s)+?((\w)+)?(\s+)?[*](\s+)?(\w)+(\s+)?[;](\s+)?")

# Plain struct/union pointer declaration, so an MMU type can be assigned,
# e.g. "struct transDmaDwordDesc_t *dmaAdmin;" or the `union` equivalent.
detectSimpleStructOrUnionPointer = re.compile(r"((\s+)?)(struct|union)(\s)+?((\w)+)?(\s+)?[*](\s+)?(\w)+(\s+)?[;](\s+)?")

# Basic-type pointer declaration, e.g. "char *dmaAdmin;".
detectBasicPointer = re.compile(r"((\s+)?)(\w+)(\s+)?[*](\s+)?(\w)+(\s+)?[;](\s+)?")
# Pre-compiled patterns used by the parsing loops; compiling once up front
# keeps the hot scanning code fast. Index 0 is intentionally unused so the
# historical 1-based sequence numbering is preserved.
matchSequence = [
    None,                                         # 0: unused placeholder
    re.compile(r"\d+: (.+)$"),                    # 1: "<n>: <payload>"
    re.compile(r"^//"),                           # 2: comment line
    re.compile(r"^ObjectBegin==>(.+)"),           # 3: object-begin marker
    re.compile(r"^\_+"),                          # 4: underscore ruler
    re.compile(r"^0x[a-fA-F0-9]+$"),              # 5: bare hex literal
    re.compile(r"^union \{$"),                    # 6: anonymous union open
    re.compile(r"^(\w+) \{$"),                    # 7: "<name> {"
    re.compile(r"^(\w+) (\w+) \{$"),              # 8: "<kw> <name> {"
    re.compile(r"^(\w+) union (\w+) \{$"),        # 9: "<name> union <name> {"
    re.compile(r"^(\w+) (\w+) (\w+) \{$"),        # 10: three-token open
    re.compile(r"^(\w+) = (\w+) \{$"),            # 11: "<name> = <type> {"
    re.compile(r"^(\w+) = union (\w+) \{$"),      # 12: "<name> = union <type> {"
    re.compile(r"^(\w+) = (\w+) (\w+) \{$"),      # 13: "<name> = <kw> <type> {"
    re.compile(r"^([\w ]+) ([*\w]+);$"),          # 14: "<type words> <member>;"
    re.compile(r"^(\w+) = union (\w+?::.+) \{"),  # 15: scoped union open
    re.compile(r"^(\w+) = (\w+?::.+) \{"),        # 16: scoped assignment open
    re.compile(r"^(\w+) (\w+?::.+) \{"),          # 17: scoped open
    re.compile(r"^\d+$"),                         # 18: bare decimal
    re.compile(r"^(versionMajor) = (.+)"),        # 19: anchored versionMajor
    re.compile(r"^(versionMinor) = (.+)"),        # 20: anchored versionMinor
    # 21/23: NAND enum + type detection. The 's' (struct) suffix class is
    # deliberately excluded: "name_size_t" would slice as "name_s" (old @todo).
    re.compile(r"(\w+_[et])[ ;:]?"),              # 21: NAND, optional delimiter
    re.compile(r"(\w+_[ets])[ ;:]?"),             # 22: SXP enum/type/struct
    re.compile(r"(\w+_[et])[ ;]"),                # 23: NAND, delimiter required
    re.compile(r"(\w+_[ets])[ ;]"),               # 24: SXP, delimiter required
    re.compile(r"(versionMajor) = (.+)"),         # 25: unanchored versionMajor
    re.compile(r"(versionMinor) = (.+)"),         # 26: unanchored versionMinor
]
################################################################################################################
################################################################################################################
## Python 2 and 3 redefines
################################################################################################################
################################################################################################################
# if (sys.version_info[0] < 3):
# # Python 3 code in this block
# range = xrange # @todo python 3 convert
#
# if (sys.version[:3] > "2.3"):
# import hashlib # @todo cleanup explicit usage
# try:
# # Python 2
# xrange # @todo python 2 convert
# except NameError:
# # Python 3
# xrange = range
# Python 3 and 2 check
# try:
# input = raw_input # @todo python 3 convert
# except NameError:
# pass
################################################################################################################
################################################################################################################
## Execute the binaries if there are no changes, e.g. the Intel just-in-time
## compiler makes the binary faster as it is used within our system.
################################################################################################################
################################################################################################################
# Locate the Green Hills (GHS) tool-chain per host OS. Failure here is
# non-fatal: the error is reported and the path variables stay unset.
try:
    if platform.system() == 'Linux':
        ghsPath = '/usr/ghs'
        exeSuffix = ''
    elif platform.system() == 'Windows':
        ghsPath = 'c:/ghs'
        exeSuffix = '.exe'
        import win32com.shell.shell as shell
    elif 'CYGWIN_NT' in platform.system():
        ghsPath = 'c:/ghs'
        exeSuffix = '.exe'
except Exception as exc:
    # Narrowed from a bare `except:` and the reason is now reported instead
    # of being silently discarded.
    print("Failed binary exe: %s" % exc)
cmdPath, cmdFile = os.path.split(sys.argv[0])
usage = "%s --projectname PROJ_NAME --fwbuilddir FW_BUILD_DIR --tools TELEMETRY_TOOLS_DIR --multiexeversion MULTI_VER" % (
    sys.argv[0])
################################################################################################################
## Helper function to pause for user input (for debug use only)
################################################################################################################
################################################################################################################
def pressReturnToContinue(aString=None):
    '''
    Debug helper: pause until the user presses RETURN; 'q' exits the program.

    Only active when the module-level ENABLE_DEBUG_ENTER flag is 1; otherwise
    it just reports that debug-enter is disabled.

    Parameters:
        aString: optional label prepended to the prompt.
    '''
    if ENABLE_DEBUG_ENTER == 1:
        prompt = "PRESS RETURN TO CONTINUE or 'q' to quit: "
        if aString is not None:
            prompt = "(%s) %s" % (aString, prompt)
        # Plain input(): the previous Python-3 branch wrapped input() in
        # eval(), which executed arbitrary user text, crashed on an empty
        # line, dropped the label, and misspelled "CONTINUE".
        usersInput = input(prompt)
        if usersInput == 'q':
            sys.exit(0)
    else:
        print("Debug enter disabled.")
def formatDataControlObjects(enumGenFile):
# Process through specified input data object file to get list for scanning
iFile = open(enumGenFile, 'r')
if iFile.mode == 'r':
lines = iFile.readlines()
else:
if ENABLE_DEBUG_ENTER: quit(1)
iFile.close()
objectList = []
for l in lines:
line = l.strip()
line = re.sub('\/\/\/<', ' ', line)
line = re.sub('=', ',', line)
line = re.sub('^ +', '', line)
line = re.sub(' +', ' ', line)
line = re.sub(' +,', ',', line)
line | |
<filename>cand/cand.py
import numpy as np
import pandas as pd
import warnings
from sklearn import linear_model
import scipy.stats, pycasso
import matplotlib.pyplot as plt
import glmnet, tqdm
#import multiprocessing as mp
from .utils import GIC, lsIC
class candidate_models:
    '''
    Holds a design matrix / response pair plus a variable-importance vector
    used to guide candidate-model search.

    Parameters
    ----------
    X, y : array-like
        Design matrix (n x d) and response vector; both are copied.
    evaluator : str
        Information criterion used to score models; one of 'AIC', 'BIC',
        'mBIC', 'eBIC', 'PLIC'. Unknown values fall back to 'PLIC' with a
        warning.
    varimp_type : str or None
        Variable-importance method: 'marcor' (marginal correlation), 'holp'
        (HOLP projection), 'NR17' (Nevo & Ritov, 2017), any other non-None
        value gives uniform importance, and None skips the computation.
    '''

    def __init__(self, X, y, evaluator='PLIC', varimp_type=None):
        self.X, self.y = np.array(X).copy(), np.array(y).copy()
        self.n, self.d = X.shape
        if evaluator not in ('AIC', 'BIC', 'mBIC', 'eBIC', 'PLIC'):
            self.evaluator = 'PLIC'
            warnings.warn('[evaluator] is set to [PLIC]')
        else:
            self.evaluator = evaluator
        self.varimp_type = varimp_type
        if varimp_type is None:
            self.varimp = None
        else:
            self.calculate_varimp()
        # Floor exact zeros so importance-weighted sampling never divides by
        # (or draws with) zero probability.
        # BUGFIX: guarded against varimp being None — `any(None == 0.0)`
        # previously raised TypeError whenever varimp_type was None.
        if self.varimp is not None and np.any(self.varimp == 0.0):
            positive = self.varimp[self.varimp > 0.0]
            # BUGFIX: an all-zero importance vector previously crashed np.min
            # on an empty slice; fall back to the floor constant.
            floor = min(1e-7, positive.min()) if positive.size > 0 else 1e-7
            self.varimp[self.varimp == 0.0] = floor

    def calculate_varimp(self):
        '''Compute self.varimp according to self.varimp_type.'''
        if self.varimp_type == 'marcor':
            # Absolute marginal correlation of each column of X with y.
            self.varimp = np.abs(np.corrcoef(self.y, self.X.T)[0, :][1:])
        elif self.varimp_type == 'holp':
            # HOLP screening coefficients (intercept column dropped); the
            # dual form is used when d >= n so the inverted matrix is n x n.
            XX = np.append(np.ones((self.n, 1)), self.X, axis=1)
            if self.n > self.d:
                self.varimp = np.dot(np.linalg.inv(np.dot(XX.T, XX)), XX.T).dot(self.y)[1:]
            else:
                self.varimp = np.dot(XX.T, np.linalg.inv(np.dot(XX, XX.T))).dot(self.y)[1:]
        elif self.varimp_type.upper() == 'NR17':
            self._calculate_varimp_nr17()
        else:
            # Any other label: uniform importance.
            self.varimp = np.ones(self.d)

    def _calculate_varimp_nr17(self):
        '''
        Variable importance of Nevo & Ritov (2017, JMLR): track at which
        penalty relaxation each variable enters/leaves the lasso path
        relative to the elastic-net support.
        '''
        l1_fit = linear_model.LassoCV(cv=5, n_jobs=-1).fit(self.X, self.y)
        l1_coef = l1_fit.coef_
        l1_model = np.where(np.abs(l1_coef) > 0.0)[0]
        en_fit = linear_model.ElasticNetCV(cv=5, n_jobs=-1).fit(self.X, self.y)
        en_coef = en_fit.coef_
        en_model = np.where(np.abs(en_coef) > 0.0)[0]
        # Variables selected by the elastic net but not by the lasso.
        en_only = np.array(list(set(en_model) - set(l1_model)))
        penalties = np.ones(self.d)
        delta = np.linspace(0.0, 0.1, 51)
        self.varimp = np.zeros(self.d)
        if en_only.size > 0:
            coef_l1_plus = np.zeros((delta.size, self.d))
            for i in range(len(delta) - 1):
                penalties[en_only] = delta[i]
                coef_l1_plus[i, :] = glmnet.ElasticNet(alpha=1.0).fit(
                    self.X, self.y, relative_penalties=penalties).coef_
            coef_l1_plus[-1, :] = l1_coef
            for j in range(self.d):
                if j in l1_model:
                    ij = np.argmax(coef_l1_plus[:, j] == 0.0) if any(coef_l1_plus[:, j] == 0.0) else None
                    self.varimp[j] = 1.0 - delta[ij] / 2.0 if ij is not None else 0.0
                elif j in en_only:
                    ij = np.argmax(coef_l1_plus[:, j] != 0.0) if any(coef_l1_plus[:, j] != 0.0) else None
                    self.varimp[j] = delta[ij] / 2.0 if ij is not None else 0.0
                else:
                    self.varimp[j] = 0.0
        else:
            # BUGFIX: this branch previously referenced the undefined name
            # `X`, indexed self.varimp before creating it, and used delta[ij]
            # even when ij was None.
            ij = np.argmax(l1_coef == 0.0) if any(l1_coef == 0.0) else None
            for j in range(self.d):
                self.varimp[j] = 1.0 - delta[ij] / 2.0 if (j in l1_model and ij is not None) else 0.0
class GA(candidate_models):
    '''
    Genetic algorithm for high-quality sparse-model search.

    Individuals are binary inclusion vectors of length d; fitness is the
    negative information criterion selected by `evaluator`, so larger is
    better.
    '''

    def __init__(self, X, y, evaluator='PLIC', varimp_type='marcor', popSize=0, selection='proportional', mutation_type='varimp', mutation_rate=None, ggap=15, maxGen=100):
        candidate_models.__init__(self, X, y, evaluator, varimp_type)
        if popSize <= 0:
            # Default population size grows logarithmically with dimension.
            popSize = int(4 * np.ceil(1 + np.log(-X.shape[1] / np.log(0.9999)) / np.log(2)))
        self.popSize, self.selection = popSize, selection
        # BUGFIX: was `mutation_type is not 'varimp'` — `is` tests object
        # identity, not string equality.
        self.mutation_type = 'uniform' if mutation_type != 'varimp' else mutation_type
        if mutation_rate is None:
            self.mutation_rate = 1.0 / X.shape[1]
        else:
            self.mutation_rate = mutation_rate
        self.worst_fitness = np.nan
        # Results of the latest fit() call.
        self.models = None
        self.fitness = None
        self.generations = {'fitness': [], 'model_size': [], 'model_history': []}
        self.ggap = ggap       # generation gap used by the convergence test
        self.maxGen = maxGen   # hard cap on the number of generations

    def get_fitness(self, models, n_jobs=-1):
        '''Return the fitness (negative GIC) of each row of `models`.'''
        fitness = -GIC(self.X, self.y, models, self.evaluator, n_jobs)
        # Track the worst finite fitness seen so far; infeasible models
        # (NaN fitness) are assigned that value instead of NaN.
        self.worst_fitness = np.nanmin(np.append(self.worst_fitness, fitness))
        return np.where(pd.isnull(fitness), self.worst_fitness, fitness)

    def uniform_xover(self, models, prob_select):
        '''
        Sample two parents (with replacement) according to `prob_select`,
        apply uniform crossover, then mutate each child. Returns a 2-row
        0/1 array of children.
        '''
        # BUGFIX: was `if prob_select.sum() is not 1.0:` — a float identity
        # test that is effectively always true; normalizing unconditionally
        # preserves the previous behavior and is correct.
        prob_select = prob_select / prob_select.sum()
        m = models[np.random.choice(range(models.shape[0]), size=2, replace=True, p=prob_select), :]
        if any(m[0, :] != m[1, :]):
            # Swap a random subset of the positions where the parents differ.
            idx = np.where(m[0, :] != m[1, :])[0]
            idxx = np.random.binomial(1, 0.5, size=idx.size)
            if idxx.sum() > 0:
                m[0, idx[idxx == 1]], m[1, idx[idxx == 1]] = m[1, idx[idxx == 1]], m[0, idx[idxx == 1]]
        # Mutation: per-position bit flip, optionally reweighted by variable
        # importance (important inactive variables are more likely switched
        # on; unimportant active ones more likely switched off).
        if self.mutation_rate > 0.0:
            for k in range(m.shape[0]):
                prob_mutate = np.repeat(self.mutation_rate, self.d)
                if (self.mutation_type == 'varimp') and (self.varimp.min() != self.varimp.max()):
                    idx0 = np.where(m[k, :] == 0)[0]
                    if idx0.size > 0:
                        pp = self.varimp[idx0]
                        prob_mutate[idx0] *= idx0.size * pp / pp.sum()
                    idx1 = np.where(m[k, :] != 0)[0]
                    if idx1.size > 0:
                        pp = np.where(np.isinf(1.0 / self.varimp[idx1]), 0.0, 1.0 / self.varimp[idx1])
                        prob_mutate[idx1] *= idx1.size * pp / pp.sum()
                idx_mutate = np.where(np.random.random(self.d) < prob_mutate)[0]
                if idx_mutate.size > 0:
                    m[k, idx_mutate] = 1 - m[k, idx_mutate]
        return m

    def fit(self, init_models='RP', model_history=False, seed=None, verbose=False, n_jobs=-1):
        '''
        Run the genetic search.

        init_models : 'RP', a (popSize x d) 0/1 array, or anything else for
            random importance-weighted initialization.
        model_history : keep every generation's population when True.
        seed : optional numpy RNG seed for reproducibility.
        '''
        if seed is not None:
            np.random.seed(seed)
        # --- Initial population, fitness evaluation, sort by fitness ------
        if isinstance(init_models, np.ndarray):
            self.models = 1 * (init_models != 0)
        elif init_models == 'RP':
            init_RP = RP(self.X, self.y, None, None, 'RP')
            init_RP.fit()
            self.models = init_RP.models
        else:
            # Random initialization: hypergeometric model sizes, positions
            # drawn proportionally to variable importance.
            init_model_sizes = scipy.stats.hypergeom(
                6 * min(self.n, self.d), 2 * min(self.n, self.d), min(self.n, self.d)).rvs(size=self.popSize)
            self.models = np.zeros((self.popSize, self.d)).astype('int')
            for k in range(self.popSize):
                self.models[k, np.random.choice(self.d, size=init_model_sizes[k], replace=False,
                                                p=self.varimp / self.varimp.sum())] = 1
        self.fitness = self.get_fitness(self.models, n_jobs)
        idx_sort = np.argsort(self.fitness)[::-1]
        self.models, self.fitness = self.models[idx_sort, :], self.fitness[idx_sort]
        self.generations['fitness'].append(self.fitness)
        self.generations['model_size'].append(self.models.sum(axis=1))
        if model_history:
            self.generations['model_history'].append(self.models)
        # --- Evolution loop ----------------------------------------------
        converge = False
        it = 0
        while not converge and (it <= self.maxGen):
            if np.isnan(self.worst_fitness):
                raise RuntimeError('All models in generation {} are infeasible'.format(it))
            it += 1
            if verbose:
                print('\t{} generations'.format(it), end='\r')
            old_models = self.models.copy()
            # Elitism: carry the best model over unchanged.
            self.models = old_models[0, :]
            if np.unique(self.fitness).size == 1:
                # Identical fitnesses: selection weighting is irrelevant.
                prob_select = np.ones(self.popSize) / self.popSize
            elif self.selection == 'proportional':
                # Proportionate selection via GIC-based model weighting.
                prob_select = lsIC(self.X, self.y, old_models, self.evaluator, ic=-self.fitness, n_jobs=n_jobs)
            else:
                # Rank selection.
                fitness_rank = scipy.stats.rankdata(self.fitness, method='ordinal')
                prob_select = 2.0 * fitness_rank.astype('float') / fitness_rank.size / (fitness_rank.size + 1.0)
            # Uniform crossover + mutation fills the rest of the population.
            children = np.vstack([self.uniform_xover(old_models, prob_select) for _ in range(self.popSize - 1)])
            self.models = np.vstack((self.models, children))
            self.fitness = self.get_fitness(self.models, n_jobs)
            idx_sort = np.argsort(self.fitness)[::-1][:self.popSize]
            self.models, self.fitness = self.models[idx_sort, :], self.fitness[idx_sort]
            self.generations['fitness'].append(self.fitness)
            self.generations['model_size'].append(self.models.sum(axis=1))
            if model_history:
                self.generations['model_history'].append(self.models)
            # Converged when fitness is statistically indistinguishable from
            # the population `ggap` generations ago (Welch t-test).
            if it > self.ggap:
                converge = (scipy.stats.ttest_ind(self.generations['fitness'][it],
                                                  self.generations['fitness'][it - self.ggap],
                                                  equal_var=False)[1] >= 0.05)
        # Remove duplicated models in the last generation, re-sort by fitness.
        self.models, idx = np.unique(self.models, return_index=True, axis=0)
        self.fitness = self.fitness[idx]
        idx = np.argsort(self.fitness)[::-1]
        self.models = self.models[idx, :]
        self.fitness = self.fitness[idx]
        return

    def plot_fitness(self, file=None, true_model=None, n_jobs=-1):
        '''Plot average/best fitness per generation (optionally the true
        model's fitness); save to `<file>.pdf` when `file` is given.'''
        plt.plot(list(map(np.mean, self.generations['fitness'])), label='Average Fitness', color='forestgreen')
        plt.plot(list(map(np.max, self.generations['fitness'])), label='Best Fitness', color='steelblue')
        if true_model is not None:
            plt.axhline(self.get_fitness(true_model.reshape(1, true_model.size), n_jobs),
                        label='True Model', c='tomato', linestyle='--')
        plt.legend(loc=4)
        plt.xlabel('Generation')
        plt.ylabel('Fitness')
        if file is not None:
            plt.savefig(file + '.pdf', bbox_inches='tight')
        else:
            plt.show()
        plt.cla()
        plt.clf()
        plt.close('all')
class SA(candidate_models):
'''
Simulated annealing algorithm of
<NAME> and <NAME> (2017)
"Identifying a Minimal Class of Models for High-dimensional Data"
Journal of Machine Learning Research, 18(24):1-29
'''
    def __init__(self, X, y, evaluator='PLIC', varimp_type='NR17', popSize=100, max_iter=100):
        '''
        popSize: number of candidate models kept.
        max_iter: maximum number of annealing iterations.
        '''
        candidate_models.__init__(self, X, y, evaluator, varimp_type)
        self.popSize, self.max_iter = popSize, max_iter
    def generate_init_model(self, model_size):
        '''
        Auxiliary function to choose initial model

        Build a 0/1 inclusion vector with `model_size` active positions:
        every variable whose importance is strictly above the size-th
        largest importance is taken, and ties at that threshold are broken
        uniformly at random. Returns None (after a warning) when
        `model_size` exceeds the number of variables.
        '''
        if model_size > self.varimp.size:
            warnings.warn('[model_size] should be at most {:d}'.format(self.X.shape[1]))
            return
        model = np.zeros(self.varimp.size).astype('int')
        # Variables strictly above the threshold importance are always taken.
        taken = np.where(self.varimp > np.sort(self.varimp)[-model_size])[0]
        if taken.size > 0:
            model[taken] = 1
        # Fill the remaining slots from variables tied at the threshold.
        nfree = model_size - taken.size
        idx = np.where(self.varimp == np.sort(self.varimp)[-model_size])[0]
        if idx.size > 0:
            model[np.random.choice(idx, nfree, replace=False)] = 1
        return model
def metropolis_iter(self, old_model, BoltzmanT, n_jobs=-1):
'''
Iteration for fixed temperature (BoltzmanT), a model is suggested and then a decision is made
'''
new_model = old_model.copy()
def var_in_out(model, varimp, type_):
d = model.size
k = np.sum(model != 0.0)
idx = np.where(model == 0)[0] if type_ == "in" else np.where(model != 0)[0]
if type_ == 'in':
idx = np.where(model == 0)[0]
prob = np.ones(d - k) / (d - k) if all(varimp[idx] == 0.0) else varimp[idx] / varimp[idx].sum()
else:
idx = np.where(model != 0.0)[0]
prob = 1.0 / varimp[idx]
prob = np.where(np.isinf(prob), 0.0, prob)
prob /= prob.sum()
variable = np.random.choice(idx, 1, p=prob)
return idx, prob, variable
# Probabilities to transition to a new model
old_out = var_in_out(old_model, self.varimp, 'out')
old_in = var_in_out(old_model, self.varimp, 'in')
new_model[old_out[2]], new_model[old_in[2]] = 0, 1
prob_old2new = old_out[1][old_out[0] == old_out[2]] * old_in[1][old_in[0] == old_in[2]]
# Probabilities from the new model back to the old model
new_in = var_in_out(new_model, self.varimp, 'in')
new_out = var_in_out(new_model, self.varimp, 'out')
prob_new2old = new_in[1][new_in[0] == old_out[2]] * new_out[1][new_out[0] == old_in[2]]
#
# GIC values of the old and new models
ic = GIC(self.X, self.y, np.array([old_model, | |
import mock
from twisted.internet import defer
from twisted.trial import unittest
from oppy.cell.definitions import MAX_RPAYLOAD_LEN
from oppy.cell.fixedlen import Create2Cell, DestroyCell, EncryptedCell
from oppy.cell.relay import (
RelayDataCell,
RelayEndCell,
RelayConnectedCell,
RelaySendMeCell,
RelayExtendedCell,
RelayTruncatedCell,
RelayDropCell,
RelayResolvedCell,
RelayExtended2Cell,
RelayExtendCell,
)
from oppy.circuit.circuit import Circuit
from oppy.circuit.definitions import (
CState,
CircuitType,
SENDME_THRESHOLD,
WINDOW_SIZE
)
from oppy.util.exitrequest import ExitRequest
from oppy.util.tools import ctr
# Circuit id shared by all test fixtures below.
ID = 0
# Stream cap handed to the IPv4 test circuit in setUp.
TEST_MAX_STREAMS = 10
class CircuitTest(unittest.TestCase):
    @mock.patch('oppy.circuit.circuitmanager.CircuitManager', autospec=True)
    @mock.patch('oppy.connection.connection.Connection', autospec=True)
    @mock.patch('oppy.path.path.Path', autospec=True)
    def setUp(self, path, conn, cm):
        # Build two circuits over mocked manager/connection/path and a fake
        # 3-hop crypt path: IPv4 with an explicit stream cap, IPv6 with the
        # default cap.
        crypt_path = [mock.Mock(), mock.Mock(), mock.Mock()]
        self.circuit = Circuit(cm, ID, conn, CircuitType.IPv4,
                               path, crypt_path, TEST_MAX_STREAMS)
        self.circuit_ipv6 = Circuit(cm, ID, conn, CircuitType.IPv6,
                                    path, crypt_path)
    # NOTE(review): method name typo "Requets" kept so the externally visible
    # test id does not change.
    def test_canHandleRequets_port_yes(self):
        # An OPEN circuit whose exit policy allows the target must accept.
        self.circuit._path.exit.descriptor.exit_policy.can_exit_to = mock.Mock()
        self.circuit._path.exit.descriptor.exit_policy.can_exit_to.return_value = True
        self.circuit._state = CState.OPEN
        request = ExitRequest('\x01\xbb', addr=u'127.0.0.1')
        self.assertTrue(self.circuit.canHandleRequest(request))
    def test_canHandleRequest_port_no(self):
        # An OPEN circuit whose exit policy rejects the target must refuse.
        # NOTE(review): this test patches `microdescriptor.exit_policy` while
        # the _yes variant patches `descriptor.exit_policy` — confirm which
        # attribute Circuit.canHandleRequest actually consults.
        self.circuit._path.exit.microdescriptor.exit_policy.can_exit_to = mock.Mock()
        self.circuit._path.exit.microdescriptor.exit_policy.can_exit_to.return_value = False
        self.circuit._state = CState.OPEN
        request = ExitRequest('\x01\xbb', addr=u'127.0.0.1')
        self.assertFalse(self.circuit.canHandleRequest(request))
    def test_canHandleRequest_buffering_no(self):
        # TODO: rename CSTATE
        # A circuit still in the BUFFERING state must refuse new requests.
        self.circuit._state = CState.BUFFERING
        request = ExitRequest('\x01\xbb', addr=u'127.0.0.1')
        self.assertFalse(self.circuit.canHandleRequest(request))
    def test_canHandleRequest_max_streams_no(self):
        # A circuit already carrying its maximum number of streams refuses.
        self.circuit._streams = {k: mock.Mock() for k in xrange(1, TEST_MAX_STREAMS+1)}
        request = ExitRequest('\x01\xbb', host='https://riseup.net')
        self.assertFalse(self.circuit.canHandleRequest(request))
    @mock.patch('twisted.internet.defer.DeferredQueue', autospec=True)
    def test_send(self, mock_dq):
        # send() must enqueue exactly one item on the write queue.
        self.circuit._write_queue = mock_dq
        mock_stream = mock.Mock()
        mock_stream.stream_id = 6
        self.circuit.send('test', mock_stream)
        self.assertEqual(self.circuit._write_queue.put.call_count, 1)
def test_send_too_long(self):
s = 'a' * MAX_RPAYLOAD_LEN
s += 'a'
self.assertRaises(ValueError, self.circuit.send, s, 6)
    @mock.patch('twisted.internet.defer.DeferredQueue', autospec=True)
    def test_recv(self, mock_dq):
        # recv() must enqueue exactly one item on the read queue.
        self.circuit._read_queue = mock_dq
        self.circuit.recv('test')
        self.assertEqual(self.circuit._read_queue.put.call_count, 1)
    # TODO: test that the relayendcell actually has the correct stream_id
    @mock.patch('oppy.stream.stream.Stream', autospec=True)
    @mock.patch('oppy.circuit.circuit.RelayEndCell', autospec=True)
    def test_removeStream_more_remain(self, mock_relay_end_cell, mock_stream):
        # Removing one of several streams sends a RelayEndCell but must not
        # consult the circuit manager about destroying the circuit.
        cell = mock.Mock()
        mock_relay_end_cell.make.return_value = cell
        mock_stream.stream_id = ID
        self.circuit._streams = {ID+1: mock.Mock(), ID: mock_stream}
        self.circuit._encryptAndSendCell = mock.Mock()
        self.circuit.removeStream(mock_stream)
        self.assertTrue(mock_stream not in self.circuit._streams)
        self.assertEqual(len(self.circuit._streams), 1)
        self.circuit._encryptAndSendCell.assert_called_once_with(cell)
        self.assertEqual(
            self.circuit._circuit_manager.shouldDestroyCircuit.call_count, 0)
    # TODO: test that the relayendcell actually has the correct stream_id
    @mock.patch('oppy.stream.stream.Stream', autospec=True)
    @mock.patch('oppy.circuit.circuit.RelayEndCell', autospec=True)
    def test_removeStream_zero_remain_destroy_no(self, mock_relay_end_cell,
                                                 mock_stream):
        # Removing the last stream sends a RelayEndCell; when the manager
        # says not to destroy the circuit, nothing is destroyed or closed.
        cell = mock.Mock()
        mock_relay_end_cell.make.return_value = cell
        mock_stream.stream_id = ID
        self.circuit._streams = {ID: mock_stream}
        self.circuit._circuit_manager.shouldDestroyCircuit.return_value = False
        self.circuit._encryptAndSendCell = mock.Mock()
        self.circuit._sendDestroyCell = mock.Mock()
        self.circuit._closeCircuit = mock.Mock()
        self.circuit.removeStream(mock_stream)
        self.assertTrue(mock_stream not in self.circuit._streams)
        self.circuit._encryptAndSendCell.assert_called_once_with(cell)
        self.assertEqual(self.circuit._sendDestroyCell.call_count, 0)
        self.assertEqual(self.circuit._closeCircuit.call_count, 0)
    @mock.patch('oppy.stream.stream.Stream', autospec=True)
    @mock.patch('oppy.circuit.circuit.RelayEndCell', autospec=True)
    def test_removeStream_zero_remain_destroy_yes(self, mock_relay_end_cell,
                                                  mock_stream):
        # Removing the last stream when the manager approves teardown must
        # send a DestroyCell and close the circuit.
        cell = mock.Mock()
        mock_relay_end_cell.make.return_value = cell
        mock_stream.stream_id = ID
        self.circuit._streams = {ID: mock_stream}
        self.circuit._circuit_manager.shouldDestroyCircuit.return_value = True
        self.circuit._encryptAndSendCell = mock.Mock()
        self.circuit._sendDestroyCell = mock.Mock()
        self.circuit._closeCircuit = mock.Mock()
        self.circuit.removeStream(mock_stream)
        self.assertTrue(mock_stream not in self.circuit._streams)
        self.circuit._encryptAndSendCell.assert_called_once_with(cell)
        self.assertEqual(self.circuit._sendDestroyCell.call_count, 1)
        self.assertEqual(self.circuit._closeCircuit.call_count, 1)
    # TODO: better test, this doesn't really test anything
    def test_removeStream_nonexistent(self):
        # Removing a stream the circuit does not know about must not raise.
        mock_stream = mock.Mock()
        mock_stream.stream_id = 1
        self.circuit._streams = {}
        self.circuit.removeStream(mock_stream)
    # TODO: test
    def test_removeStream_conn_send_fail(self):
        # Placeholder: connection-level send failure during removeStream.
        pass
    # TODO: make sure relayendcell has correct stream_id
    @mock.patch('oppy.circuit.circuit.RelayBeginCell', autospec=True)
    def test_beginStream(self, mock_relay_begin_cell):
        # beginStream() must build a RelayBeginCell and send it encrypted.
        mock_stream = mock.Mock()
        mock_stream.stream_id = ID
        mock_stream.request = mock.Mock()
        cell = mock.Mock()
        mock_relay_begin_cell.make.return_value = cell
        self.circuit._encryptAndSendCell = mock.Mock()
        self.circuit.beginStream(mock_stream)
        self.circuit._encryptAndSendCell.assert_called_once_with(cell)
    # TODO: test
    def test_beginStream_conn_send_fail(self):
        # Placeholder: connection-level send failure during beginStream.
        pass
    def test_addStreamAndSetStreamID(self):
        # The first stream added to an empty circuit gets stream id 1.
        mock_stream = mock.Mock()
        self.circuit.addStreamAndSetStreamID(mock_stream)
        self.assertEqual(mock_stream.stream_id, 1)
        self.assertTrue(self.circuit._streams[1] == mock_stream)
    def test_addStreamAndSetStreamID_ctr_find_next_free_id(self):
        # With ids 1..N-4 occupied, the next free id (N-3) is assigned.
        self.circuit._streams = {}
        for i in xrange(1, TEST_MAX_STREAMS-3):
            self.circuit._streams[i] = mock.Mock()
        mock_stream = mock.Mock()
        self.circuit.addStreamAndSetStreamID(mock_stream)
        self.assertEqual(mock_stream.stream_id, TEST_MAX_STREAMS-3)
        self.assertEqual(self.circuit._streams[TEST_MAX_STREAMS-3], mock_stream)
    def test_addStreamAndSetStreamID_ctr_rollover_find_next_free_id(self):
        # Advance the id counter near its maximum and occupy the high ids
        # plus 1..3, so the assignment must wrap around and land on id 4.
        self.circuit._ctr = ctr(TEST_MAX_STREAMS)
        for i in xrange(1, TEST_MAX_STREAMS-4):
            next(self.circuit._ctr)
        self.circuit._streams[TEST_MAX_STREAMS-4] = mock.Mock()
        self.circuit._streams[TEST_MAX_STREAMS-3] = mock.Mock()
        self.circuit._streams[TEST_MAX_STREAMS-2] = mock.Mock()
        self.circuit._streams[TEST_MAX_STREAMS-1] = mock.Mock()
        self.circuit._streams[TEST_MAX_STREAMS] = mock.Mock()
        self.circuit._streams[1] = mock.Mock()
        self.circuit._streams[2] = mock.Mock()
        self.circuit._streams[3] = mock.Mock()
        mock_stream = mock.Mock()
        self.circuit.addStreamAndSetStreamID(mock_stream)
        self.assertEqual(mock_stream.stream_id, 4)
        self.assertTrue(self.circuit._streams[4] == mock_stream)
def test_addStreamAndSetStreamID_max_streams_full(self):
self.circuit._max_streams = 0
mock_stream = mock.Mock()
self.assertRaises(RuntimeError,
self.circuit.addStreamAndSetStreamID,
mock.Mock())
    # TODO: test cell is made with correct stream_id
    @mock.patch('oppy.circuit.circuit.RelaySendMeCell', autospec=True)
    def test_sendStreamSendMe(self, mock_relay_sendme_cell):
        # A stream-level SENDME must be built and sent encrypted.
        cell = mock.Mock()
        mock_relay_sendme_cell.make.return_value = cell
        mock_stream = mock.Mock()
        mock_stream.stream_id = ID
        self.circuit._encryptAndSendCell = mock.Mock()
        self.circuit.sendStreamSendMe(mock_stream)
        self.circuit._encryptAndSendCell.assert_called_once_with(cell)
    # TODO: test
    def test_sendStreamSendMe_conn_send_fail(self):
        # Placeholder: connection-level send failure during sendStreamSendMe.
        pass
    def test_destroyCircuitFromManager(self):
        # Manager-initiated teardown: send a DestroyCell, close all streams,
        # and deregister the circuit from its connection.
        self.circuit._sendDestroyCell = mock.Mock()
        self.circuit._closeAllStreams = mock.Mock()
        self.circuit._connection.removeCircuit = mock.Mock()
        self.circuit.destroyCircuitFromManager()
        self.assertEqual(self.circuit._sendDestroyCell.call_count, 1)
        self.assertEqual(self.circuit._closeAllStreams.call_count, 1)
        self.assertEqual(self.circuit._connection.removeCircuit.call_count, 1)
    # TODO: test
    def test_destroyCircuitFromManager_conn_send_fail(self):
        # Placeholder: connection-level send failure during manager teardown.
        pass
    def test_destroyCircuitFromConnection(self):
        # Connection-initiated teardown closes the circuit but must NOT try
        # to send a DestroyCell down the connection.
        self.circuit._sendDestroyCell = mock.Mock()
        self.circuit._closeCircuit = mock.Mock()
        self.circuit.destroyCircuitFromConnection()
        self.assertEqual(self.circuit._sendDestroyCell.call_count, 0)
        self.assertEqual(self.circuit._closeCircuit.call_count, 1)
    # test deferred is properly assigned
    # test that correct callback is called when succeeding
    @mock.patch('twisted.internet.defer.DeferredQueue', autospec=True)
    def test_pollReadQueue(self, mock_dq):
        # Polling the read queue stores the queue's deferred as _read_task
        # and wires _recvCell as its success callback.
        d = defer.Deferred()
        self.circuit._read_queue = mock_dq
        self.circuit._read_queue.get.return_value = d
        self.circuit._recvCell = mock.Mock()
        self.circuit._pollReadQueue()
        self.assertEqual(self.circuit._read_task, d)
        self.circuit._read_task.callback('test')
        self.circuit._recvCell.assert_called_once_with('test')
    @mock.patch('twisted.internet.defer.DeferredQueue', autospec=True)
    def test_pollWriteQueue(self, mock_dq):
        # Polling the write queue stores the queue's deferred as _write_task
        # and wires _writeData as its success callback.
        d = defer.Deferred()
        self.circuit._write_queue = mock_dq
        self.circuit._write_queue.get.return_value = d
        self.circuit._writeData = mock.Mock()
        self.circuit._pollWriteQueue()
        self.assertEqual(self.circuit._write_task, d)
        self.circuit._write_task.callback('test')
        self.circuit._writeData.assert_called_once_with('test')
# TODO: check that cell has correct id and data
    @mock.patch('oppy.circuit.circuit.RelayDataCell', autospec=True)
    def test_writeData(self, mock_relay_data_cell):
        """_writeData wraps (data, stream_id) in a RelayDataCell, sends it
        encrypted, and decrements the package window once."""
        cell = mock.Mock()
        mock_relay_data_cell.make.return_value = cell
        self.circuit._encryptAndSendCell = mock.Mock()
        self.circuit._decPackageWindow = mock.Mock()
        self.circuit._writeData(('test', ID))
        self.circuit._encryptAndSendCell.assert_called_once_with(cell)
        self.assertEqual(self.circuit._decPackageWindow.call_count, 1)
# TODO: test
    def test_writeData_conn_send_fail(self):
        """Placeholder: behavior when the connection-level send fails is
        not covered yet."""
        pass
    def test_recvCell_relay_cell(self):
        """An incoming EncryptedCell is handed to _recvRelayCell and the
        read queue is polled again."""
        self.circuit._recvRelayCell = mock.Mock()
        self.circuit._pollReadQueue = mock.Mock()
        cell = EncryptedCell.make(ID, 'a'*509)
        self.circuit._recvCell(cell)
        self.circuit._recvRelayCell.assert_called_once_with(cell)
        self.assertEqual(self.circuit._pollReadQueue.call_count, 1)
    @mock.patch('oppy.circuit.circuit.logging', autospec=True)
    def test_recvCell_non_backward_cell_fail(self, mock_logging):
        """A cell type that should never travel backward (e.g. Create2)
        tears the circuit down: destroy cell sent, circuit closed, no
        relay processing or re-polling, and a warning is logged."""
        self.circuit._recvRelayCell = mock.Mock()
        self.circuit._pollReadQueue = mock.Mock()
        self.circuit._sendDestroyCell = mock.Mock()
        self.circuit._closeCircuit = mock.Mock()
        cell = Create2Cell.make(ID, hdata=84*'a')
        self.circuit._recvCell(cell)
        self.assertEqual(self.circuit._sendDestroyCell.call_count, 1)
        self.assertEqual(self.circuit._closeCircuit.call_count, 1)
        self.assertEqual(self.circuit._recvRelayCell.call_count, 0)
        self.assertEqual(self.circuit._pollReadQueue.call_count, 0)
        self.assertTrue(mock_logging.warning.called)
    def test_recvCell_destroy_cell(self):
        """A DestroyCell closes the circuit locally without replying with
        another destroy cell or doing any relay processing."""
        self.circuit._recvRelayCell = mock.Mock()
        self.circuit._pollReadQueue = mock.Mock()
        self.circuit._sendDestroyCell = mock.Mock()
        self.circuit._closeCircuit = mock.Mock()
        cell = DestroyCell.make(ID)
        self.circuit._recvCell(cell)
        self.assertEqual(self.circuit._sendDestroyCell.call_count, 0)
        self.assertEqual(self.circuit._closeCircuit.call_count, 1)
        self.assertEqual(self.circuit._recvRelayCell.call_count, 0)
        self.assertEqual(self.circuit._pollReadQueue.call_count, 0)
    @mock.patch('oppy.crypto.util.decryptCell')
    @mock.patch('oppy.circuit.circuit.logging', autospec=True)
    def test_recvRelayCell_nonbackward_cell(self, mock_logging, mock_decrypt):
        """A decrypted relay cell of a type that should never travel
        backward (e.g. RelayExtend) destroys and closes the circuit and
        logs a warning."""
        cell = RelayExtendCell('test')
        mock_decrypt.return_value = (cell, 2)
        self.circuit._sendDestroyCell = mock.Mock()
        self.circuit._closeCircuit = mock.Mock()
        self.circuit._recvRelayCell(cell)
        self.assertEqual(self.circuit._sendDestroyCell.call_count, 1)
        self.assertEqual(self.circuit._closeCircuit.call_count, 1)
        self.assertTrue(mock_logging.warning.called)
@mock.patch('oppy.crypto.util.decryptCell')
@mock.patch('oppy.circuit.circuit.logging', autospec=True)
def test_recvCell_relay_cell_decrypt_fail(self, mock_logging,
mock_decrypt):
mock_decrypt.side_effect = Exception
self.circuit._sendDestroyCell = mock.Mock()
self.circuit._closeCircuit = mock.Mock()
self.circuit._processRelayDataCell = mock.Mock()
self.circuit._processRelayEndCell = mock.Mock()
self.circuit._processRelayResolvedCell = mock.Mock()
self.circuit._processRelayTruncatedCell = mock.Mock()
self.circuit._processRelayConnectedCell = mock.Mock()
self.circuit._processRelaySendMeCell = mock.Mock()
self.circuit._processRelayDropCell = mock.Mock()
cell = EncryptedCell.make(ID, 'a'*509)
self.circuit._recvRelayCell(cell)
self.assertTrue(mock_logging.debug.call_count, 1)
self.assertEqual(self.circuit._sendDestroyCell.call_count, 0)
self.assertEqual(self.circuit._closeCircuit.call_count, 0)
self.assertEqual(self.circuit._processRelayDataCell.call_count, 0)
self.assertEqual(self.circuit._processRelayEndCell.call_count, 0)
self.assertEqual(self.circuit._processRelayConnectedCell.call_count,
0)
self.assertEqual(self.circuit._processRelayResolvedCell.call_count, 0)
self.assertEqual(self.circuit._processRelayTruncatedCell.call_count,
0)
self.assertEqual(self.circuit._processRelaySendMeCell.call_count, 0)
self.assertEqual(self.circuit._processRelayDropCell.call_count, 0)
@mock.patch('oppy.crypto.util.decryptCell')
@mock.patch('oppy.circuit.circuit.logging', autospec=True)
def test_recvCell_relay_unexpected_cell_fail(self, mock_logging,
mock_decrypt):
mock_decrypt.return_value = (RelayExtended2Cell('test'), 2)
self.circuit._sendDestroyCell = mock.Mock()
self.circuit._closeCircuit = mock.Mock()
self.circuit._processRelayDataCell = mock.Mock()
self.circuit._processRelayEndCell = mock.Mock()
self.circuit._processRelayResolvedCell = mock.Mock()
self.circuit._processRelayTruncatedCell = mock.Mock()
self.circuit._processRelayConnectedCell = mock.Mock()
self.circuit._processRelaySendMeCell = mock.Mock()
self.circuit._processRelayDropCell = mock.Mock()
cell = EncryptedCell.make(ID, 'a'*509)
self.circuit._recvRelayCell(cell)
self.assertTrue(mock_logging.debug.call_count, 1)
self.assertEqual(self.circuit._sendDestroyCell.call_count, 0)
self.assertEqual(self.circuit._closeCircuit.call_count, 0)
self.assertEqual(self.circuit._processRelayDataCell.call_count, 0)
self.assertEqual(self.circuit._processRelayEndCell.call_count, 0)
self.assertEqual(self.circuit._processRelayConnectedCell.call_count,
0)
self.assertEqual(self.circuit._processRelayResolvedCell.call_count, 0)
self.assertEqual(self.circuit._processRelayTruncatedCell.call_count,
0)
self.assertEqual(self.circuit._processRelaySendMeCell.call_count, 0)
self.assertEqual(self.circuit._processRelayDropCell.call_count, 0)
    @mock.patch('oppy.crypto.util.decryptCell')
    def test_recvRelayCell_data_cell(self, mock_decrypt):
        """A decrypted RelayDataCell is dispatched to _processRelayDataCell
        along with its origin hop."""
        cell = RelayDataCell.make(ID, ID, 'a')
        mock_decrypt.return_value = (cell, 2)
        self.circuit._processRelayDataCell = mock.Mock()
        self.circuit._recvRelayCell(cell)
        self.circuit._processRelayDataCell.assert_called_once_with(cell, 2)
    @mock.patch('oppy.crypto.util.decryptCell')
    def test_recvRelayCell_end_cell(self, mock_decrypt):
        """A decrypted RelayEndCell is dispatched to _processRelayEndCell
        along with its origin hop."""
        cell = RelayEndCell(ID, ID)
        mock_decrypt.return_value = (cell, 2)
        self.circuit._processRelayEndCell = mock.Mock()
        self.circuit._recvRelayCell(cell)
        self.circuit._processRelayEndCell.assert_called_once_with(cell, 2)
    @mock.patch('oppy.crypto.util.decryptCell')
    def test_recvRelayCell_connected_cell(self, mock_decrypt):
        """A decrypted RelayConnectedCell is dispatched to
        _processRelayConnectedCell along with its origin hop."""
        cell = RelayConnectedCell('test')
        mock_decrypt.return_value = (cell, 2)
        self.circuit._processRelayConnectedCell = mock.Mock()
        self.circuit._recvRelayCell(cell)
        self.circuit._processRelayConnectedCell.assert_called_once_with(cell,
                                                                        2)
    @mock.patch('oppy.crypto.util.decryptCell')
    def test_recvRelayCell_sendme_cell(self, mock_decrypt):
        """A decrypted RelaySendMeCell is dispatched to
        _processRelaySendMeCell along with its origin hop."""
        cell = RelaySendMeCell.make(ID, ID)
        mock_decrypt.return_value = (cell, 2)
        self.circuit._processRelaySendMeCell = mock.Mock()
        self.circuit._recvRelayCell(cell)
        self.circuit._processRelaySendMeCell.assert_called_once_with(cell, 2)
    @mock.patch('oppy.crypto.util.decryptCell')
    def test_recvRelayCell_truncated_cell(self, mock_decrypt):
        """A decrypted RelayTruncatedCell is dispatched to
        _processRelayTruncatedCell along with its origin hop."""
        cell = RelayTruncatedCell('test')
        mock_decrypt.return_value = (cell, 2)
        self.circuit._processRelayTruncatedCell = mock.Mock()
        self.circuit._recvRelayCell(cell)
        self.circuit._processRelayTruncatedCell.assert_called_once_with(cell,
                                                                        2)
    @mock.patch('oppy.crypto.util.decryptCell')
    def test_recvRelayCell_drop_cell(self, mock_decrypt):
        """A decrypted RelayDropCell is dispatched to
        _processRelayDropCell along with its origin hop."""
        cell = RelayDropCell('test')
        mock_decrypt.return_value = (cell, 2)
        self.circuit._processRelayDropCell = mock.Mock()
        self.circuit._recvRelayCell(cell)
        self.circuit._processRelayDropCell.assert_called_once_with(cell, 2)
    @mock.patch('oppy.crypto.util.decryptCell')
    def test_recvRelayCell_resolved_cell(self, mock_decrypt):
        """A decrypted RelayResolvedCell is dispatched to
        _processRelayResolvedCell along with its origin hop."""
        cell = RelayResolvedCell('test')
        mock_decrypt.return_value = (cell, 2)
        self.circuit._processRelayResolvedCell = mock.Mock()
        self.circuit._recvRelayCell(cell)
        self.circuit._processRelayResolvedCell.assert_called_once_with(cell,
                                                                       2)
    def test_processRelayDataCell(self):
        """A relay data cell for a known stream delivers its payload to
        that stream and decrements the circuit's deliver window."""
        cell = mock.Mock()
        cell.rheader.stream_id = ID
        mock_rpayload = mock.Mock()
        cell.rpayload = mock_rpayload
        mock_stream = mock.Mock()
        self.circuit._streams = {ID: mock_stream}
        self.circuit._decDeliverWindow = mock.Mock()
        origin = 2
        self.circuit._processRelayDataCell(cell, origin)
        mock_stream.recv.assert_called_once_with(mock_rpayload)
        self.assertEqual(self.circuit._decDeliverWindow.call_count, 1)
    @mock.patch('oppy.circuit.circuit.logging', autospec=True)
    def test_processRelayDataCell_no_stream(self, mock_logging):
        """A relay data cell for an unknown stream id is dropped: nothing
        is delivered, the deliver window is untouched, and the event is
        logged at debug level."""
        cell = mock.Mock()
        cell.rheader.stream_id = ID
        mock_stream = mock.Mock()
        self.circuit._streams = {ID+1: mock_stream}
        self.circuit._decDeliverWindow = mock.Mock()
        origin = 2
        self.circuit._processRelayDataCell(cell, origin)
        self.assertEqual(mock_stream.recv.call_count, 0)
        self.assertEqual(self.circuit._decDeliverWindow.call_count, 0)
        self.assertTrue(mock_logging.debug.called)
    def test_processRelayEndCell(self):
        """A relay end cell for a known stream closes that stream from the
        circuit side."""
        cell = mock.Mock()
        cell.rheader.stream_id = ID
        mock_stream = mock.Mock()
        self.circuit._streams = {ID: mock_stream}
        origin = 2
        self.circuit._processRelayEndCell(cell, origin)
        self.assertEqual(mock_stream.closeFromCircuit.call_count, 1)
    @mock.patch('oppy.circuit.circuit.logging', autospec=True)
    def test_processRelayEndCell_no_stream(self, mock_logging):
        """A relay end cell for an unknown stream id is just logged."""
        self.circuit._streams = {}
        cell = mock.Mock()
        cell.rheader.stream_id = 1
        self.circuit._processRelayEndCell(cell, 2)
        self.assertTrue(mock_logging.debug.called)
    def test_processRelayConnectedCell(self):
        """A relay connected cell for a known stream notifies the stream
        that it is connected."""
        cell = mock.Mock()
        cell.rheader.stream_id = ID
        mock_stream = mock.Mock()
        self.circuit._streams = {ID: mock_stream}
        origin = 2
        self.circuit._processRelayConnectedCell(cell, origin)
        self.assertEqual(mock_stream.streamConnected.call_count, 1)
    @mock.patch('oppy.circuit.circuit.logging', autospec=True)
    def test_processRelayConnectedCell_no_stream(self, mock_logging):
        """A relay connected cell for an unknown stream id is just
        logged."""
        cell = mock.Mock()
        cell.rheader.stream_id = ID
        mock_stream = mock.Mock()
        self.circuit._streams = {}
        self.circuit._processRelayConnectedCell(cell, 2)
        self.assertTrue(mock_logging.debug.called)
    def test_processRelaySendMe_circuit_level(self):
        """A sendme cell with stream id 0 is circuit-level flow control:
        it increments the circuit's package window."""
        cell = mock.Mock()
        cell.rheader.stream_id = 0
        self.circuit._incPackageWindow = mock.Mock()
        origin = 2
        self.circuit._processRelaySendMeCell(cell, origin)
        self.assertEqual(self.circuit._incPackageWindow.call_count, 1)
    def test_processRelaySendMe_stream_level(self):
        """A sendme cell with a nonzero stream id increments the package
        window of that stream, not of the circuit."""
        cell = mock.Mock()
        cell.rheader.stream_id = 1
        mock_stream = mock.Mock()
        mock_stream.stream_id = 1
        self.circuit._streams = {1: mock_stream}
        origin = 2
        self.circuit._processRelaySendMeCell(cell, origin)
        self.assertEqual(mock_stream.incPackageWindow.call_count, 1)
    @mock.patch('oppy.circuit.circuit.logging', autospec=True)
    def test_processRelaySendMe_stream_level_no_stream(self, mock_logging):
        """A stream-level sendme cell for an unknown stream id is just
        logged."""
        cell = mock.Mock()
        cell.rheader.stream_id = 1
        self.circuit._streams = {}
        self.circuit._processRelaySendMeCell(cell, 2)
        self.assertTrue(mock_logging.debug.called)
    def test_processRelayTruncatedCell(self):
        """A relay truncated cell tears the circuit down: destroy cell
        sent and circuit closed."""
        cell = mock.Mock()
        self.circuit._sendDestroyCell = mock.Mock()
        self.circuit._closeCircuit = mock.Mock()
        origin = 2
        self.circuit._processRelayTruncatedCell(cell, origin)
        self.assertEqual(self.circuit._sendDestroyCell.call_count, 1)
        self.assertEqual(self.circuit._closeCircuit.call_count, 1)
    @mock.patch('oppy.circuit.circuit.logging', autospec=True)
    def test_processRelayDropCell(self, mock_logging):
        """A relay drop cell (long-range padding) is only logged."""
        self.circuit._processRelayDropCell('test', 2)
        self.assertTrue(mock_logging.debug.called)
    @mock.patch('oppy.circuit.circuit.logging', autospec=True)
    def test_processRelayResolvedCell(self, mock_logging):
        """A relay resolved cell is only logged (no resolution handling
        implemented)."""
        cell = mock.Mock()
        cell.rheader = mock.Mock()
        cell.rheader.stream_id = 1
        self.circuit._processRelayResolvedCell(cell, 2)
        self.assertTrue(mock_logging.debug.called)
@mock.patch('oppy.circuit.circuit.RelaySendMeCell', autospec=True)
def test_decDeliverWindow_at_threshold(self, mock_relay_sendme_cell):
cell = mock.Mock()
mock_relay_sendme_cell.make.return_value = cell
self.circuit._encryptAndSendCell = mock.Mock()
self.circuit._encryptAndSendCell.return_value = cell
| |
"""A smart hat guessing Hanabi player.
A strategy for 4 or 5 players which uses "hat guessing" to convey information
to all other players with a single clue. See doc_hat_player.md for a detailed
description of the strategy. The following table gives the approximate
percentages of this strategy reaching maximum score (over 10000 games).
Players | % (no variant) | % (purple) | % (rainbow)
--------+----------------+------------+-------------
4 | 94.2 | 94.4 | 94.2
5 | 91.2 | 95.7 | 95.7
"""
from hanabi_classes import *
from bot_utils import *
from copy import copy
# MODIFIEDACTION: whether the cluer decides for the first clued player what to do.
# If True, the cluer can tell that player to discard, including cards which might be useful later.
# If False, the clued player may clue instead of discarding.
MODIFIEDACTION = True
# When DEBUG is true, occurrences of the events named in DEBUGVALUES are
# counted (in r.debug) during play, for analysing the strategy.
DEBUG = True
DEBUGVALUES = ['play 5 instead', 'someone cannot clue, but I have a play', 'unsafe discard at 0 clues', 'safe discard at 0 clues', \
'clue blocked', 'I misplayed', 'BUG: instructed to discard at 8 clues', 'BUG: instructed to clue with 0 clues', 'instructing to discard critical card',
'player did wrong action at >0 clues', 'player did not play', 'player played wrong card', 'wrong action: discard at 0 clues', 'someone performed the wrong action',
'player played when not instructed to', 'we can use the clue from a 5 to reach another player in endgame', 'yolo', 'successful yolo', 'unsuccessful yolo']
### General utility functions, maybe these should be moved to bot_utils.py
def prev_cardname(cardname):
    """Return the cardname one rank below `cardname` (same suit).

    Does not check whether a card of a lower rank actually exists.
    """
    lower_rank = int(cardname[0]) - 1
    return "{}{}".format(lower_rank, cardname[1])
def list_between(begin, end, r):
    """Return the players from `begin` to `end` (both inclusive),
    wrapping around past the last seat (r.nPlayers - 1) when begin > end.
    """
    if begin <= end:
        return list(range(begin, end + 1))
    wrapped = list(range(begin, r.nPlayers))
    wrapped.extend(range(end + 1))
    return wrapped
class HatPlayer(AIPlayer):
@classmethod
def get_name(cls):
return 'hat'
### utility functions specific to this strategy
def number_to_action(self, n):
"""Returns action corresponding to a number.
0 means give any clue
1 means play newest (which is slot -1)
2 means play 2nd newest, etc.
5 means discard newest
6 means discard 2nd newest etc.
"""
if n == 0:
return 'hint', 0
elif n <= 4:
return 'play', 4 - n
return 'discard', 8 - n
def action_to_number(self, action):
"""Returns number corresponding to an action as represented in this bot
(where the second component is the *position* of the card played/discarded). """
if action[0] == 'hint':
return 0
return 4 - action[1] + (0 if action[0] == 'play' else 4)
def interpret_external_action(self, action):
"""Interprets an action in the log. Returns a pair action, card
action uses the internal encoding of actions:
- 'play', n means play slot n (note: slot n, or r.h[player].cards[n] counts from oldest to newest, so slot 0 is oldest)
- 'discard', n means discard slot n
- 'hint', 0 means give a hint
card is the played or discarded card (otherwise None)"""
if action[0] == 'hint':
return ('hint', 0), None
return (action[0], action[1]['position']), action[1]
    def clue_to_number(self, target, value, clueGiver, r):
        """Returns number corresponding to a clue.
        In 4 players:
        clue rank to newest card (slot -1) of next player means 0
        clue color to newest card of next player means 1
        clue anything that doesn't touch newest card to next player means 2
        every skipped player adds 3
        In 5 players, any clue to any player that doesn't touch the newest card in that hand means 8,
        independent of the receiver of the clue.
        For all other clues, every skipped player only adds 2
        """
        cards = r.h[target].cards
        # x classifies the clue w.r.t. the target's newest card:
        # 2 = the clue did not touch it (its 'indirect' list ends with this
        #     clue value -- presumably the framework records missed clues
        #     there; TODO confirm), 1 = a color clue, 0 = a rank clue.
        if cards[-1]['indirect'] and cards[-1]['indirect'][-1] == value:
            x = 2
        elif value in r.suits:
            x = 1
        else:
            x = 0
        if r.nPlayers == 4:
            # three clue classes per receiver, receivers ordered by seat
            # distance from the cluer
            return 3 * ((target - clueGiver - 1) % r.nPlayers) + x
        elif x == 2:
            # 5 players: any clue missing the newest card encodes 8
            return 8
        else:
            # 5 players: two clue classes (rank/color on newest) per receiver
            return 2 * ((target - clueGiver - 1) % r.nPlayers) + x
    def number_to_clue(self, cluenumber, me, r):
        """Inverse of `clue_to_number`: turn `cluenumber` into a concrete
        (target, clue value) pair that encodes it, as given by player `me`."""
        # x is the clue class (see clue_to_number): 0 rank-on-newest,
        # 1 color-on-newest, 2 not-touching-newest
        x = cluenumber % (3 if r.nPlayers == 4 else 2)
        if r.nPlayers == 4:
            target = (me + 1 + cluenumber // 3) % r.nPlayers
        elif cluenumber != 8:
            target = (me + 1 + cluenumber // 2) % r.nPlayers
        else: # in 5 players, to convey clue number 8 we clue any non-newest card
            for target in range(r.nPlayers):
                if target != me:
                    cards = r.h[target].cards
                    clue = self.clue_not_newest(cards, r)
                    if clue: return (target, clue)
            # this can theoretically happen, but will never happen in practice
            if DEBUG: r.debug['clue blocked'] += 1
            # fall back to a rank clue on the next player's newest card
            target = next(me, r)
            return (target, r.h[target].cards[-1]['name'][0])
        assert target != me
        cards = r.h[target].cards
        if x == 0: clue = cards[-1]['name'][0]
        if x == 1:
            if cards[-1]['name'][1] != RAINBOW_SUIT:
                clue = cards[-1]['name'][1]
            else:
                # a rainbow card is touched by any color clue; pick one
                clue = VANILLA_SUITS[2]
        if x == 2:
            clue = self.clue_not_newest(cards, r)
            if clue: return (target, clue)
            if DEBUG: r.debug['clue blocked'] += 1
            # if the clue is blocked, we currently just return another clue.
            # todo: We should add a list of blocked clues to modified_action, and give the player any non-blocked action
            clue = cards[-1]['name'][0]
        return (target, clue)
    def clue_not_newest(self, cards, r):
        """Return any clue that does not touch the newest card (slot -1) in `cards`.
        Returns False if no such clue exists"""
        newest = cards[-1]['name']
        for i in range(len(cards)-1):
            cardname = cards[i]['name']
            # a rank clue works whenever the ranks differ
            if cardname[0] != newest[0]:
                return cardname[0]
            # color clues: only usable if the newest card is not rainbow
            # (a rainbow card is touched by every color clue)
            if cardname[1] != newest[1] and newest[1] != RAINBOW_SUIT:
                if cardname[1] != RAINBOW_SUIT:
                    return cardname[1]
                # this card is rainbow: clue any color differing from the
                # newest card's color
                if newest[1] != VANILLA_SUITS[0]:
                    return VANILLA_SUITS[0]
                return VANILLA_SUITS[1]
        return False
    def recover_hand(self, player, r):
        """Recover the hand of `player` when the current clue was given"""
        cards = r.h[player].cards.copy()
        # no clue pending, or the player hasn't acted since it: current hand
        if not self.given_clues:
            return cards
        if player not in self.given_clues[0]['plays']:
            return cards
        action, card = self.given_clues[0]['plays'][player]
        if action[0] == 'hint':
            return cards
        # the player played/discarded `card` from slot `pos` since the clue:
        # undo that by dropping the newly drawn (newest) card, if any, and
        # reinserting the old card at its slot.
        # NOTE(review): assumes a full hand holds 4 cards and at most one
        # action per player since the clue -- TODO confirm.
        pos = action[1]
        if len(cards) == 4:
            cards.pop()
        cards.insert(pos, card)
        return cards
### Initialization functions
def initialize_memory(self, r):
"""Initializes the memory of a player.
These are all the variables that players have set between turns,
to interpret future clues and give clues myself. These will be updated by the
function 'think_at_turn_start', which is executed for every player at the start of every turn,
and also when giving a clue."""
# do I have a card which can be safely discarded?
# todo: make this a list of list, with a list of known useless cards for each player,
# stack safe discards when you would currently give no new information to a player
# if you give a discard clue, clue the oldest unknown useless card (hint if it doesn't exist?)
# instead of repeating a play clue, mark oldest unknown useless card
# make a list of useful cards in your hand
# a card is known useful when it was behind a card which was marked useless
# it should be a list containing pairs (card, dic), where card is the card object,
# dic is a dictionary {c:n for c in r.suits} where n is the minimal value of the card (which is r.progress[c] + 2) (approximately?) if n <= 5
# In the last round of the game, play a known useful card, or the most likely useful card in your hand if you don't see all cards
self.useless_card = None
# information of all clues given, which are not fully interpreted yet. It is a list of given clues
# Every clue consists of a dict with as info:
# 'value': clue value
# 'cluer': the player who clued
# 'plays': the plays that happened after the clue, stored as a dictionary player:(action, card) (card is Null if a hint is given).
# 'discarded': the discard pile the moment the clue is given
self.given_clues = []
# The following five variables consist of extra information about the 0-th given_clue.
# todo: There is some redundant information, maybe we can refactor some away.
# what the other players will do with that clue, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.